hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
list | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
list | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
list | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
list | cell_types
list | cell_type_groups
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a98cb6290a1bb5a19aec935105d986c6174a875
| 68,919 |
ipynb
|
Jupyter Notebook
|
MS416.ipynb
|
g4brielvs/MS416
|
d2dea28e194ac5b30194718a347bffe78e68a71a
|
[
"MIT"
] | null | null | null |
MS416.ipynb
|
g4brielvs/MS416
|
d2dea28e194ac5b30194718a347bffe78e68a71a
|
[
"MIT"
] | 6 |
2020-11-18T21:21:24.000Z
|
2022-03-11T23:24:44.000Z
|
MS416.ipynb
|
g4brielvs/MS416
|
d2dea28e194ac5b30194718a347bffe78e68a71a
|
[
"MIT"
] | null | null | null | 72.317943 | 30,241 | 0.694946 |
[
[
[
"%matplotlib notebook\n\nfrom pylab import *\nfrom scipy.stats import *",
"_____no_output_____"
],
[
"# Population\ntotal_population = 208e6\n\npercentage_0_14 = 0.23\npercentage_15_64 = 0.69\npercentage_65_ = 0.08\n\nnum_adults = total_population*(percentage_15_64 + percentage_65_)",
"_____no_output_____"
],
[
"# Labor force\npercentage_labor_force = 0.71\nlabor_force = num_adults*percentage_labor_force\n\ndisabled_adults = 19e6",
"_____no_output_____"
],
[
"# Monetary\nbasic_income = 880*12 # salario minimo nominal anual\ncurrent_wealth_transfers = 240e9 # aproximadamente 10% do PIB",
"_____no_output_____"
],
[
"def jk_rowling(num_non_workers):\n num_of_jk_rowlings = binom(num_non_workers, 1e-9).rvs()\n return num_of_jk_rowlings * 1e9\n\ndef basic_income_cost_benefit():\n direct_costs = num_adults * basic_income\n administrative_cost_per_person = norm(250,75)\n non_worker_multiplier = uniform(-0.10, 0.15).rvs()\n non_workers = (num_adults-labor_force-disabled_adults) * (1+non_worker_multiplier)\n marginal_worker_productivity = norm(1.2*basic_income,0.1*basic_income)\n\n administrative_costs = num_adults * administrative_cost_per_person.rvs()\n labor_effect_costs_benefit = -1 * ((num_adults-labor_force-disabled_adults) *\n non_worker_multiplier *\n (marginal_worker_hourly_productivity.rvs())\n )\n return direct_costs + administrative_costs + labor_effect_costs_benefit - jk_rowling(non_workers)\n\ndef basic_job_cost_benefit():\n administrative_cost_per_disabled_person = norm(500,150).rvs()\n administrative_cost_per_worker = norm(5000, 1500).rvs()\n non_worker_multiplier = uniform(-0.20, 0.25).rvs()\n basic_job_productivity = uniform(0.0, basic_income).rvs()\n\n disabled_cost = disabled_adults * (basic_income + administrative_cost_per_disabled_person)\n num_basic_workers = ((num_adults - disabled_adults - labor_force) *\n (1+non_worker_multiplier)\n )\n\n basic_worker_cost_benefit = num_basic_workers * (\n basic_income +\n administrative_cost_per_worker -\n basic_job_productivity\n )\n return disabled_cost + basic_worker_cost_benefit\n\n\nN = 1024*4\nbi = zeros(shape=(N,), dtype=float)\nbj = zeros(shape=(N,), dtype=float)\n\nfor k in range(N):\n bi[k] = basic_income_cost_benefit()\n bj[k] = basic_job_cost_benefit()",
"_____no_output_____"
],
[
"subplot(211)\nstart = 0\nwidth = 8e12\nheight= 400*N/1024\n\ntitle(\"Income Guarantee\")\nhist(bi, bins=5, color='red')\naxis([start,width,0,height])\n\nsubplot(212)\ntitle(\"Job Guarantee\")\nhist(bj, bins=20, color='blue')\n\naxis([start,width,0,height])\n\ntight_layout()\nshow()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a98eac756968962f7598617d69bb06dbe2ba975
| 218,961 |
ipynb
|
Jupyter Notebook
|
Wikipedia_NLP_Sentiment_Analysis.ipynb
|
alijablack/data-science
|
86b7ec73bc8145cbbb9d1e5249d4d4e17077e93d
|
[
"Apache-2.0"
] | 1 |
2021-03-16T18:58:26.000Z
|
2021-03-16T18:58:26.000Z
|
Wikipedia_NLP_Sentiment_Analysis.ipynb
|
alijablack/data-science
|
86b7ec73bc8145cbbb9d1e5249d4d4e17077e93d
|
[
"Apache-2.0"
] | null | null | null |
Wikipedia_NLP_Sentiment_Analysis.ipynb
|
alijablack/data-science
|
86b7ec73bc8145cbbb9d1e5249d4d4e17077e93d
|
[
"Apache-2.0"
] | null | null | null | 47.240777 | 4,162 | 0.475847 |
[
[
[
"<a href=\"https://colab.research.google.com/github/alijablack/data-science/blob/main/Wikipedia_NLP_Sentiment_Analysis.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Natural Language Processing",
"_____no_output_____"
],
[
"## Problem Statement\n\nUse natural language processing on Wikipedia articles to identify the overall sentiment analysis for a page and number of authors.",
"_____no_output_____"
],
[
"## Data Collection",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"!python -m textblob.download_corpora",
"[nltk_data] Downloading package brown to /root/nltk_data...\n[nltk_data] Unzipping corpora/brown.zip.\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.\n[nltk_data] Downloading package conll2000 to /root/nltk_data...\n[nltk_data] Unzipping corpora/conll2000.zip.\n[nltk_data] Downloading package movie_reviews to /root/nltk_data...\n[nltk_data] Unzipping corpora/movie_reviews.zip.\nFinished.\n"
],
[
"from textblob import TextBlob\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.feature_extraction.text import CountVectorizer",
"_____no_output_____"
],
[
"people_path = '/content/drive/My Drive/Copy of people_db.csv'\npeople_df = pd.read_csv(people_path)",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis",
"_____no_output_____"
],
[
"## Part 1 of Project\n\n",
"_____no_output_____"
],
[
"This dataset from dbpedia includes over 42,000 entries.",
"_____no_output_____"
]
],
[
[
"people_df.info",
"_____no_output_____"
]
],
[
[
"Explore the first 100 to decide who to choose.",
"_____no_output_____"
]
],
[
[
"people_df.head(100).T",
"_____no_output_____"
]
],
[
[
"Select a person, Armen Ra, from the list to use as the input for sentiment analysis. Output Armen Ra's overview from the database.",
"_____no_output_____"
]
],
[
[
"my_person = [people_df.iloc[96]['text']]\nmy_person",
"_____no_output_____"
]
],
[
[
"### Data Processing",
"_____no_output_____"
],
[
"#### Vector Analysis",
"_____no_output_____"
]
],
[
[
"vect_people = CountVectorizer(stop_words='english')\nword_weight = vect_people.fit_transform(people_df['text'])",
"_____no_output_____"
],
[
"word_weight",
"_____no_output_____"
]
],
[
[
"#### Nearest Neighbors",
"_____no_output_____"
],
[
"Fit the nearest neighbors model with content from people dataframe.",
"_____no_output_____"
]
],
[
[
"nn = NearestNeighbors(metric='euclidean')\nnn.fit(word_weight)",
"_____no_output_____"
],
[
"ra_index = people_df[people_df['name'] == 'Armen Ra'].index[0]\nra_index",
"_____no_output_____"
]
],
[
[
"Use the nearest neighbor model to output people with overviews similar to Armen Ra's page.",
"_____no_output_____"
]
],
[
[
"distances, indices = nn.kneighbors(word_weight[ra_index], n_neighbors=11)",
"_____no_output_____"
],
[
"distances",
"_____no_output_____"
]
],
[
[
"Show the index of 10 similar overviews.",
"_____no_output_____"
]
],
[
[
"indices",
"_____no_output_____"
]
],
[
[
"Output the 10 people with overviews closest to Armen Ra.",
"_____no_output_____"
]
],
[
[
"people_df.iloc[indices[0],:]",
"_____no_output_____"
],
[
"top_ten = people_df.iloc[indices[0],1:11]",
"_____no_output_____"
],
[
"top_ten.head(11)",
"_____no_output_____"
],
[
"df2 = people_df[['text','name']]\n# For each row, combine all the columns into one column\ndf3 = df2.apply(lambda x: ','.join(x.astype(str)), axis=1)\n# Store them in a pandas dataframe\ndf_clean = pd.DataFrame({'clean': df3})\n# Create the list of list format of the custom corpus for gensim modeling \nsent = [row.split(',') for row in df_clean['clean']]\n# show the example of list of list format of the custom corpus for gensim modeling \nsent[:2]",
"_____no_output_____"
]
],
[
[
"Another way to output the 10 people with overviews closest to Armen Ra's page.",
"_____no_output_____"
]
],
[
[
"import gensim \nfrom gensim.models import Word2Vec\n\nmodel = Word2Vec(sent, min_count=1,size= 50,workers=3, window =3, sg = 1)",
"_____no_output_____"
],
[
"model['Armen Ra']",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"model.most_similar('Armen Ra'[:10])",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: DeprecationWarning: Call to deprecated `most_similar` (Method will be removed in 4.0.0, use self.wv.most_similar() instead).\n \"\"\"Entry point for launching an IPython kernel.\n/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.\n if np.issubdtype(vec.dtype, np.int):\n"
]
],
[
[
"This method outputs a different set of people than the nearest neighbors method. The nearest neighbors method output appears more closely aligned with the substance of Armen Ra's overview by similarly outputting people in creative industries. Whereas the similarity method outputs people with overviews that share a similar tone and format as Armen Ra's overview that is brief, informational, neutral.",
"_____no_output_____"
],
[
"#### Sentiment Analysis",
"_____no_output_____"
],
[
"Make Armen Ra's overview a string.",
"_____no_output_____"
]
],
[
[
"df2 = pd.DataFrame(my_person)\n# For each row, combine all the columns into one column\ndf3 = df2.apply(lambda x: ','.join(x.astype(str)), axis=1)\n# Store them in a pandas dataframe\ndf_clean = pd.DataFrame({'clean': df3})\n# Create the list of list format of the custom corpus for gensim modeling \nsent1 = [row.split(',') for row in df_clean['clean']]\n# show the example of list of list format of the custom corpus for gensim modeling \nsent1[:2]",
"_____no_output_____"
]
],
[
[
"Assign tags to each word in the overview.",
"_____no_output_____"
]
],
[
[
"!python -m textblob.download_corpora\nfrom textblob import TextBlob\nwiki = TextBlob(str(sent1))\nwiki.tags",
"[nltk_data] Downloading package brown to /root/nltk_data...\n[nltk_data] Package brown is already up-to-date!\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Package averaged_perceptron_tagger is already up-to-\n[nltk_data] date!\n[nltk_data] Downloading package conll2000 to /root/nltk_data...\n[nltk_data] Package conll2000 is already up-to-date!\n[nltk_data] Downloading package movie_reviews to /root/nltk_data...\n[nltk_data] Package movie_reviews is already up-to-date!\nFinished.\n"
]
],
[
[
"Identify the nouns in the overview.",
"_____no_output_____"
]
],
[
[
"wiki.noun_phrases",
"_____no_output_____"
],
[
"zen = TextBlob(str(sent1))",
"_____no_output_____"
]
],
[
[
"Identify the words in the overview.",
"_____no_output_____"
]
],
[
[
"zen.words",
"_____no_output_____"
]
],
[
[
"Identify the sentences in the overview.",
"_____no_output_____"
]
],
[
[
"zen.sentences",
"_____no_output_____"
],
[
"sentence = TextBlob(str(sent1))",
"_____no_output_____"
],
[
"sentence.words",
"_____no_output_____"
],
[
"sentence.words[-1].pluralize()",
"_____no_output_____"
],
[
"sentence.words[-1].singularize()",
"_____no_output_____"
],
[
"b = TextBlob(str(sentence))\nprint(b.correct())",
"[['armed ra is an american artist and performer of iranianarmenian descent born in than ran he was raised by his mother a concert pianist and his aunt an opera singer and ikebana master he taught himself to play therein his music fuses armenian folk music with modern instrumentation along with melody lounge standards and classical areas his concerts are known for their combination of both visual arts and his musicarmen ra has played at the united nations winner konzerthaus mozartsaal vienna clubs knitting factory la mamma etc does pub bolder museum of modern art lincoln center the gershwin hotel by king museum and ditch projects he has been features on and appeared in can ho mt the vogue the new york times the new york post the village voice rolling stone and glamour has performed and recorded with various bands and on many projects including a collaboration with british recording artist mary almond on the song my madness i from his 2010 release variety his debut solo d plays the therein released on bowl fork records in 2010 showcases many classical armenian laments and folk songs representing both arms heritage and his very first musical influence he has a cameo appearance in the film party monster currently besides in hollywood californian october 2010 armed appeared as a guest judge on the log network show the arrangement appeared on the cover track everyday is halloween playing the therein for charon needles album pg in 2014 he played the therein for voltaires album raised by bath']]\n"
]
],
[
[
"Output the sentiment for Armen Ra's overview.",
"_____no_output_____"
]
],
[
[
"for sentence in zen.sentences:\n print(sentence.sentiment[0])",
"0.09986631016042781\n"
]
],
[
[
"## Part 2 of Project",
"_____no_output_____"
],
[
"### Data Collection",
"_____no_output_____"
],
[
"Install Wikipedia API. Wikipedia will be the main datasource for this step to access the full content of Armen Ra's page.",
"_____no_output_____"
]
],
[
[
"!pip install wikipedia",
"Collecting wikipedia\n Downloading https://files.pythonhosted.org/packages/67/35/25e68fbc99e672127cc6fbb14b8ec1ba3dfef035bf1e4c90f78f24a80b7d/wikipedia-1.4.0.tar.gz\nRequirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.6/dist-packages (from wikipedia) (4.6.3)\nRequirement already satisfied: requests<3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from wikipedia) (2.23.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (2020.6.20)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (2.10)\nBuilding wheels for collected packages: wikipedia\n Building wheel for wikipedia (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for wikipedia: filename=wikipedia-1.4.0-cp36-none-any.whl size=11686 sha256=6d5f7fc6b4f0f0e4c31c37d8d160860598453ba077438b8e07d1e5f5e039efe8\n Stored in directory: /root/.cache/pip/wheels/87/2a/18/4e471fd96d12114d16fe4a446d00c3b38fb9efcb744bd31f4a\nSuccessfully built wikipedia\nInstalling collected packages: wikipedia\nSuccessfully installed wikipedia-1.4.0\n"
],
[
"import wikipedia",
"_____no_output_____"
]
],
[
[
"### Data Processing",
"_____no_output_____"
],
[
"Produce the entire page of Armen Ra",
"_____no_output_____"
]
],
[
[
"#search wikipedia for Armen Ra\nprint(wikipedia.search('Armen Ra'))",
"['Armen Ra', 'Ra (disambiguation)', 'Armen (name)', 'List of American musicians of Armenian descent', 'Party Monster (film)', 'Tammie Brown', 'Miss Fame', 'Blue Madonna', 'Armen Hambardzumyan', '2003 in film']\n"
],
[
"#output the summary for Armen Ra\nprint(wikipedia.summary(\"Armen Ra\"))\n",
"Armen Ra is a Persian-Armenian artist, self-taught thereminist, production designer, director, and performer.\n"
],
[
"#output the page for Armen Ra\nprint(wikipedia.page(\"Armen Ra\"))",
"<WikipediaPage 'Armen Ra'>\n"
],
[
"#output the page content for Armen Ra\nprint(wikipedia.page('Armen Ra').content)",
"Armen Ra is a Persian-Armenian artist, self-taught thereminist, production designer, director, and performer.\n\n\n== Musical career ==\n\n\n=== Career (2010-2013) ===\nRa began studying the theremin in 2001, debuting with the orchestral group Antony & the Johnsons in New York City.\nRa has played at the United Nations, Wiener Konzerthaus Mozartsaal Vienna, CBGBs, Knitting Factory, La MaMa E.T.C., Joe's Pub, Boulder Museum of Modern Art, Lincoln Center, The Gershwin Hotel, B.B. King Museum, and Dietch Projects.\nHe has performed and recorded with various bands and on many projects (including a collaboration with British recording artist Marc Almond \n(of Soft Cell), on the song \"My Madness & I\" from his 2010 release Varieté). His debut solo CD Plays the Theremin (released on Bowl & Fork Records in 2010) showcases many classical Armenian laments and folk songs. Ra performed on the Sharon Needles album PG-13 on band Ministry's cover track \"Everyday Is Halloween\".\n\n\n=== Career (2014-present) ===\nIn recent years, Ra has appeared in the following works:\n\nArmen plays on the Current 93 album entitled HoneySuckle Æons. He appears in multiple songs on this 2011 release.\nArmen plays on the debut album of Sharon Needles. The songs “Everyday Is Halloween” and “This Club Is a Haunted House” were released in 2013.\nIn 2014, he played the theremin for Voltaire's album Raised by Bats.\nIn 2015, he released Theremin Classique, a collection of European arias.\nArmen's recording of “Dle Yaman” was used for the video of designer Michael Schmidt’s 3-D gown in 2015.\nHe was featured on Selena Gomez’s Revival on the track “Me and My Girls” in 2016\nHe was featured on Gwen Stefani’s album on the track “Naughty” in 2016.\nHe was featured on track 11 \"Supernatural\" of BØRNS' Blue Madonna\n\n\n== Appearances in Media ==\nHe has a cameo appearance in the film Party Monster. 
Other appearances in media include:\n\nCameo as a desk clerk in Tomorrow Always Comes in 2006.\nMusic for the short film Connect in 2010.\nGuest judge on the Logo Network show The Arrangement.\nOpener for Nick Cave & The Bad Seeds's Grinderman on their 2010 tour.\nProduction designer for the 2012 horror movie Excision.\nRelease of the documentary When My Sorrow Died: The Legend of Armen Ra & the Theremin.\nMusic for the movie Hara Kiri in 2016.\nPromotional videos for electropop artist BØRNS, entitled \"The Search for the Lost Sounds\" and \"The Faded Heart Sessions\".\n\n\n== References ==\n\n\n== External links ==\nOfficial Website\nArmen Ra at a benefit concert on April 4 at the Angel Orensanz Center in New York\n"
],
[
"#output the url for Armen Ra's Wikipedia page\nprint(wikipedia.page('Armen Ra').url)",
"https://en.wikipedia.org/wiki/Armen_Ra\n"
],
[
"ra_df = pd.read_html('https://en.wikipedia.org/wiki/Armen_Ra')",
"_____no_output_____"
],
[
"type(ra_df)",
"_____no_output_____"
],
[
"page = wikipedia.page('Armen Ra')",
"_____no_output_____"
],
[
"page.summary",
"_____no_output_____"
],
[
"page.content",
"_____no_output_____"
],
[
"type(page.content)",
"_____no_output_____"
],
[
"wiki1 = TextBlob(page.content)\nwiki1.tags",
"_____no_output_____"
],
[
"wiki1.noun_phrases",
"_____no_output_____"
]
],
[
[
"##### Sentiment Analysis",
"_____no_output_____"
],
[
"Produce the sentiment for Armen Ra's page.",
"_____no_output_____"
]
],
[
[
"testimonial = TextBlob(page.content)",
"_____no_output_____"
],
[
"testimonial.sentiment",
"_____no_output_____"
]
],
[
[
"Sentiment analysis shows a primarily neutral and objective tone throughout the page. ",
"_____no_output_____"
]
],
[
[
"zen = TextBlob(page.content)",
"_____no_output_____"
]
],
[
[
"Process Armen Ra's page into words and sentences to determine how the sentiment changes throughout the page.",
"_____no_output_____"
]
],
[
[
"zen.words",
"_____no_output_____"
],
[
"zen.sentences",
"_____no_output_____"
]
],
[
[
"Determine any changes in sentiment throughout the page.",
"_____no_output_____"
]
],
[
[
"for sentence in zen.sentences:\n print(sentence.sentiment[0])",
"0.0\n0.06818181818181818\n0.05\n0.0\n0.15\n0.25\n-0.2\n0.0\n0.0\n0.0\n-0.2\n0.0\n0.0\n0.0\n-0.15000000000000002\n0.08333333333333333\n-0.125\n0.0\n0.0\n-0.6999999999999998\n0.0\n0.0\n0.0\n0.0\n0.012121212121212116\n"
]
],
[
[
"Estimate 6 or 7 authors contributed to the Wikipedia article based on changes in the sentiment analysis.",
"_____no_output_____"
],
[
"Output a summary of the Armen Ra page",
"_____no_output_____"
]
],
[
[
"page.summary",
"_____no_output_____"
],
[
"sentence = TextBlob(page.content)\nsentence.words",
"_____no_output_____"
],
[
"sentence.words[2].singularize()",
"_____no_output_____"
],
[
"sentence.words[2].pluralize()",
"_____no_output_____"
],
[
"b = TextBlob(page.content)\nprint(b.correct())",
"Men A is a Persian-Armenian artist, self-taught thereminist, production designer, director, and performer.\n\n\n== Musical career ==\n\n\n=== Career (2010-2013) ===\nA began studying the therein in 2001, refuting with the orchestra group Anthony & the Johnson in New Work City.\nA has played at the United Nations, Dinner Konzerthaus Mozartsaal Vienna, CBGBs, Knitting Factory, A papa E.T.C., Toe's Sub, Shoulder Museum of Modern Art, Lincoln Enter, The Gershwin Hotel, B.B. King Museum, and Fetch Projects.\nHe has performed and recorded with various bands and on many projects (including a collaboration with British recording artist Arc Almond \n(of Soft Well), on the song \"By Sadness & I\" from his 2010 release Variety). His debut solo of Plays the Therein (released on Bowl & Work Records in 2010) showcases many classical Armenian laments and folk songs. A performed on the Charon Needles album of-13 on band Ministry's cover track \"Everyday Is Halloween\".\n\n\n=== Career (2014-present) ===\nIn recent years, A has appeared in the following works:\n\nMen plays on the Current 93 album entitled HoneySuckle Sons. He appears in multiple songs on this 2011 release.\nMen plays on the debut album of Charon Needles. The songs “Everyday Is Halloween” and “His Club Is a Taunted House” were released in 2013.\nIn 2014, he played the therein for Voltaire's album Raised by Oats.\nIn 2015, he released Therein Classique, a collection of European areas.\nMen's recording of “Le Maman” was used for the video of designer Michael Schmidt’s 3-D gown in 2015.\nHe was features on Helena Gomez’s Revival on the track “He and By Girls” in 2016\nHe was features on Wen Tetani’s album on the track “Naughty” in 2016.\nHe was features on track 11 \"Supernatural\" of BØRNS' Blue Madonna\n\n\n== Appearances in Media ==\nHe has a cameo appearance in the film Party Monster. 
Other appearances in media include:\n\nCameo as a desk clerk in Tomorrow Always Comes in 2006.\nMusic for the short film Connect in 2010.\nGuest judge on the Go Network show The Arrangement.\nOpened for Sick Have & The Had Needs's Grinderman on their 2010 tour.\nProduction designer for the 2012 horror movie Excision.\nRelease of the documentary When By Sorrow Died: The Legend of Men A & the Therein.\nMusic for the movie Vara Fire in 2016.\nPromotional video for electropop artist BØRNS, entitled \"The Search for the Most Wounds\" and \"The Faded Heart Sessions\".\n\n\n== References ==\n\n\n== External links ==\nOfficial Website\nMen A at a benefit concert on April 4 at the Angel Orensanz Enter in New Work\n"
]
],
[
[
"Consider algorithmic bias and errors in the natural language processing tools as Armen Ra's name is being shortened to 'Men A' or 'A'.",
"_____no_output_____"
]
],
[
[
"blob = TextBlob(page.content)\nblob.ngrams(n=3)",
"_____no_output_____"
],
[
"#The sentiment of Armen Ra's page is in a informational, neutral tone\ntestimonial = TextBlob(page.content)\ntestimonial.sentiment",
"_____no_output_____"
]
],
[
[
"### Communication of Results",
"_____no_output_____"
],
[
"Ultimately, the sentiment analysis for Armen Ra's page shows the tone is primarily informational, objective, and neutral. When using Nearest Neighbors or Model Most Similar to identify Wikipedia pages similar to Armen Ra's, there were different results presented based on the method was used. Nearest Neighbors presented pages of individuals that had similarly neutral tones, while Most Similar showed individuals in similar industries as Armen Ra. The natural language processing tools at times output errors in Armen Ra's name and typos throughout the content. Consider further analysis into algorithmic bias present within the natural language processing tools and alternative data analysis and visualization methods available.",
"_____no_output_____"
],
[
"## Live Coding",
"_____no_output_____"
],
[
"In addition to presenting our slides to each other, at the end of the presentation each analyst will demonstrate their code using a famous person randomly selected from the database.",
"_____no_output_____"
]
],
[
[
"Roddy = people_df[people_df['name'].str.contains('Roddy Piper')]",
"_____no_output_____"
],
[
"Roddy",
"_____no_output_____"
],
[
"wikipedia.search('Roddy Piper')\n\n",
"_____no_output_____"
],
[
"wikipedia.summary('Roddy Piper')\n",
"_____no_output_____"
],
[
"wikipedia.page('Roddy Piper')\n",
"_____no_output_____"
],
[
"wikipedia.page('Roddy Piper').url",
"_____no_output_____"
],
[
"famous_page = wikipedia.page('Roddy Piper')",
"_____no_output_____"
],
[
"famous_page.summary",
"_____no_output_____"
],
[
"testimonial = TextBlob(famous_page.content)",
"_____no_output_____"
],
[
"testimonial.sentiment",
"_____no_output_____"
]
],
[
[
"Nearest Neighbors",
"_____no_output_____"
]
],
[
[
"people_df1 = [people_df.iloc[32819]['text']]\npeople_df1",
"_____no_output_____"
],
[
"nn = NearestNeighbors(metric='euclidean')\nnn.fit(word_weight)",
"_____no_output_____"
],
[
"roddy_index = people_df[people_df['name'] == 'Roddy Piper'].index[0]\nroddy_index",
"_____no_output_____"
],
[
"distances, indices = nn.kneighbors(word_weight[roddy_index], n_neighbors=11)",
"_____no_output_____"
],
[
"distances",
"_____no_output_____"
],
[
"indices",
"_____no_output_____"
],
[
"people_df.iloc[indices[0],:]",
"_____no_output_____"
],
[
"people_df.iloc[2037]['text']",
"_____no_output_____"
],
[
"people_df.iloc[18432]['text']",
"_____no_output_____"
],
[
"people_df.iloc[21038]['text']",
"_____no_output_____"
],
[
"people_df.iloc[35633]['text']",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a98eb228f46b1557c0c9a52f84e7a080ebe9337
| 55,964 |
ipynb
|
Jupyter Notebook
|
Notebooks/trainings/poolings/Train sup-avg_first_last.ipynb
|
gokg/SimCSE
|
eeb7347f6b59d7b2e32ca75ff5c5aae02890f666
|
[
"MIT"
] | null | null | null |
Notebooks/trainings/poolings/Train sup-avg_first_last.ipynb
|
gokg/SimCSE
|
eeb7347f6b59d7b2e32ca75ff5c5aae02890f666
|
[
"MIT"
] | null | null | null |
Notebooks/trainings/poolings/Train sup-avg_first_last.ipynb
|
gokg/SimCSE
|
eeb7347f6b59d7b2e32ca75ff5c5aae02890f666
|
[
"MIT"
] | null | null | null | 55,964 | 55,964 | 0.743299 |
[
[
[
"from google.colab import drive\ndrive.flush_and_unmount()\ndrive.mount('/content/gdrive/')",
"Mounted at /content/gdrive/\n"
],
[
"!pip uninstall folium\n!pip install torch==1.7.1+cu110 -f https://download.pytorch.org/whl/torch_stable.html\n!pip install -r /content/gdrive/MyDrive/SimCSE/requirements.txt",
"\u001b[33mWARNING: Skipping folium as it is not installed.\u001b[0m\nLooking in links: https://download.pytorch.org/whl/torch_stable.html\nRequirement already satisfied: torch==1.7.1+cu110 in /usr/local/lib/python3.7/dist-packages (1.7.1+cu110)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1+cu110) (3.10.0.2)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1+cu110) (1.19.5)\nRequirement already satisfied: transformers==4.2.1 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (4.2.1)\nRequirement already satisfied: scipy==1.5.4 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 2)) (1.5.4)\nRequirement already satisfied: datasets==1.2.1 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 3)) (1.2.1)\nRequirement already satisfied: pandas==1.1.5 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 4)) (1.1.5)\nRequirement already satisfied: scikit-learn==0.24.0 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 5)) (0.24.0)\nRequirement already satisfied: prettytable==2.1.0 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 6)) (2.1.0)\nRequirement already satisfied: gradio in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (2.7.5.1)\nRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 8)) (1.7.1+cu110)\nRequirement already satisfied: setuptools==49.3.0 in /usr/local/lib/python3.7/dist-packages (from -r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 9)) (49.3.0)\nRequirement already 
satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (21.3)\nRequirement already satisfied: tokenizers==0.9.4 in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (0.9.4)\nRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (4.49.0)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (2.23.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (4.10.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (3.4.2)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (1.19.5)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (0.0.47)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (2019.12.20)\nRequirement already satisfied: pyarrow>=0.17.1 in /usr/local/lib/python3.7/dist-packages (from datasets==1.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 3)) (3.0.0)\nRequirement already satisfied: xxhash in /usr/local/lib/python3.7/dist-packages (from datasets==1.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 3)) (2.0.2)\nRequirement 
already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets==1.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 3)) (0.70.12.2)\nRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets==1.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 3)) (0.3.4)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas==1.1.5->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 4)) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas==1.1.5->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 4)) (2.8.2)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.0->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 5)) (3.0.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.0->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 5)) (1.1.0)\nRequirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prettytable==2.1.0->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 6)) (0.2.5)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas==1.1.5->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 4)) (1.15.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from 
requests->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (2021.10.8)\nRequirement already satisfied: pydub in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.25.1)\nRequirement already satisfied: markdown2 in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (2.4.2)\nRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (7.1.2)\nRequirement already satisfied: analytics-python in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.4.0)\nRequirement already satisfied: aiohttp in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.8.1)\nRequirement already satisfied: python-multipart in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.0.5)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.2.2)\nRequirement already satisfied: fastapi in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.72.0)\nRequirement already satisfied: uvicorn in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.17.0)\nRequirement already satisfied: paramiko in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (2.9.2)\nRequirement already 
satisfied: ffmpy in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.3.0)\nRequirement already satisfied: pycryptodome in /usr/local/lib/python3.7/dist-packages (from gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.12.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 8)) (3.10.0.2)\nRequirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.3.0)\nRequirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (4.0.2)\nRequirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.7.2)\nRequirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.2.0)\nRequirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (6.0.1)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (21.4.0)\nRequirement already satisfied: charset-normalizer<3.0,>=2.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (2.0.10)\nRequirement already satisfied: asynctest==0.13.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.13.0)\nRequirement 
already satisfied: backoff==1.10.0 in /usr/local/lib/python3.7/dist-packages (from analytics-python->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.10.0)\nRequirement already satisfied: monotonic>=1.5 in /usr/local/lib/python3.7/dist-packages (from analytics-python->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.6)\nRequirement already satisfied: starlette==0.17.1 in /usr/local/lib/python3.7/dist-packages (from fastapi->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.17.1)\nRequirement already satisfied: pydantic!=1.7,!=1.7.1,!=1.7.2,!=1.7.3,!=1.8,!=1.8.1,<2.0.0,>=1.6.2 in /usr/local/lib/python3.7/dist-packages (from fastapi->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.9.0)\nRequirement already satisfied: anyio<4,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from starlette==0.17.1->fastapi->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.5.0)\nRequirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.7/dist-packages (from anyio<4,>=3.0.0->starlette==0.17.1->fastapi->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.2.0)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (3.7.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.3.2)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.0.6)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.11.0)\nRequirement 
already satisfied: pynacl>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from paramiko->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.5.0)\nRequirement already satisfied: cryptography>=2.5 in /usr/local/lib/python3.7/dist-packages (from paramiko->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (36.0.1)\nRequirement already satisfied: bcrypt>=3.1.3 in /usr/local/lib/python3.7/dist-packages (from paramiko->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.2.0)\nRequirement already satisfied: cffi>=1.1 in /usr/local/lib/python3.7/dist-packages (from bcrypt>=3.1.3->paramiko->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (1.15.0)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.1->bcrypt>=3.1.3->paramiko->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (2.21)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==4.2.1->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 1)) (7.1.2)\nRequirement already satisfied: asgiref>=3.4.0 in /usr/local/lib/python3.7/dist-packages (from uvicorn->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (3.5.0)\nRequirement already satisfied: h11>=0.8 in /usr/local/lib/python3.7/dist-packages (from uvicorn->gradio->-r /content/gdrive/MyDrive/SimCSE/requirements.txt (line 7)) (0.13.0)\n"
],
[
"!pip install simcse",
"Requirement already satisfied: simcse in /usr/local/lib/python3.7/dist-packages (0.4)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from simcse) (0.24.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from simcse) (4.49.0)\nRequirement already satisfied: scipy<1.6,>=1.5.4 in /usr/local/lib/python3.7/dist-packages (from simcse) (1.5.4)\nRequirement already satisfied: numpy<1.20,>=1.19.5 in /usr/local/lib/python3.7/dist-packages (from simcse) (1.19.5)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from simcse) (49.3.0)\nRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from simcse) (1.7.1+cu110)\nRequirement already satisfied: transformers in /usr/local/lib/python3.7/dist-packages (from simcse) (4.2.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->simcse) (1.1.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->simcse) (3.0.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch->simcse) (3.10.0.2)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (3.4.2)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (21.3)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (4.10.0)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (2019.12.20)\nRequirement already satisfied: tokenizers==0.9.4 in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (0.9.4)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (0.0.47)\nRequirement 
already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers->simcse) (2.23.0)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers->simcse) (3.7.0)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers->simcse) (3.0.6)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers->simcse) (2021.10.8)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers->simcse) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers->simcse) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers->simcse) (3.0.4)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers->simcse) (7.1.2)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers->simcse) (1.15.0)\n"
],
[
"import os\n#os.environ['TRANSFORMERS_CACHE'] = '/content/gdrive/MyDrive/SimCSE/transformers'\nfrom simcse import SimCSE",
"_____no_output_____"
],
[
"import sys\nPATH_TO_SENTEVAL = '/content/gdrive/MyDrive/SimCSE/SentEval'\nsys.path.insert(0, PATH_TO_SENTEVAL)\nimport senteval",
"_____no_output_____"
],
[
"\n!python /content/gdrive/MyDrive/SimCSE/train.py --model_name_or_path bert-base-uncased --train_file /content/gdrive/MyDrive/SimCSE/data/nli_for_simcse.csv --output_dir /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased --num_train_epochs 3 --per_device_train_batch_size 128 --learning_rate 5e-5 --max_seq_length 32 --evaluation_strategy steps --metric_for_best_model stsb_spearman --load_best_model_at_end --eval_steps 125 --pooler_type avg_first_last --overwrite_output_dir --temp 0.05 --do_train --do_eval --fp16 \"$@\"",
"01/23/2022 14:57:31 - INFO - __main__ - PyTorch: setting up devices\n01/23/2022 14:57:32 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1 distributed training: False, 16-bits training: True\n01/23/2022 14:57:32 - INFO - __main__ - Training/evaluation parameters OurTrainingArguments(output_dir='/content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased', overwrite_output_dir=True, do_train=True, do_eval=True, do_predict=False, evaluation_strategy=<EvaluationStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=128, per_device_eval_batch_size=8, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=1, eval_accumulation_steps=None, learning_rate=5e-05, weight_decay=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=3.0, max_steps=-1, lr_scheduler_type=<SchedulerType.LINEAR: 'linear'>, warmup_steps=0, logging_dir='runs/Jan23_14-57-31_2c304b25680d', logging_first_step=False, logging_steps=500, save_steps=500, save_total_limit=None, no_cuda=False, seed=42, fp16=True, fp16_opt_level='O1', fp16_backend='auto', local_rank=-1, tpu_num_cores=None, tpu_metrics_debug=False, debug=False, dataloader_drop_last=False, eval_steps=125, dataloader_num_workers=0, past_index=-1, run_name='/content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased', disable_tqdm=False, remove_unused_columns=True, label_names=None, load_best_model_at_end=True, metric_for_best_model='stsb_spearman', greater_is_better=True, ignore_data_skip=False, sharded_ddp=False, deepspeed=None, label_smoothing_factor=0.0, adafactor=False, eval_transfer=False)\nDownloading: 5.33kB [00:00, 5.86MB/s] \nUsing custom data configuration default\nReusing dataset csv (/content/gdrive/MyDrive/SimCSE/data/csv/default-6b8e4f1a08e7f75f/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2)\n[INFO|file_utils.py:1272] 2022-01-23 
14:57:38,855 >> https://huggingface.co/bert-base-uncased/resolve/main/config.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmpddaib1r8\nDownloading: 100% 570/570 [00:00<00:00, 566kB/s]\n[INFO|file_utils.py:1276] 2022-01-23 14:57:39,153 >> storing https://huggingface.co/bert-base-uncased/resolve/main/config.json in cache at /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e\n[INFO|file_utils.py:1279] 2022-01-23 14:57:39,153 >> creating metadata file for /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e\n[INFO|configuration_utils.py:445] 2022-01-23 14:57:39,153 >> loading configuration file https://huggingface.co/bert-base-uncased/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e\n[INFO|configuration_utils.py:481] 2022-01-23 14:57:39,154 >> Model config BertConfig {\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"attention_probs_dropout_prob\": 0.1,\n \"gradient_checkpointing\": false,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 768,\n \"initializer_range\": 0.02,\n \"intermediate_size\": 3072,\n \"layer_norm_eps\": 1e-12,\n \"max_position_embeddings\": 512,\n \"model_type\": \"bert\",\n \"num_attention_heads\": 12,\n \"num_hidden_layers\": 12,\n \"pad_token_id\": 0,\n \"position_embedding_type\": \"absolute\",\n \"transformers_version\": \"4.2.1\",\n \"type_vocab_size\": 2,\n \"use_cache\": true,\n \"vocab_size\": 30522\n}\n\n[INFO|configuration_utils.py:445] 2022-01-23 14:57:39,426 >> loading configuration file https://huggingface.co/bert-base-uncased/resolve/main/config.json 
from cache at /root/.cache/huggingface/transformers/3c61d016573b14f7f008c02c4e51a366c67ab274726fe2910691e2a761acf43e.37395cee442ab11005bcd270f3c34464dc1704b715b5d7d52b1a461abe3b9e4e\n[INFO|configuration_utils.py:481] 2022-01-23 14:57:39,427 >> Model config BertConfig {\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"attention_probs_dropout_prob\": 0.1,\n \"gradient_checkpointing\": false,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 768,\n \"initializer_range\": 0.02,\n \"intermediate_size\": 3072,\n \"layer_norm_eps\": 1e-12,\n \"max_position_embeddings\": 512,\n \"model_type\": \"bert\",\n \"num_attention_heads\": 12,\n \"num_hidden_layers\": 12,\n \"pad_token_id\": 0,\n \"position_embedding_type\": \"absolute\",\n \"transformers_version\": \"4.2.1\",\n \"type_vocab_size\": 2,\n \"use_cache\": true,\n \"vocab_size\": 30522\n}\n\n[INFO|file_utils.py:1272] 2022-01-23 14:57:39,706 >> https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmp38ar747_\nDownloading: 100% 232k/232k [00:00<00:00, 925kB/s]\n[INFO|file_utils.py:1276] 2022-01-23 14:57:40,229 >> storing https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt in cache at /root/.cache/huggingface/transformers/45c3f7a79a80e1cf0a489e5c62b43f173c15db47864303a55d623bb3c96f72a5.d789d64ebfe299b0e416afc4a169632f903f693095b4629a7ea271d5a0cf2c99\n[INFO|file_utils.py:1279] 2022-01-23 14:57:40,229 >> creating metadata file for /root/.cache/huggingface/transformers/45c3f7a79a80e1cf0a489e5c62b43f173c15db47864303a55d623bb3c96f72a5.d789d64ebfe299b0e416afc4a169632f903f693095b4629a7ea271d5a0cf2c99\n[INFO|file_utils.py:1272] 2022-01-23 14:57:40,503 >> https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmp52tyediw\nDownloading: 100% 466k/466k 
[00:00<00:00, 1.47MB/s]\n[INFO|file_utils.py:1276] 2022-01-23 14:57:41,099 >> storing https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json in cache at /root/.cache/huggingface/transformers/534479488c54aeaf9c3406f647aa2ec13648c06771ffe269edabebd4c412da1d.7f2721073f19841be16f41b0a70b600ca6b880c8f3df6f3535cbc704371bdfa4\n[INFO|file_utils.py:1279] 2022-01-23 14:57:41,099 >> creating metadata file for /root/.cache/huggingface/transformers/534479488c54aeaf9c3406f647aa2ec13648c06771ffe269edabebd4c412da1d.7f2721073f19841be16f41b0a70b600ca6b880c8f3df6f3535cbc704371bdfa4\n[INFO|tokenization_utils_base.py:1766] 2022-01-23 14:57:41,100 >> loading file https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt from cache at /root/.cache/huggingface/transformers/45c3f7a79a80e1cf0a489e5c62b43f173c15db47864303a55d623bb3c96f72a5.d789d64ebfe299b0e416afc4a169632f903f693095b4629a7ea271d5a0cf2c99\n[INFO|tokenization_utils_base.py:1766] 2022-01-23 14:57:41,100 >> loading file https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json from cache at /root/.cache/huggingface/transformers/534479488c54aeaf9c3406f647aa2ec13648c06771ffe269edabebd4c412da1d.7f2721073f19841be16f41b0a70b600ca6b880c8f3df6f3535cbc704371bdfa4\n[INFO|file_utils.py:1272] 2022-01-23 14:57:41,395 >> https://huggingface.co/bert-base-uncased/resolve/main/pytorch_model.bin not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmps95de7xy\nDownloading: 100% 440M/440M [00:08<00:00, 55.0MB/s]\n[INFO|file_utils.py:1276] 2022-01-23 14:57:49,523 >> storing https://huggingface.co/bert-base-uncased/resolve/main/pytorch_model.bin in cache at /root/.cache/huggingface/transformers/a8041bf617d7f94ea26d15e218abd04afc2004805632abc0ed2066aa16d50d04.faf6ea826ae9c5867d12b22257f9877e6b8367890837bd60f7c54a29633f7f2f\n[INFO|file_utils.py:1279] 2022-01-23 14:57:49,523 >> creating metadata file for 
/root/.cache/huggingface/transformers/a8041bf617d7f94ea26d15e218abd04afc2004805632abc0ed2066aa16d50d04.faf6ea826ae9c5867d12b22257f9877e6b8367890837bd60f7c54a29633f7f2f\n[INFO|modeling_utils.py:1027] 2022-01-23 14:57:49,524 >> loading weights file https://huggingface.co/bert-base-uncased/resolve/main/pytorch_model.bin from cache at /root/.cache/huggingface/transformers/a8041bf617d7f94ea26d15e218abd04afc2004805632abc0ed2066aa16d50d04.faf6ea826ae9c5867d12b22257f9877e6b8367890837bd60f7c54a29633f7f2f\n[WARNING|modeling_utils.py:1135] 2022-01-23 14:57:53,228 >> Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForCL: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'bert.pooler.dense.weight', 'bert.pooler.dense.bias']\n- This IS expected if you are initializing BertForCL from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing BertForCL from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n[INFO|modeling_utils.py:1152] 2022-01-23 14:57:53,228 >> All the weights of BertForCL were initialized from the model checkpoint at bert-base-uncased.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use BertForCL for predictions without further training.\nLoading cached processed dataset at /content/gdrive/MyDrive/SimCSE/data/csv/default-6b8e4f1a08e7f75f/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2/cache-b7264c1fe0561b33.arrow\n[INFO|trainer.py:442] 2022-01-23 14:58:06,251 >> The following columns in the training set don't have a corresponding argument in `BertForCL.forward` and have been ignored: .\n[INFO|trainer.py:358] 2022-01-23 14:58:06,251 >> Using amp fp16 backend\n01/23/2022 14:58:06 - INFO - simcse.trainers - ***** Running training *****\n01/23/2022 14:58:06 - INFO - simcse.trainers - Num examples = 275601\n01/23/2022 14:58:06 - INFO - simcse.trainers - Num Epochs = 3\n01/23/2022 14:58:06 - INFO - simcse.trainers - Instantaneous batch size per device = 128\n01/23/2022 14:58:06 - INFO - simcse.trainers - Total train batch size (w. parallel, distributed & accumulation) = 128\n01/23/2022 14:58:06 - INFO - simcse.trainers - Gradient Accumulation steps = 1\n01/23/2022 14:58:06 - INFO - simcse.trainers - Total optimization steps = 6462\n 0% 0/6462 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/torch/optim/lr_scheduler.py:136: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. 
Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n \"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\", UserWarning)\n{'eval_stsb_spearman': 0.8407765865989767, 'eval_sickr_spearman': 0.8041623008539487, 'eval_avg_sts': 0.8224694437264627, 'epoch': 0.06}\n 2% 125/6462 [02:21<1:08:35, 1.54it/s][INFO|trainer.py:1344] 2022-01-23 15:00:27,519 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:00:27,763 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:00:35,324 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8503395593821568, 'eval_sickr_spearman': 0.8046127365232352, 'eval_avg_sts': 0.827476147952696, 'epoch': 0.12}\n 4% 250/6462 [05:24<1:10:57, 1.46it/s][INFO|trainer.py:1344] 2022-01-23 15:03:30,378 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:03:30,387 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:03:32,468 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8500629755616361, 'eval_sickr_spearman': 0.8189387810247754, 'eval_avg_sts': 0.8345008782932057, 'epoch': 0.17}\n{'loss': 0.6379, 'learning_rate': 4.6131228721757973e-05, 'epoch': 0.23}\n{'eval_stsb_spearman': 0.8521491549354193, 
'eval_sickr_spearman': 0.8137970035847238, 'eval_avg_sts': 0.8329730792600716, 'epoch': 0.23}\n 8% 500/6462 [10:27<1:08:18, 1.45it/s][INFO|trainer.py:1344] 2022-01-23 15:08:33,923 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:08:33,932 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:08:35,919 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8539158429857994, 'eval_sickr_spearman': 0.8212754941097397, 'eval_avg_sts': 0.8375956685477695, 'epoch': 0.29}\n 10% 625/6462 [13:03<1:07:17, 1.45it/s][INFO|trainer.py:1344] 2022-01-23 15:11:10,080 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:11:10,087 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:11:12,283 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8559717561956718, 'eval_sickr_spearman': 0.8009665034886624, 'eval_avg_sts': 0.8284691298421671, 'epoch': 0.35}\n 12% 750/6462 [15:39<1:08:05, 1.40it/s][INFO|trainer.py:1344] 2022-01-23 15:13:45,691 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:13:45,700 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:13:47,756 >> 
Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8562306203819061, 'eval_sickr_spearman': 0.8125436319926964, 'eval_avg_sts': 0.8343871261873013, 'epoch': 0.41}\n 14% 875/6462 [18:15<1:04:31, 1.44it/s][INFO|trainer.py:1344] 2022-01-23 15:16:21,514 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:16:21,524 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:16:23,715 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'loss': 0.4891, 'learning_rate': 4.226245744351594e-05, 'epoch': 0.46}\n{'eval_stsb_spearman': 0.8558544546422174, 'eval_sickr_spearman': 0.8150382233080876, 'eval_avg_sts': 0.8354463389751525, 'epoch': 0.46}\n{'eval_stsb_spearman': 0.8552639613526745, 'eval_sickr_spearman': 0.8175042361181232, 'eval_avg_sts': 0.8363840987353988, 'epoch': 0.52}\n{'eval_stsb_spearman': 0.8514021703019382, 'eval_sickr_spearman': 0.8140010397036331, 'eval_avg_sts': 0.8327016050027857, 'epoch': 0.58}\n{'eval_stsb_spearman': 0.8565789159956246, 'eval_sickr_spearman': 0.7967110919945498, 'eval_avg_sts': 0.8266450039950872, 'epoch': 0.64}\n 21% 1375/6462 [28:14<58:33, 1.45it/s][INFO|trainer.py:1344] 2022-01-23 15:26:20,982 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:26:20,989 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:26:23,033 >> Model weights saved in 
/content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'loss': 0.4645, 'learning_rate': 3.8393686165273915e-05, 'epoch': 0.7}\n{'eval_stsb_spearman': 0.8613869104126018, 'eval_sickr_spearman': 0.8167996678911277, 'eval_avg_sts': 0.8390932891518648, 'epoch': 0.7}\n 23% 1500/6462 [30:49<57:03, 1.45it/s][INFO|trainer.py:1344] 2022-01-23 15:28:55,792 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:28:55,799 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:28:57,792 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8613821096975495, 'eval_sickr_spearman': 0.8136843226207502, 'eval_avg_sts': 0.8375332161591498, 'epoch': 0.75}\n{'eval_stsb_spearman': 0.8580021109266994, 'eval_sickr_spearman': 0.803990877852916, 'eval_avg_sts': 0.8309964943898077, 'epoch': 0.81}\n{'eval_stsb_spearman': 0.8600916283810753, 'eval_sickr_spearman': 0.8179943935082992, 'eval_avg_sts': 0.8390430109446873, 'epoch': 0.87}\n{'loss': 0.4434, 'learning_rate': 3.452491488703188e-05, 'epoch': 0.93}\n{'eval_stsb_spearman': 0.8621104909824838, 'eval_sickr_spearman': 0.8176073588929109, 'eval_avg_sts': 0.8398589249376973, 'epoch': 0.93}\n 31% 2000/6462 [40:48<51:26, 1.45it/s][INFO|trainer.py:1344] 2022-01-23 15:38:55,023 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 15:38:55,030 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 15:38:57,035 >> Model weights 
saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n{'eval_stsb_spearman': 0.8578035279224304, 'eval_sickr_spearman': 0.8166384274835998, 'eval_avg_sts': 0.8372209777030151, 'epoch': 0.99}\n{'eval_stsb_spearman': 0.854882809844425, 'eval_sickr_spearman': 0.8230142199817991, 'eval_avg_sts': 0.8389485149131121, 'epoch': 1.04}\n{'eval_stsb_spearman': 0.8554300966984953, 'eval_sickr_spearman': 0.8143609367467087, 'eval_avg_sts': 0.834895516722602, 'epoch': 1.1}\n{'loss': 0.3259, 'learning_rate': 3.065614360878985e-05, 'epoch': 1.16}\n{'eval_stsb_spearman': 0.8546460994568771, 'eval_sickr_spearman': 0.8117820508482937, 'eval_avg_sts': 0.8332140751525854, 'epoch': 1.16}\n{'eval_stsb_spearman': 0.8544010301529412, 'eval_sickr_spearman': 0.8127830670333654, 'eval_avg_sts': 0.8335920485931534, 'epoch': 1.22}\n{'eval_stsb_spearman': 0.8509708408227785, 'eval_sickr_spearman': 0.7982505368267429, 'eval_avg_sts': 0.8246106888247606, 'epoch': 1.28}\n{'eval_stsb_spearman': 0.8549122412296136, 'eval_sickr_spearman': 0.8015967676017333, 'eval_avg_sts': 0.8282545044156735, 'epoch': 1.33}\n{'loss': 0.2935, 'learning_rate': 2.678737233054782e-05, 'epoch': 1.39}\n{'eval_stsb_spearman': 0.8558388692222231, 'eval_sickr_spearman': 0.8068334544982264, 'eval_avg_sts': 0.8313361618602247, 'epoch': 1.39}\n{'eval_stsb_spearman': 0.8549321105901477, 'eval_sickr_spearman': 0.8094366441339691, 'eval_avg_sts': 0.8321843773620584, 'epoch': 1.45}\n{'eval_stsb_spearman': 0.854370410914756, 'eval_sickr_spearman': 0.8124125070857704, 'eval_avg_sts': 0.8333914590002631, 'epoch': 1.51}\n{'eval_stsb_spearman': 0.8577771321239525, 'eval_sickr_spearman': 0.8160808344270049, 'eval_avg_sts': 0.8369289832754787, 'epoch': 1.57}\n{'loss': 0.283, 'learning_rate': 2.291860105230579e-05, 'epoch': 1.62}\n{'eval_stsb_spearman': 0.8556489580593036, 'eval_sickr_spearman': 0.8070280284901522, 'eval_avg_sts': 0.8313384932747279, 'epoch': 
1.62}\n{'eval_stsb_spearman': 0.856015782155543, 'eval_sickr_spearman': 0.811115859471347, 'eval_avg_sts': 0.833565820813445, 'epoch': 1.68}\n{'eval_stsb_spearman': 0.8565665138518453, 'eval_sickr_spearman': 0.8109027454742662, 'eval_avg_sts': 0.8337346296630558, 'epoch': 1.74}\n{'eval_stsb_spearman': 0.8523715394303285, 'eval_sickr_spearman': 0.8069375859261442, 'eval_avg_sts': 0.8296545626782363, 'epoch': 1.8}\n{'loss': 0.2808, 'learning_rate': 1.9049829774063757e-05, 'epoch': 1.86}\n{'eval_stsb_spearman': 0.852529843809898, 'eval_sickr_spearman': 0.80151617141352, 'eval_avg_sts': 0.8270230076117091, 'epoch': 1.86}\n{'eval_stsb_spearman': 0.8556679143489765, 'eval_sickr_spearman': 0.8100993292405109, 'eval_avg_sts': 0.8328836217947437, 'epoch': 1.92}\n{'eval_stsb_spearman': 0.8544446992221848, 'eval_sickr_spearman': 0.8160187782439468, 'eval_avg_sts': 0.8352317387330658, 'epoch': 1.97}\n{'eval_stsb_spearman': 0.8544106946201457, 'eval_sickr_spearman': 0.8142910514941164, 'eval_avg_sts': 0.8343508730571311, 'epoch': 2.03}\n{'loss': 0.2471, 'learning_rate': 1.518105849582173e-05, 'epoch': 2.09}\n{'eval_stsb_spearman': 0.8532692393372991, 'eval_sickr_spearman': 0.8090788604593567, 'eval_avg_sts': 0.831174049898328, 'epoch': 2.09}\n{'eval_stsb_spearman': 0.8537559140499501, 'eval_sickr_spearman': 0.812663085341973, 'eval_avg_sts': 0.8332094996959616, 'epoch': 2.15}\n{'eval_stsb_spearman': 0.8528377977588015, 'eval_sickr_spearman': 0.811354526014393, 'eval_avg_sts': 0.8320961618865972, 'epoch': 2.21}\n{'eval_stsb_spearman': 0.8524389558469001, 'eval_sickr_spearman': 0.8103079283140566, 'eval_avg_sts': 0.8313734420804784, 'epoch': 2.26}\n{'loss': 0.2047, 'learning_rate': 1.1312287217579697e-05, 'epoch': 2.32}\n{'eval_stsb_spearman': 0.8523512618362816, 'eval_sickr_spearman': 0.8079922528515222, 'eval_avg_sts': 0.8301717573439019, 'epoch': 2.32}\n{'eval_stsb_spearman': 0.8533240489836159, 'eval_sickr_spearman': 0.8074667926017893, 'eval_avg_sts': 0.8303954207927027, 
'epoch': 2.38}\n{'eval_stsb_spearman': 0.8525774879708493, 'eval_sickr_spearman': 0.8158298719219905, 'eval_avg_sts': 0.8342036799464199, 'epoch': 2.44}\n{'eval_stsb_spearman': 0.8557380613235626, 'eval_sickr_spearman': 0.8140288016802644, 'eval_avg_sts': 0.8348834315019135, 'epoch': 2.5}\n{'loss': 0.1995, 'learning_rate': 7.443515939337667e-06, 'epoch': 2.55}\n{'eval_stsb_spearman': 0.8524508455348102, 'eval_sickr_spearman': 0.810563741960316, 'eval_avg_sts': 0.8315072937475632, 'epoch': 2.55}\n{'eval_stsb_spearman': 0.8536681221744954, 'eval_sickr_spearman': 0.8106714757208415, 'eval_avg_sts': 0.8321697989476684, 'epoch': 2.61}\n{'eval_stsb_spearman': 0.8533979369348971, 'eval_sickr_spearman': 0.812888927580935, 'eval_avg_sts': 0.8331434322579161, 'epoch': 2.67}\n{'eval_stsb_spearman': 0.8530761271045578, 'eval_sickr_spearman': 0.8093014365834209, 'eval_avg_sts': 0.8311887818439894, 'epoch': 2.73}\n{'loss': 0.1991, 'learning_rate': 3.574744661095636e-06, 'epoch': 2.79}\n{'eval_stsb_spearman': 0.8532235791634804, 'eval_sickr_spearman': 0.8104624443673829, 'eval_avg_sts': 0.8318430117654316, 'epoch': 2.79}\n{'eval_stsb_spearman': 0.8540175245505425, 'eval_sickr_spearman': 0.8099605193573547, 'eval_avg_sts': 0.8319890219539485, 'epoch': 2.84}\n{'eval_stsb_spearman': 0.8536770321921293, 'eval_sickr_spearman': 0.8113687912515202, 'eval_avg_sts': 0.8325229117218247, 'epoch': 2.9}\n{'eval_stsb_spearman': 0.8532243425092022, 'eval_sickr_spearman': 0.810646835765804, 'eval_avg_sts': 0.8319355891375031, 'epoch': 2.96}\n100% 6462/6462 [2:08:13<00:00, 1.91it/s]01/23/2022 17:06:19 - INFO - simcse.trainers - \n\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\n\n\n01/23/2022 17:06:19 - INFO - simcse.trainers - Loading best model from /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased (score: 0.8621104909824838).\n[INFO|configuration_utils.py:443] 2022-01-23 17:06:19,353 >> loading configuration file /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|configuration_utils.py:481] 2022-01-23 17:06:19,354 >> Model config BertConfig {\n \"_name_or_path\": \"bert-base-uncased\",\n \"architectures\": [\n \"BertForCL\"\n ],\n \"attention_probs_dropout_prob\": 0.1,\n \"gradient_checkpointing\": false,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 768,\n \"initializer_range\": 0.02,\n \"intermediate_size\": 3072,\n \"layer_norm_eps\": 1e-12,\n \"max_position_embeddings\": 512,\n \"model_type\": \"bert\",\n \"num_attention_heads\": 12,\n \"num_hidden_layers\": 12,\n \"pad_token_id\": 0,\n \"position_embedding_type\": \"absolute\",\n \"transformers_version\": \"4.2.1\",\n \"type_vocab_size\": 2,\n \"use_cache\": true,\n \"vocab_size\": 30522\n}\n\n[INFO|modeling_utils.py:1025] 2022-01-23 17:06:19,355 >> loading weights file /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n[INFO|modeling_utils.py:1143] 2022-01-23 17:06:23,077 >> All model checkpoint weights were used when initializing BertForCL.\n\n[INFO|modeling_utils.py:1152] 2022-01-23 17:06:23,078 >> All the weights of BertForCL were initialized from the model checkpoint at /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use BertForCL for predictions without further training.\n{'train_runtime': 7697.0087, 'train_samples_per_second': 0.84, 'epoch': 3.0}\n100% 6462/6462 [2:08:16<00:00, 
1.19s/it]\n[INFO|trainer.py:1344] 2022-01-23 17:06:23,268 >> Saving model checkpoint to /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased\n[INFO|configuration_utils.py:300] 2022-01-23 17:06:23,275 >> Configuration saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/config.json\n[INFO|modeling_utils.py:817] 2022-01-23 17:06:25,366 >> Model weights saved in /content/gdrive/MyDrive/SimCSE/trained/poolings/avg_first_last-sup-simcse-bert-base-uncased/pytorch_model.bin\n01/23/2022 17:06:25 - INFO - __main__ - ***** Train results *****\n01/23/2022 17:06:25 - INFO - __main__ - epoch = 3.0\n01/23/2022 17:06:25 - INFO - __main__ - train_runtime = 7697.0087\n01/23/2022 17:06:25 - INFO - __main__ - train_samples_per_second = 0.84\n01/23/2022 17:06:25 - INFO - __main__ - *** Evaluate ***\n01/23/2022 17:07:28 - INFO - root - Generating sentence embeddings\n01/23/2022 17:07:56 - INFO - root - Generated sentence embeddings\n01/23/2022 17:07:56 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with (inner) 5-fold cross-validation\n01/23/2022 17:08:05 - INFO - root - Best param found at split 1: l2reg = 0.001 with score 82.94\n01/23/2022 17:08:16 - INFO - root - Best param found at split 2: l2reg = 0.001 with score 82.8\n01/23/2022 17:08:25 - INFO - root - Best param found at split 3: l2reg = 0.001 with score 83.08\n01/23/2022 17:08:35 - INFO - root - Best param found at split 4: l2reg = 0.001 with score 82.74\n01/23/2022 17:08:45 - INFO - root - Best param found at split 5: l2reg = 0.001 with score 82.67\n01/23/2022 17:08:47 - INFO - root - Generating sentence embeddings\n01/23/2022 17:08:54 - INFO - root - Generated sentence embeddings\n01/23/2022 17:08:54 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with (inner) 5-fold cross-validation\n01/23/2022 17:08:57 - INFO - root - Best param found at split 1: l2reg = 0.01 with score 89.21\n01/23/2022 17:09:00 - INFO - root - Best 
param found at split 2: l2reg = 0.01 with score 89.5\n01/23/2022 17:09:03 - INFO - root - Best param found at split 3: l2reg = 0.01 with score 88.97\n01/23/2022 17:09:07 - INFO - root - Best param found at split 4: l2reg = 0.01 with score 89.04\n01/23/2022 17:09:10 - INFO - root - Best param found at split 5: l2reg = 0.01 with score 89.3\n01/23/2022 17:09:11 - INFO - root - Generating sentence embeddings\n01/23/2022 17:09:40 - INFO - root - Generated sentence embeddings\n01/23/2022 17:09:40 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with (inner) 5-fold cross-validation\n01/23/2022 17:09:49 - INFO - root - Best param found at split 1: l2reg = 0.001 with score 94.95\n01/23/2022 17:09:59 - INFO - root - Best param found at split 2: l2reg = 1e-05 with score 95.09\n01/23/2022 17:10:08 - INFO - root - Best param found at split 3: l2reg = 0.001 with score 94.95\n01/23/2022 17:10:19 - INFO - root - Best param found at split 4: l2reg = 0.0001 with score 95.24\n01/23/2022 17:10:28 - INFO - root - Best param found at split 5: l2reg = 0.001 with score 94.85\n01/23/2022 17:10:29 - INFO - root - Generating sentence embeddings\n01/23/2022 17:10:35 - INFO - root - Generated sentence embeddings\n01/23/2022 17:10:35 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with (inner) 5-fold cross-validation\n01/23/2022 17:10:44 - INFO - root - Best param found at split 1: l2reg = 0.01 with score 89.43\n01/23/2022 17:10:54 - INFO - root - Best param found at split 2: l2reg = 0.001 with score 89.39\n01/23/2022 17:11:04 - INFO - root - Best param found at split 3: l2reg = 1e-05 with score 89.89\n01/23/2022 17:11:14 - INFO - root - Best param found at split 4: l2reg = 0.001 with score 89.42\n01/23/2022 17:11:25 - INFO - root - Best param found at split 5: l2reg = 0.001 with score 89.3\n01/23/2022 17:11:26 - INFO - root - Computing embedding for train\n01/23/2022 17:12:57 - INFO - root - Computed train embeddings\n01/23/2022 17:12:57 - INFO - root - Computing embedding for 
dev\n01/23/2022 17:12:59 - INFO - root - Computed dev embeddings\n01/23/2022 17:12:59 - INFO - root - Computing embedding for test\n01/23/2022 17:13:03 - INFO - root - Computed test embeddings\n01/23/2022 17:13:03 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with standard validation..\n01/23/2022 17:13:24 - INFO - root - [('reg:1e-05', 87.84), ('reg:0.0001', 87.96), ('reg:0.001', 87.96), ('reg:0.01', 86.93)]\n01/23/2022 17:13:24 - INFO - root - Validation : best param found is reg = 0.0001 with score 87.96\n01/23/2022 17:13:24 - INFO - root - Evaluating...\n01/23/2022 17:13:29 - INFO - root - ***** Transfer task : TREC *****\n\n\n01/23/2022 17:13:37 - INFO - root - Computed train embeddings\n01/23/2022 17:13:37 - INFO - root - Computed test embeddings\n01/23/2022 17:13:37 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with 5-fold cross-validation\n01/23/2022 17:13:43 - INFO - root - [('reg:1e-05', 81.91), ('reg:0.0001', 81.77), ('reg:0.001', 80.96), ('reg:0.01', 75.86)]\n01/23/2022 17:13:43 - INFO - root - Cross-validation : best param found is reg = 1e-05 with score 81.91\n01/23/2022 17:13:43 - INFO - root - Evaluating...\n01/23/2022 17:13:44 - INFO - root - ***** Transfer task : MRPC *****\n\n\n01/23/2022 17:13:45 - INFO - root - Computing embedding for train\n01/23/2022 17:14:06 - INFO - root - Computed train embeddings\n01/23/2022 17:14:06 - INFO - root - Computing embedding for test\n01/23/2022 17:14:15 - INFO - root - Computed test embeddings\n01/23/2022 17:14:15 - INFO - root - Training pytorch-MLP-nhid0-rmsprop-bs128 with 5-fold cross-validation\n01/23/2022 17:14:21 - INFO - root - [('reg:1e-05', 76.57), ('reg:0.0001', 76.72), ('reg:0.001', 76.89), ('reg:0.01', 75.34)]\n01/23/2022 17:14:21 - INFO - root - Cross-validation : best param found is reg = 0.001 with score 76.89\n01/23/2022 17:14:21 - INFO - root - Evaluating...\n01/23/2022 17:14:21 - INFO - __main__ - ***** Eval results *****\n01/23/2022 17:14:21 - INFO - __main__ - epoch = 
3.0\n01/23/2022 17:14:21 - INFO - __main__ - eval_CR = 89.2\n01/23/2022 17:14:21 - INFO - __main__ - eval_MPQA = 89.49\n01/23/2022 17:14:21 - INFO - __main__ - eval_MR = 82.85\n01/23/2022 17:14:21 - INFO - __main__ - eval_MRPC = 76.89\n01/23/2022 17:14:21 - INFO - __main__ - eval_SST2 = 87.96\n01/23/2022 17:14:21 - INFO - __main__ - eval_SUBJ = 95.02\n01/23/2022 17:14:21 - INFO - __main__ - eval_TREC = 81.91\n01/23/2022 17:14:21 - INFO - __main__ - eval_avg_sts = 0.8398589249376973\n01/23/2022 17:14:21 - INFO - __main__ - eval_avg_transfer = 86.18857142857142\n01/23/2022 17:14:21 - INFO - __main__ - eval_sickr_spearman = 0.8176073588929109\n01/23/2022 17:14:21 - INFO - __main__ - eval_stsb_spearman = 0.8621104909824838\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a98ef0cccefc26480b08549a534aa5bc25bd855
| 17,651 |
ipynb
|
Jupyter Notebook
|
Application.ipynb
|
TingHui21/BACT
|
b22f74bca16dc1f5c80b2af86482225cb9aeb133
|
[
"MIT"
] | null | null | null |
Application.ipynb
|
TingHui21/BACT
|
b22f74bca16dc1f5c80b2af86482225cb9aeb133
|
[
"MIT"
] | null | null | null |
Application.ipynb
|
TingHui21/BACT
|
b22f74bca16dc1f5c80b2af86482225cb9aeb133
|
[
"MIT"
] | 1 |
2020-12-10T07:23:34.000Z
|
2020-12-10T07:23:34.000Z
| 39.665169 | 204 | 0.544558 |
[
[
[
"# Application",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"property_type = pd.read_csv(\"final_df.csv\")\n\nproperty_type1 = property_type.iloc[:,1:33]\nfor i in range(len(property_type1)):\n for j in range(2, len(property_type1.columns)):\n if type(property_type1.iloc[i,j]) != str:\n continue\n elif len(property_type1.iloc[i,j]) <= 4:\n property_type1.iloc[i,j] = property_type1.iloc[i,j]\n else:\n property_type1.iloc[i,j] = property_type1.iloc[i,j].split(\",\")[0] + property_type1.iloc[i,j].split(\",\")[1]\n\nproperty_type2 = property_type1.loc[:, [\"Property Type\", \"Mean Price\"]]\nproperty_type2 = property_type2.groupby([\"Property Type\"]).mean()\n\n",
"_____no_output_____"
],
[
"plt.figure(figsize = (7,4))\n\nplt.bar(property_type2.index, property_type2[\"Mean Price\"], color=('red','yellow','orange','blue','green','purple','black','grey'))\n\nplt.title(\"Mean Price of Different Property Types\")\n\nplt.xlabel(\"Property Type\")\nplt.xticks(rotation=90)\n\nplt.ylabel(\"Mean Price\")\n\nplt.show()",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd \nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport webbrowser\nfrom threading import Timer\nimport dash_table\nimport dash_table.FormatTemplate as FormatTemplate\nimport plotly.express as px\n\n#Import datasets \ndf_details = pd.read_csv('dfclean_1adult.csv')\ndf_details = df_details.rename(columns = {'Unnamed: 0':'Name',\n 'reviews': 'no. of reviews'})\n\ndf_dates = pd.read_csv('final_df.csv').drop('Unnamed: 0', 1)\n\n# Merge datasets\ndf = df_details.merge(df_dates, on='Name')\ndf = df.replace(to_replace = ['Y','N'],value = [1,0])\n\ndf.iloc[:,7:37] = df.iloc[:,7:37].apply(lambda x: x.astype(str))\ndf.iloc[:,7:37] = df.iloc[:,7:37].apply(lambda x: x.str.replace(',', '').astype(float), axis=1)\n\nuser_df = df.copy()\ndate_cols = user_df.columns[7:37]\nhotel_types = user_df['Property Type'].unique()\nfeatures = ['Price'] + list(user_df.columns[2:5]) + list(user_df.columns[37:])\ncontinuous_features = features[:9]\ncontinuous_features_A = ['Price', 'Distance to Mall', 'Distance to MRT']\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napp.title = 'Hotel Booking'\n\ndef generate_table(dataframe, max_rows=5):\n df_drop_link = dataframe.drop(columns='link')\n \n return html.Table([\n html.Thead(\n html.Tr([html.Th(col) for col in df_drop_link.columns]) \n ),\n html.Tbody([\n html.Tr([\n html.Td(dataframe.iloc[i][col]) if col != 'Name' else html.Td(html.A(href=dataframe.iloc[i]['link'], children=dataframe.iloc[i][col], target='_blank')) for col in df_drop_link.columns\n ]) for i in range(min(len(dataframe), max_rows))\n ])\n ])\n\ncolors = {'background': '#111111', 'text': '#7FDBFF'}\n\napp.layout = html.Div([\n \n #introduction\n html.Div([\n html.H2(children='Hello!',\n style={'color': colors['text']}),\n \n #inputs for date and hotel type \n 
html.Div([html.H4(\"Step 1: Input Date (eg. 4Nov): \"),\n dcc.Input(id='date-input', value='4Nov', type='text')],\n style={'width':'30%', 'float':'left'}),\n \n html.Div(id='date-output-hotel'),\n \n html.Div([ \n html.H4('Step 2: Select Your Preferred Hotel Types:'),\n dcc.Dropdown(id='hotel-input',\n options=[{'label': i, 'value': i} for i in hotel_types],\n value= hotel_types,\n multi=True)],\n style={'width':'70%', 'float':'right'}),\n html.Br(), html.Br()\n ]),\n \n #return available hotels for given date\n html.Div([\n html.Br(), html.Br(), html.Hr(),\n dcc.Graph(id='output-submit'),\n html.Hr(),\n ]),\n \n #input top 3 features\n html.Div([\n html.H4(children='Step 3: Select Your Top 3 Features:'),\n ]),\n \n html.Div([\n dcc.Dropdown(\n id='feature1',\n options=[{'label': i, 'value': i} for i in features],\n value= features[0]\n ), html.Br(), \n dcc.Slider(id='weight1',\n min= 10, max= 90, step= 10,\n marks={i: '{}%'.format(i) for i in np.arange(10, 90, 10).tolist()},\n value=50)\n ], style={\"display\": \"grid\", \"grid-template-columns\": \"20% 10% 70%\", \"grid-template-rows\": \"50px\"}\n ),\n \n html.Div([\n dcc.Dropdown(\n id='feature2',\n options=[{'label': i, 'value': i} for i in features],\n value= features[1]\n ), html.Br(),\n dcc.Slider(id='weight2',\n min= 10, max= 90, step= 10,\n marks={i: '{}%'.format(i) for i in np.arange(10, 90, 10).tolist()},\n value=30)\n ], style={\"display\": \"grid\", \"grid-template-columns\": \"20% 10% 70%\", \"grid-template-rows\": \"50px\"}\n ),\n \n html.Div([\n dcc.Dropdown(\n id='feature3',\n options=[{'label': i, 'value': i} for i in features],\n value= features[2]\n ), html.Br(),\n dcc.Slider(id='weight3',\n min= 10, max= 90, step= 10,\n marks={i: '{}%'.format(i) for i in np.arange(10, 90, 10).tolist()},\n value=20)\n ], style={\"display\": \"grid\", \"grid-template-columns\": \"20% 10% 70%\", \"grid-template-rows\": \"50px\"}\n ),\n \n #return top 5 hotels recommended\n html.Div([ \n html.Hr(),\n 
html.H2(children='Top 5 Hotels Recommended For You',\n style={'color': colors['text']}),\n html.Div(id='output-feature'),\n html.Hr()\n ])\n])\n\n#update available hotels for given date\[email protected](Output('output-submit', 'figure'),\n [Input('hotel-input', 'value'), Input('date-input', 'value')])\ndef update_hotels(hotel_input, date_input):\n user_df = df.copy()\n user_df = user_df[user_df[date_input].notnull()]\n user_df = user_df[user_df['Property Type'].isin(hotel_input)]\n plot_df = pd.DataFrame(user_df.groupby('Property Type')['Name'].count()).reset_index()\n fig = px.bar(plot_df, x='Property Type', y='Name', color=\"Property Type\", title=\"Hotel Types available on {}:\".format(date_input))\n fig.update_layout(transition_duration=500)\n return fig\n\n#update top 5 hotels recommended\[email protected](Output('output-feature', 'children'),\n [Input('hotel-input', 'value'), Input('date-input', 'value'), \n Input('feature1', 'value'), Input('feature2', 'value'), Input('feature3', 'value'),\n Input('weight1', 'value'), Input('weight2', 'value'), Input('weight3', 'value')])\ndef update_features(hotel_input, date_input, feature1, feature2, feature3, weight1, weight2, weight3):\n user_df = df.copy()\n user_df = user_df[user_df[date_input].notnull()]\n user_df['Price'] = user_df[date_input]\n user_df = user_df[user_df['Property Type'].isin(hotel_input)]\n features= [feature1, feature2, feature3]\n selected_features = features.copy()\n selected_continuous = set(selected_features) & set(continuous_features)\n\n for i in selected_continuous:\n col = i + str(' rank')\n\n if i in continuous_features_A:\n user_df[col] = user_df[i].rank(ascending=False) #higher value, lower score\n else:\n user_df[col] = user_df[i].rank(ascending=True) #higher value, higher score\n\n selected_features[selected_features.index(i)] = col #replace element in list name with new col name\n\n #Scoring: weight * feature's score\n user_df['Score'] = (((weight1/100) * 
user_df[selected_features[0]]) \n + ((weight2/100) * user_df[selected_features[1]]) \n + ((weight3/100) * user_df[selected_features[2]])).round(1)\n \n #Score-to-Price ratio\n user_df['Value_to_Price ratio'] = (user_df['Score'] / user_df['Price']).round(1)\n user_df = user_df.sort_values(by=['Value_to_Price ratio'], ascending = False).reset_index()\n features_result = [i for i in features if i != 'Price']\n selected_features_result = [i for i in selected_features if i not in features_result]\n user_df_results = user_df[['Name', 'Property Type', 'Price', 'Score', 'Value_to_Price ratio'] + ['link'] + features_result + selected_features_result] \n\n return generate_table(user_df_results.head(5))\n\nport = 8050\nurl = \"http://127.0.0.1:{}\".format(port)\ndef open_browser():\n webbrowser.open_new(url)\n\nif __name__ == '__main__':\n Timer(0.5, open_browser).start();\n app.run_server( debug= False, port=port)",
"_____no_output_____"
]
],
[
[
    "# Price Prediction",
"_____no_output_____"
]
],
[
[
"import glob\nimport pandas as pd\nimport numpy as np\nimport statsmodels.formula.api as smf\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline\n\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn import datasets, linear_model\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import LogisticRegression\nimport random\nimport xgboost as xgb\n\n\ndfs = glob.glob(\"*Novhotels.csv\")\n\n# for df in dfs:\ntrain_features = pd.read_csv(\"10Novhotels.csv\")\n\n#Preliminary data cleaning\ncol_names = train_features.columns\nlist1 = []\nfor i in col_names:\n prop_na = sum(train_features.loc[:,i].isnull())/train_features.loc[:,\"Laundry Service\"].count()\n if prop_na >= .9:\n list1.append(i)\n\ntitle = ['Price', 'Property Type', 'Number of Stars', 'Review Score',\n 'Cleanliness', 'Distance to Mall', 'Distance to MRT',\n 'Early Check-in (Before 3pm)', 'Late Check-out (After 12pm)',\n 'Pay Later', 'Free Cancellation', 'Gym', 'Swimming Pool', 'Car Park',\n 'Airport Transfer', 'Breakfast', 'Hygiene+ (Covid-19)',\n '24h Front Desk', 'Laundry Service', 'Bathtub', 'Balcony', 'Kitchen',\n 'TV', 'Internet', 'Air Conditioning', 'Ironing', 'Non-Smoking']\n\ntrain_features = train_features.drop(columns = list1)\ntrain_features = train_features.drop(['Unnamed: 0', 'Name'], axis = 1) \n#train_features.rename(columns={'*Nov': 'Price'}, inplace=True)\ntrain_features.columns = title\n\npd.options.display.max_columns = None\npd.options.display.max_rows = None\n# display(train_features.head())\n\ntrain_features = train_features.replace(['Y', 'N'], [1, 0])\ntrain_features = train_features[train_features[\"Price\"].notna()]\n\ntrain_features[\"Price\"] = train_features[\"Price\"].astype(str).str.replace(',','')\n# train_features[\"Price\"] = 
train_features[\"Price\"].str.replace(',','')\ntrain_features[\"Price\"] = pd.to_numeric(train_features[\"Price\"])\n\n#Change stars to categorical\ntrain_features[\"Number of Stars\"] = train_features[\"Number of Stars\"].astype(str)\n\n\n#One hot encoding\ntrain_features = pd.get_dummies(train_features)\n\n#Check for missing data\n# check = train_features.isnull().sum()\n\nmean_val_distmall = round(train_features['Distance to Mall'].mean(),0)\ntrain_features['Distance to Mall']=train_features['Distance to Mall'].fillna(mean_val_distmall)\nmean_val_distmrt = round(train_features['Distance to MRT'].mean(),0)\ntrain_features['Distance to MRT']=train_features['Distance to MRT'].fillna(mean_val_distmrt)\nmean_val_price = round(train_features['Price'].mean(),0)\ntrain_features['Price']=train_features['Price'].fillna(mean_val_price)\n\n# print(train_features.isnull().sum())\n\n# Create correlation matrix\ncorr_matrix = train_features.corr().abs()\n\n# Select upper triangle of correlation matrix\nupper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\n\n# Find features with correlation greater than 0.95\nto_drop = [column for column in upper.columns if any(upper[column] > 0.95)]\n\n# Drop features \ntrain_features.drop(to_drop, axis=1, inplace=True)\n\nlabels = []\n\nfor i in train_features.columns:\n labels.append(i)\nlabels.remove('Price')\n\ntraining_features = labels \ntarget = 'Price'\n\nrandom.seed(5)\n#Perform train-test split\n#creating 90% training data and 10% test data\nX_train, X_test, Y_train, Y_test = train_test_split(train_features[training_features], train_features[target], train_size = 0.9)\n\ncolsample = np.arange(0.0, 1.1, 0.1)\nlearningrate = np.arange(0.0, 1.1, 0.1)\nmaxdepth = list(range(1, 1000))\nalpha_val = list(range(1, 1000))\nn_estimators_val = list(range(1, 1000))\n\n# for a in range(len(maxdepth)):\nxg_reg = xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,\n max_depth = 5, 
alpha = 1, n_estimators = 20)\n\nxg_reg.fit(X_train,Y_train)\n\npredicted = xg_reg.predict(X_test)\n# print(n_estimators_val[a])\n#the mean squared error\nprint('Mean squared error: %.2f' % mean_squared_error(Y_test, predicted))\n\n#explained variance score: 1 is perfect prediction\nprint('R square score: %.2f' % r2_score(Y_test,predicted))\n\n\ndf = pd.read_csv(\"prices_1adult.csv\")\ndf = df.replace(to_replace =\"[]\", value =np.nan) \ndf = pd.melt(df, id_vars='Unnamed: 0')\ndf.columns = [\"Name\",\"Date\",\"Price\"]\ndf.head()\n\ndf_second = pd.read_csv(\"Predicted_Price.csv\")\ndf_second.head()\ndf_second = df_second.drop_duplicates()\n\ndf_merge_col = pd.merge(df, df_second, on=['Name','Date'])\n# df_merge_col.to_csv(\"Predicted_Price.csv\")\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a98ef33c6d4a3ea502f26f2356e7581c21c5e24
| 3,875 |
ipynb
|
Jupyter Notebook
|
P7-Infeasibility in Simplex Method.ipynb
|
BhavanaVanjani/Msc-CS-Part-2-Sem-3--Operation-Research
|
a3f0008009f515a22d19049deadd4153a32adda4
|
[
"MIT"
] | null | null | null |
P7-Infeasibility in Simplex Method.ipynb
|
BhavanaVanjani/Msc-CS-Part-2-Sem-3--Operation-Research
|
a3f0008009f515a22d19049deadd4153a32adda4
|
[
"MIT"
] | null | null | null |
P7-Infeasibility in Simplex Method.ipynb
|
BhavanaVanjani/Msc-CS-Part-2-Sem-3--Operation-Research
|
a3f0008009f515a22d19049deadd4153a32adda4
|
[
"MIT"
] | null | null | null | 3,875 | 3,875 | 0.705032 |
[
[
[
"Max z= 200x - 300y\nsubject to\n2x+3y>=1200\nx+y<=400\n2x+3/2y>=900\nx,y>=0",
"_____no_output_____"
]
],
[
[
"from scipy.optimize import linprog\nobj = [-200, 300]",
"_____no_output_____"
],
[
"lhs_ineq = [[ -2, -3], # Red constraint left side\n... [1, 1], # Blue constraint left side\n... [ -2, -1.5]] # Yellow constraint left side",
"_____no_output_____"
],
[
"rhs_ineq = [-1200, # Red constraint right side\n... 400, # Blue constraint right side\n... -900] # Yellow constraint right side",
"_____no_output_____"
],
[
"bnd = [(0, float(\"inf\")), # Bounds of x\n... (0, float(\"inf\"))] # Bounds of y",
"_____no_output_____"
],
[
"opt = linprog(c=obj, A_ub=lhs_ineq, b_ub=rhs_ineq,\n... bounds=bnd,\n... method=\"revised simplex\")",
"_____no_output_____"
],
[
"opt",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9907e965f4d856cc7a959fc69b4b0628d7da07
| 563,584 |
ipynb
|
Jupyter Notebook
|
AirBnB Boston_ND.ipynb
|
MahbubulWasek/Project-1
|
cde686fcf2717890ea9a62772c601550e725fb57
|
[
"CNRI-Python"
] | null | null | null |
AirBnB Boston_ND.ipynb
|
MahbubulWasek/Project-1
|
cde686fcf2717890ea9a62772c601550e725fb57
|
[
"CNRI-Python"
] | null | null | null |
AirBnB Boston_ND.ipynb
|
MahbubulWasek/Project-1
|
cde686fcf2717890ea9a62772c601550e725fb57
|
[
"CNRI-Python"
] | null | null | null | 211.159236 | 100,664 | 0.867402 |
[
[
[
"## DATA SCIENCE NANO DEGREE\n\n### PROJECT 1: Boston AirBnB\n##### MAHBUBUL WASEK",
"_____no_output_____"
],
[
"#### Introduction\n\nThis is part of the Udacity Data Science Nanodegree (Project 1). In this project we are supposed to analysis data using the CRISP-DM process. The CRISP-DM process:\n\n1) Business Understanding \n\n2) Data Understanding\n\n3) Prepare Data \n\n4) Data Modeling \n\n5) Evaluate the Results\n\n6) Deploy",
"_____no_output_____"
],
[
    "#### Business Understanding\n\nAirBnB is an online rental marketplace, which created a community for landlords and their tenants. Landlords are able to attract temporary tenants through this online platform. \n\nThe main source of income for AirBnB is from the service fees charged to both guests and hosts. The peer-to-peer business model used by AirBnb has the potential for continued revenue growth. \n\nIn this project, I wanted to explore the data to answer the following questions:\n\n##### 1) What has been the overall price trend in the given period of time? Is there any price trend in a week? If so, which days are more profitable?\n\n##### 2) What is the relationship between price and various attributes?\n\n##### 3) What are some of the common features that customers take into consideration for a good experience?",
"_____no_output_____"
],
[
"#### Data Understanding\n\nWe have 3 datasets for this project:\n\n i) Listings : Provides us with a number of columns containing detailed information about the rooms rented.\n \n ii) Reviews : Contains the reviews for the rooms along with unique id for customers.\n \n iii) Calendar : This provides us with the dates the rooms are available along with the price.\n\n\n\nI will explore and try to visualize the data by presenting the results in the form of dashboards to answer the above questions.",
"_____no_output_____"
]
],
[
[
"#Loading the libraries\n\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport os\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score, mean_squared_error\n%matplotlib inline \nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"#Reading the datasets:\nbost_listing = pd.read_csv('listings.csv')\nbost_review = pd.read_csv('reviews.csv')\nbost_calendar = pd.read_csv('calendar.csv')",
"_____no_output_____"
],
[
"num_rows_l = bost_listing.shape[0]\nnum_cols_l = bost_listing.shape[1]\n\nnum_rows_r = bost_review.shape[0]\nnum_cols_r = bost_review.shape[1]\n\nnum_rows_c = bost_calendar.shape[0]\nnum_cols_c = bost_calendar.shape[1]\n\nprint(num_rows_l, num_cols_l, num_rows_r, num_cols_r, num_rows_c, num_cols_c)",
"3585 95 68275 6 1308890 4\n"
],
[
"bost_review.head()",
"_____no_output_____"
],
[
"bost_review.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 68275 entries, 0 to 68274\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 listing_id 68275 non-null int64 \n 1 id 68275 non-null int64 \n 2 date 68275 non-null object\n 3 reviewer_id 68275 non-null int64 \n 4 reviewer_name 68275 non-null object\n 5 comments 68222 non-null object\ndtypes: int64(3), object(3)\nmemory usage: 3.1+ MB\n"
],
[
"bost_calendar.head()",
"_____no_output_____"
],
[
"bost_calendar.tail()",
"_____no_output_____"
],
[
"bost_calendar.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1308890 entries, 0 to 1308889\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 listing_id 1308890 non-null int64 \n 1 date 1308890 non-null object\n 2 available 1308890 non-null object\n 3 price 643037 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 39.9+ MB\n"
],
[
"# Check if price is NaN when available value is f:\n\ncalendar_q1 = bost_calendar.groupby('available')['price'].count().reset_index()\ncalendar_q1.columns = ['available', 'price_count']\ncalendar_q1",
"_____no_output_____"
],
[
"# How many rows per each listing:\n\ncalendar_q2 = bost_calendar.groupby('listing_id')['date'].count().reset_index()\ncalendar_q2['date'].value_counts()",
"_____no_output_____"
],
[
"pd.options.display.max_columns = 95",
"_____no_output_____"
],
[
"bost_listing.head()",
"_____no_output_____"
],
[
"#Getting the columns in Listing dataset:\nbost_listing.columns",
"_____no_output_____"
],
[
"#Converting amount columns from string to numbers:\n\nbost_listing['price'] = bost_listing['price'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\")\nbost_listing['weekly_price'] = bost_listing['weekly_price'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\")\nbost_listing['monthly_price'] = bost_listing['monthly_price'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\")\nbost_listing['security_deposit'] = bost_listing['security_deposit'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\")\nbost_listing['cleaning_fee'] = bost_listing['cleaning_fee'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\")\nbost_listing['extra_people'] = bost_listing['extra_people'].apply(str).str.replace(\"[$, ]\", \"\").astype(\"float\") \n",
"_____no_output_____"
],
[
"print(bost_listing['price'])",
"0 250.0\n1 65.0\n2 65.0\n3 75.0\n4 79.0\n ... \n3580 69.0\n3581 150.0\n3582 198.0\n3583 65.0\n3584 65.0\nName: price, Length: 3585, dtype: float64\n"
],
[
"#Dropping columns with all Null values:\nbost_listing = bost_listing.dropna(how='all', axis=1)",
"_____no_output_____"
],
[
"#Dropping columns with 'url' in the name:\nfor col in bost_listing.columns: \n if 'url' in col: \n del bost_listing[col] ",
"_____no_output_____"
],
[
"#Dropping columns that I will not need:\nuseless_columns = ['scrape_id', 'last_scraped', 'experiences_offered', 'neighborhood_overview', 'notes', 'transit', 'access', 'interaction', 'house_rules', 'host_name', 'host_since', 'host_location', 'host_about', 'host_response_time', 'host_response_rate', \n 'host_acceptance_rate', 'host_neighbourhood', 'host_listings_count', 'host_total_listings_count', 'host_verifications', 'host_has_profile_pic', 'host_identity_verified', 'street', 'neighbourhood', 'neighbourhood_cleansed', \n 'city', 'state', 'zipcode', 'market', 'smart_location', 'country_code', 'country', 'latitude', 'longitude', 'is_location_exact', 'calendar_updated', 'calendar_last_scraped', 'review_scores_rating',\n 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value', 'requires_license', 'require_guest_profile_picture',\n 'require_guest_phone_verification', 'calculated_host_listings_count']\n\nbost_listing.drop(useless_columns, axis=1, inplace=True)",
"_____no_output_____"
],
[
"bost_listing.head()",
"_____no_output_____"
],
[
"bost_listing.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3585 entries, 0 to 3584\nData columns (total 35 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 3585 non-null int64 \n 1 name 3585 non-null object \n 2 summary 3442 non-null object \n 3 space 2528 non-null object \n 4 description 3585 non-null object \n 5 host_id 3585 non-null int64 \n 6 host_is_superhost 3585 non-null object \n 7 property_type 3582 non-null object \n 8 room_type 3585 non-null object \n 9 accommodates 3585 non-null int64 \n 10 bathrooms 3571 non-null float64\n 11 bedrooms 3575 non-null float64\n 12 beds 3576 non-null float64\n 13 bed_type 3585 non-null object \n 14 amenities 3585 non-null object \n 15 square_feet 56 non-null float64\n 16 price 3585 non-null float64\n 17 weekly_price 892 non-null float64\n 18 monthly_price 888 non-null float64\n 19 security_deposit 1342 non-null float64\n 20 cleaning_fee 2478 non-null float64\n 21 guests_included 3585 non-null int64 \n 22 extra_people 3585 non-null float64\n 23 minimum_nights 3585 non-null int64 \n 24 maximum_nights 3585 non-null int64 \n 25 availability_30 3585 non-null int64 \n 26 availability_60 3585 non-null int64 \n 27 availability_90 3585 non-null int64 \n 28 availability_365 3585 non-null int64 \n 29 number_of_reviews 3585 non-null int64 \n 30 first_review 2829 non-null object \n 31 last_review 2829 non-null object \n 32 instant_bookable 3585 non-null object \n 33 cancellation_policy 3585 non-null object \n 34 reviews_per_month 2829 non-null float64\ndtypes: float64(11), int64(11), object(13)\nmemory usage: 980.4+ KB\n"
],
[
"len(bost_listing.columns)",
"_____no_output_____"
]
],
[
[
"-----",
"_____no_output_____"
],
[
"#### ANSWERING THE QUESTIONS: ",
"_____no_output_____"
]
],
[
[
"# Plot 1: Overall Price Trend:\n\nsns.set_style(\"dark\",{\"axes.facecolor\":\"black\"})\n\ncalendar_q1 = bost_calendar.copy(deep=True) \ncalendar_q1.dropna(inplace=True)\ncalendar_q1['date'] = pd.to_datetime(calendar_q1['date'])\ncalendar_q1['price'] = calendar_q1['price'].map(lambda x: float(x[1:].replace(\",\", \"\")))\n\n#Range \nstart_date = '2016-09-05 00:00:00'\nend_date = '2017-09-06 00:00:00'\ncalendar_q1 = calendar_q1[(calendar_q1['date'] > start_date) & (calendar_q1['date'] < end_date)]\n\ncalendar_q1 = calendar_q1.groupby('date')['price'].mean().reset_index()\n\nplt.figure(figsize=(10,6))\nplt.plot(calendar_q1.date, calendar_q1.price, color = 'r', marker='D', ls='--', linewidth=1.5)\nplt.title(\"Overall Price Trend\", fontsize= 30, color= \"DarkBlue\")\nplt.xlabel('Date', fontsize=25)\nplt.ylabel('Price', fontsize=25)\nplt.show()",
"_____no_output_____"
],
[
"# Plot 2: Weekly Price Trend\n\nsns.set_style(\"dark\",{\"axes.facecolor\":\"black\"})\ncalendar_q1[\"weekday\"] = calendar_q1[\"date\"].dt.day_name()\n\nplt.rcParams['figure.figsize'] = 10,6\nsns.boxplot(x= 'weekday', y= 'price', data = calendar_q1, palette= \"vlag\", width=0.4)\nplt.title(\"Weekly Price Trend\", fontsize= 30, color= \"DarkBlue\")\nplt.xlabel('Day', fontsize= 25)\nplt.ylabel('Price', fontsize= 25)\nplt.show()",
"_____no_output_____"
]
],
[
[
    "The above 2 graphs provide us the price of AirBnB homes in Boston over a period of time. Plot 1 is a line graph which shows the price of AirBnB homes in Boston over the time period of September 2016 to September 2017. It can be seen that the price of AirBnB homes has overall decreased in Boston from September 2016 to 2017. Starting at 240 in September 2016 and coming to below 200 a year later. The highest it reached was in 2016 at slightly above 280. Although, there was a substantial decrease from October 2016 to February 2017, the price experienced a slow increasing trend till September 2017. There was also a probable seasonal peak for few weeks in the price at the end of April 2017.\n\nPlot 2 shows the weekly price trend for AirBnB homes in Boston. Plot 2 is a follow up to the overall trend of price for AirBnB homes in Boston. Plot 2 gives us a clear picture of how the price of AirBnB homes vary depending on the day of the week. The highest price for homes can be seen to be on Fridays and Saturdays at slightly above 200. The lowest price for homes are on Mondays with the mean price of around 190.",
"_____no_output_____"
]
],
[
[
"# Plot 3: Price Of Different Room Types In The Different Types Of Property:\n\nsns.set_style(\"dark\",{\"axes.facecolor\":\"black\"})\nplt.rcParams['figure.figsize'] = 10,6\nax = sns.swarmplot(data=bost_listing, x=\"property_type\", y=\"price\", hue=\"room_type\")\nplt.xticks (rotation='vertical')\nplt.ylim(0,1100)\nplt.title(\"Price of Different Room Types in Different Properties\", fontsize= 30, color= \"DarkBlue\")\nplt.legend(framealpha=1, frameon=True, facecolor='white', edgecolor='white')\nplt.xlabel('Property Type', fontsize= 25)\nplt.ylabel('Price', fontsize= 25)\n",
"_____no_output_____"
]
],
[
[
    "Plot 3 shows the price of different types of room for different AirBnB properties in Boston. There are mainly 3 types of room for the different AirBnB homes in Boston. The blue dots represent Entire Home or Apartment and seems to be the most available option in Boston followed by the orange dot representing Private Rooms. The least available AirBnB room type in Boston is Shared Room. \n\nBoston offers these 3 types of room to be rented in 13 different types of properties. The top 3 types of properties where rooms are rented in Boston are House, Apartment and Condominium. The other types of property in Boston includes Loft, Bed & Breakfast, Townhouse, Boat, Villa, Entire Floor, Dorm, Guesthouse, Camper/RV and Others. Another valuable piece of information which can be deduced from Plot 3 is the difference in price among the 3 types of room in these different properties in Boston. ",
"_____no_output_____"
]
],
[
[
"# Plot 5: Price of Different Rooms:\n\nsns.set_style(\"dark\",{\"axes.facecolor\":\"black\"})\nw = sns.boxplot(data=bost_listing, x='room_type', y='price', palette='summer', width= 0.5)\nplt.rcParams['figure.figsize'] = 10,6\nplt.ylim(0,500)\nplt.title(\"Price of Different Rooms\", fontsize= 30, color= \"DarkBlue\")\nplt.xlabel('Room Types', fontsize= 25)\nplt.ylabel('Price', fontsize= 25)",
"_____no_output_____"
]
],
[
[
"Plot 5 shows a boxplot graph plotted to investigate the information gained from Plot 4. Plot 5 confirms the price trend observed for the different types of room in Plot 4. It can be seen that among the 3 types of room, Entire Home/Apt has the highest mean price of around 200 followed by Private Room with price around a little less than 100. The price for Shared Room in Boston AirBnB homes seems to be around 50. ",
"_____no_output_____"
]
],
[
[
"# Plot 4: Price of Different Properties with the Number of Beds\n\nsns.set_style(\"darkgrid\")\nvis2 = sns.lmplot(data= bost_listing, x='price', y='beds',\n fit_reg=False, hue=\"property_type\", size=7, aspect=1)\nplt.ylim(0,10)\nplt.xlim(0,1500)\nplt.title(\"Price of Different Properties With Varying Bed Numbers\", fontsize= 30, color= \"DarkBlue\")\nplt.xlabel('Price', fontsize= 25)\nplt.ylabel('Number of Beds', fontsize= 25)",
"_____no_output_____"
]
],
[
[
"Plot 4 displays the Different Property prices for the number of beds. It can be seen that most of the AirBnB homes rented in Boston had only 1 bed with varying prices mostly between 600. Most of the Townhouse, Entire floor, Loft and Guesthouse offered 2 beds with the price similar to properties with 1 bed. However, most of the AirBnB homes offering 3 beds were Apartment and Condominium with varying price range between 400. A similar price trend can be seen in AirBnB homes offering 4 beds with few boats along with Condominium and Villas among the property types. ",
"_____no_output_____"
]
],
[
[
"# Plot 6: Price Distribution with Extra People: \n\nk3= sns.kdeplot(bost_listing.extra_people, bost_listing.price, shade=True, shade_lowest=True, cmap='inferno')\nk3b= sns.kdeplot(bost_listing.extra_people, bost_listing.price, cmap='cool')\n\nplt.ylim(-10,400)\nplt.xlim(-10,35)\nplt.title(\"Price Distribution with Extra People\", fontsize= 30, color= \"DarkBlue\")\nplt.xlabel('Extra People', fontsize= 25)\nplt.ylabel('Price', fontsize= 25)",
"_____no_output_____"
]
],
[
[
    "Plot 6 shows us a Kernel Density Estimation (KDE) graph of Extra People and Price. It tells us how having Extra People affects the price of the AirBnB homes in Boston. We can see that if there are 0 extra people then the lower density of price is stretched from the minimum at 0 to the highest density at around 400. If we move to the right and increase the extra people to 5 we see that the lower density of price is at 25 and the higher density of price at around 275. If we keep moving further to 10 extra people the lower density of price seems to stay similar but the higher density of price decreases to around 150. It is a very interesting observation as the price density narrows down if we keep increasing the number of extra people to 30.",
"_____no_output_____"
]
],
[
[
"# Plot 7: Extracting Common Words from the 'Comment' Column:\n\n\n## Loading the necessary libraries\n\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\ncomment_words = ' '\nstopwords = set(STOPWORDS) ",
"_____no_output_____"
],
[
"for val in bost_review.comments: \n\n # typecaste each val to string \n val = str(val) \n\n # split the value \n tokens = val.split() \n\n# Converts each token into lowercase \nfor i in range(len(tokens)): \n tokens[i] = tokens[i].lower() \n\nfor words in tokens: \n comment_words = comment_words + words + ' '\n\n\nwordcloud = WordCloud(width = 800, height = 800, \n background_color ='white', \n stopwords = stopwords, \n min_font_size = 10).generate(comment_words) \n\n# plot the WordCloud image \nplt.figure(figsize = (5, 8), facecolor ='B') \nplt.imshow(wordcloud) \nplt.axis(\"off\") \nplt.tight_layout(pad = 0) \n\nplt.show() ",
"_____no_output_____"
]
],
[
[
"Plot 7 is word cloud which derived the most common words from the 'Comment' column of the reviews from the customers. Plot 7 provides us the criteria which matters to customers when giving reviews. A good observation which can be deduced from the reviews is that customers seem to have a good experience if the AirBnB home is clean and amenities are provided. In order to please the customers, the host can include amenities along with shampoo and towels. ",
"_____no_output_____"
],
[
"----",
"_____no_output_____"
],
[
"#### Linear Regression Model",
"_____no_output_____"
]
],
[
[
"bost_listing.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3585 entries, 0 to 3584\nData columns (total 35 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 3585 non-null int64 \n 1 name 3585 non-null object \n 2 summary 3442 non-null object \n 3 space 2528 non-null object \n 4 description 3585 non-null object \n 5 host_id 3585 non-null int64 \n 6 host_is_superhost 3585 non-null object \n 7 property_type 3582 non-null object \n 8 room_type 3585 non-null object \n 9 accommodates 3585 non-null int64 \n 10 bathrooms 3571 non-null float64\n 11 bedrooms 3575 non-null float64\n 12 beds 3576 non-null float64\n 13 bed_type 3585 non-null object \n 14 amenities 3585 non-null object \n 15 square_feet 56 non-null float64\n 16 price 3585 non-null float64\n 17 weekly_price 892 non-null float64\n 18 monthly_price 888 non-null float64\n 19 security_deposit 1342 non-null float64\n 20 cleaning_fee 2478 non-null float64\n 21 guests_included 3585 non-null int64 \n 22 extra_people 3585 non-null float64\n 23 minimum_nights 3585 non-null int64 \n 24 maximum_nights 3585 non-null int64 \n 25 availability_30 3585 non-null int64 \n 26 availability_60 3585 non-null int64 \n 27 availability_90 3585 non-null int64 \n 28 availability_365 3585 non-null int64 \n 29 number_of_reviews 3585 non-null int64 \n 30 first_review 2829 non-null object \n 31 last_review 2829 non-null object \n 32 instant_bookable 3585 non-null object \n 33 cancellation_policy 3585 non-null object \n 34 reviews_per_month 2829 non-null float64\ndtypes: float64(11), int64(11), object(13)\nmemory usage: 980.4+ KB\n"
],
[
"df = bost_listing.copy()",
"_____no_output_____"
],
[
"useless_columns1 = ['name', 'summary', 'space', 'description', 'host_id', 'host_is_superhost', 'property_type', 'room_type', 'bed_type', 'amenities', 'first_review', 'last_review', 'instant_bookable', 'cancellation_policy']\n\ndf.drop(useless_columns1, axis=1, inplace=True)",
"_____no_output_____"
],
[
"# Fill the mean of the columns for any missing values\n\nfill_mean = lambda col: col.fillna(col.mean())\n\ndf = df.apply(fill_mean, axis=0)",
"_____no_output_____"
],
[
"# Setting X variables\nX = df[['accommodates', 'bathrooms', 'bedrooms', 'beds', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people']]",
"_____no_output_____"
],
[
"# Setting y variable\n\ny = df['price']",
"_____no_output_____"
],
[
"# Creating train and test sets of data\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)",
"_____no_output_____"
],
[
"# Instantiate a Linear Regression model with normalized data\n\nlm_model = LinearRegression (normalize=True)",
"_____no_output_____"
],
[
"# Fit model to the training data\n\nlm_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# Predict the response for the training data and the test data\n\ny_test_preds = lm_model.predict(X_test)\ny_train_preds = lm_model.predict(X_train)",
"_____no_output_____"
],
[
"# Comparing the values of the y_test_preds (Y Prediction) with the y_test values:\n\nplt.scatter(y_test, y_test_preds)",
"_____no_output_____"
],
[
"# Obtain an r-squared value for both the training and test data\n\ntest_score = r2_score(y_test, y_test_preds)\ntrain_score = r2_score(y_train, y_train_preds)\n\nprint(\"Training R2 {}. Test R2 {}.\".format(train_score, test_score))",
"Training R2 0.27363955935152584. Test R2 0.19501791226572707.\n"
]
],
[
[
"The predicted values from the Linear Regression Model created is quite far away from the Actual Values. The model has a lot of scope of being improved",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a991b0fb0ee8824d461147d576171d945561ede
| 117,503 |
ipynb
|
Jupyter Notebook
|
data_science/code/modeling/42_deg_c/ec_incorporation_42_deg_c.ipynb
|
blablabananarama/thesis
|
acb8978cb6dd95f03e3f1e591e0e1c274d369103
|
[
"CC0-1.0"
] | null | null | null |
data_science/code/modeling/42_deg_c/ec_incorporation_42_deg_c.ipynb
|
blablabananarama/thesis
|
acb8978cb6dd95f03e3f1e591e0e1c274d369103
|
[
"CC0-1.0"
] | null | null | null |
data_science/code/modeling/42_deg_c/ec_incorporation_42_deg_c.ipynb
|
blablabananarama/thesis
|
acb8978cb6dd95f03e3f1e591e0e1c274d369103
|
[
"CC0-1.0"
] | 1 |
2021-01-20T11:29:14.000Z
|
2021-01-20T11:29:14.000Z
| 122.144491 | 30,828 | 0.841561 |
[
[
[
"import os\nfrom dotenv import load_dotenv, find_dotenv\nfrom os.path import join, dirname, basename, exists, isdir\n\n### Load environmental variables from the project root directory ###\n# find .env automagically by walking up directories until it's found\ndotenv_path = find_dotenv()\n\n# load up the entries as environment variables\nload_dotenv(dotenv_path)\n\n# now you can get the variables using their names\n\n# Check whether a network drive has been specified\nDATABASE = os.environ.get(\"NETWORK_URL\")\nif DATABASE == 'None':\n pass\nelse:\n pass\n #mount network drive here\n\n# set up directory pathsa\nCURRENT_DIR = os.getcwd()\nPROJ = dirname(dotenv_path) # project root directory\n\nDATA = join(PROJ, 'data') #data directory\nRAW_EXTERNAL = join(DATA, 'raw_external') # external data raw directory\nRAW_INTERNAL = join(DATA, 'raw_internal') # internal data raw directory\nINTERMEDIATE = join(DATA, 'intermediate') # intermediate data directory\nFINAL = join(DATA, 'final') # final data directory\n\nRESULTS = join(PROJ, 'results') # output directory\nFIGURES = join(RESULTS, 'figures') # figure output directory\nPICTURES = join(RESULTS, 'pictures') # picture output directory\n\n\n# make folders specific for certain data\nfolder_name = ''\nif folder_name != '':\n #make folders if they don't exist\n if not exists(join(RAW_EXTERNAL, folder_name)):\n os.makedirs(join(RAW_EXTERNAL, folder_name))\n\n if not exists(join(INTERMEDIATE, folder_name)):\n os.makedirs(join(INTERMEDIATE, folder_name))\n\n if not exists(join(FINAL, folder_name)):\n os.makedirs(join(FINAL, folder_name))\n\n\nprint('Standard variables loaded, you are good to go!')",
"Standard variables loaded, you are good to go!\n"
],
[
"import cobra\nimport os\nimport pandas as pd\nimport cameo\nimport wget\nimport ssl\nfrom scipy.stats import pearsonr\n\n\n#E. coli model:\nssl._create_default_https_context = ssl._create_unverified_context\nwget.download(\"https://raw.githubusercontent.com/BenjaSanchez/notebooks/master/e_coli_simulations/eciML1515.xml\")\neColi_Model = cobra.io.read_sbml_model(\"eciML1515.xml\")\nos.remove(\"eciML1515.xml\")\n\n\n# proteomics data:\nproteomics_dataset = f\"{INTERMEDIATE}/proteomics/proteomics_concentrations.csv\"\nweights_location = f\"{INTERMEDIATE}/proteomics/proteomics_masses.csv\"",
"_____no_output_____"
],
[
"from collections import namedtuple\nfrom cobra.medium.boundary_types import find_external_compartment\nfrom cobra.io.dict import reaction_to_dict\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"from simulations.modeling.driven import (\n adjust_fluxes2model,\n flexibilize_proteomics,\n minimize_distance,\n)",
"_____no_output_____"
],
[
"exchange_reaction = \"42°C glucose\"\nexchange_reaction_lowercase = \"42c\"\n",
"_____no_output_____"
],
[
"def reset_real_proteomics(proteomics_dataset):\n '''loads set of proteomics data from the provided dataset file into dict of lists'''\n data = pd.read_csv(proteomics_dataset, index_col=\"UP\") # yeast\n data_dict = {}\n for i in range(0,data.shape[1], 3):\n uncertainty = data.iloc[:,i:i+3].std(axis=1)\n uncertainty_name = data.columns[i]+ \"_uncertainty\"\n data[uncertainty_name] = uncertainty\n data_dict[data.columns[i]] = [{'identifier':data.index[j], 'measurement':data.iloc[j,i], 'uncertainty':data[uncertainty_name][j] }\\\n for j in range(0, len(data.iloc[:,i]))]\n data_dict[data.columns[i+1]] = [{'identifier':data.index[j], 'measurement':data.iloc[j,i+1], 'uncertainty':data[uncertainty_name][j] }\\\n for j in range(0, len(data.iloc[:,i+1]))]\n data_dict[data.columns[i+2]] = [{'identifier':data.index[j], 'measurement':data.iloc[j,i+2], 'uncertainty':data[uncertainty_name][j] }\\\n for j in range(0, len(data.iloc[:,i+2]))]\n return data_dict\n\n",
"_____no_output_____"
],
[
"proteomics_data = reset_real_proteomics(proteomics_dataset)\n\ngrowth_rates = pd.read_csv(f\"{RAW_INTERNAL}/proteomics/growth_conditions.csv\")\ngrowth_rates = growth_rates.drop(growth_rates.columns.difference(['Growth condition','Growth rate (h-1)', 'Stdev']), 1)\ngrowth_rates = growth_rates.drop([0,1], axis=0)",
"_____no_output_____"
],
[
"\ndef find_exchange_rxn(compound, model):\n exchange_reactions = [i for i in model.reactions if \"EX\" in i.id]\n compound_ex_rxn = [i for i in exchange_reactions if compound in i.name]\n compound_ex_rxn = [i for i in compound_ex_rxn if len(list(i._metabolites.keys())) == 1 \\\n & (list(i._metabolites.values())[0] == 1.0) \\\n & (list(i._metabolites.keys())[0].name == compound + \" [extracellular space]\")]\n return compound_ex_rxn\n\n\n# find Pyruvate\nac_ex = find_exchange_rxn(exchange_reaction, eColi_Model)\nprint(ac_ex)\nmodel = eColi_Model\n\n# minimal medium with pyruvate\n# pyruvate_growth_rate = list(growth_rates['Growth rate (h-1)'].loc[growth_rates['Growth condition'] == \"Acetate\"])[0]\n# model = eColi_Model.copy()\n# medium = model.medium\n# medium.pop(\"EX_glc__D_e_REV\", None)\n\n\n\n# medium[f'{ac_ex[0].id}'] = 10\n# model.medium = medium\n# pyr_model.medium = minimal_medium(pyr_model).to_dict()\nprint(model.optimize())",
"[]\n<Solution 0.877 at 0x123268e10>\n"
],
[
"# Flexibilize proteomics\n\nec_model_1 = model\nec_model_2 = model\nec_model_3 = model\n\n# first \nprint(\"Number of proteins originally: \", len(proteomics_data[exchange_reaction_lowercase]))\ngrowth_rate = {\"measurement\":float(list(growth_rates['Growth rate (h-1)'].loc[growth_rates['Growth condition'] == exchange_reaction])[0]),\\\n \"uncertainty\":float(list(growth_rates['Stdev'].loc[growth_rates['Growth condition'] == exchange_reaction])[0])}\nnew_growth_rate, new_proteomics, warnings = flexibilize_proteomics(ec_model_1, \"BIOMASS_Ec_iML1515_core_75p37M\", growth_rate, proteomics_data[exchange_reaction_lowercase], [])\nprint(\"Number of proteins incorporated: \", len(new_proteomics))\n\n# first \nprint(\"Number of proteins originally: \", len(proteomics_data[exchange_reaction_lowercase + \"2\"]))\ngrowth_rate = {\"measurement\":float(list(growth_rates['Growth rate (h-1)'].loc[growth_rates['Growth condition'] == exchange_reaction])[0]),\\\n \"uncertainty\":float(list(growth_rates['Stdev'].loc[growth_rates['Growth condition'] == exchange_reaction])[0])}\nnew_growth_rate, new_proteomics, warnings = flexibilize_proteomics(ec_model_2, \"BIOMASS_Ec_iML1515_core_75p37M\", growth_rate, proteomics_data[exchange_reaction_lowercase + \"1\"], [])\nprint(\"Number of proteins incorporated: \", len(new_proteomics))\n\n# first \nprint(\"Number of proteins originally: \", len(proteomics_data[exchange_reaction_lowercase + \"2\"]))\ngrowth_rate = {\"measurement\":float(list(growth_rates['Growth rate (h-1)'].loc[growth_rates['Growth condition'] == exchange_reaction])[0]),\\\n \"uncertainty\":float(list(growth_rates['Stdev'].loc[growth_rates['Growth condition'] == exchange_reaction])[0])}\nnew_growth_rate, new_proteomics, warnings = flexibilize_proteomics(ec_model_3, \"BIOMASS_Ec_iML1515_core_75p37M\", growth_rate, proteomics_data[exchange_reaction_lowercase + \"2\"], [])\nprint(\"Number of proteins incorporated: \", len(new_proteomics))\n",
"Number of proteins originally: 2058\nNumber of proteins incorporated: 1988\nNumber of proteins originally: 2058\nNumber of proteins incorporated: 1987\nNumber of proteins originally: 2058\nNumber of proteins incorporated: 1987\n"
]
],
[
[
"# Extraction of the usages \n\n",
"_____no_output_____"
]
],
[
[
"\nweights = pd.read_csv(weights_location, index_col = \"UP\")\n# usages of ac proteins\n\n#solution = pyr_model.optimize()\n\n# pyr model uages\n\n\ndef get_usages(prot_int_model, weights):\n # get the usages of a model integrated with proteomics\n try:\n solution = cobra.flux_analysis.pfba(prot_int_model)\n except:\n print(\"used normal fba\")\n solution = prot_int_model.optimize()\n abs_usages = pd.Series()\n perc_usages = pd.Series()\n mass_usages = 0\n non_mass_proteins = []\n for reaction in prot_int_model.reactions:\n if reaction.id.startswith(\"prot_\"):\n prot_id = reaction.id.replace(\"prot_\",\"\")\n prot_id = prot_id.replace(\"_exchange\",\"\")\n abs_usage = solution.fluxes[reaction.id]\n abs_usages = abs_usages.append(pd.Series({prot_id:abs_usage}))\n perc_usage = solution.fluxes[reaction.id]/reaction.upper_bound\n perc_usages = perc_usages.append(pd.Series({prot_id:perc_usage}))\n try: \n if perc_usage <= 100:\n mass_usages += perc_usage/100 * weights[prot_id]\n except:\n non_mass_proteins.append(prot_id)\n return abs_usages, perc_usages, mass_usages, non_mass_proteins\n\n# \nabs_usages_1, perc_usages_1, mass_usage_1, non_mass_proteins_1 = get_usages(ec_model_1, weights[f\"42°C\"])\nabs_usages_2, perc_usages_2, mass_usage_2, non_mass_proteins_2 = get_usages(ec_model_2, weights[f\"42°C\"])\nabs_usages_3, perc_usages_3, mass_usage_3, non_mass_proteins_3 = get_usages(ec_model_3, weights[f\"42°C\"])",
"_____no_output_____"
],
[
"len(non_mass_proteins_1)\nprint(\"Mass of Proteins total: \", sum(weights[\"acetate\"]))\nprint(\"Mass actually used: \", sum(weights[\"acetate\"])*(mass_usage_1/sum(weights[\"acetate\"])))",
"Mass of Proteins total: 117633655349.57416\nMass actually used: 123101086.04662248\n"
],
[
"abs_usages_df = pd.DataFrame({f\"{exchange_reaction_lowercase}\": perc_usages_1, f\"{exchange_reaction_lowercase}.1\": perc_usages_2, f\"{exchange_reaction_lowercase}.2\": perc_usages_3})\nabs_usages_df.to_csv(f\"{FINAL}/abs_usages_gecko/{exchange_reaction_lowercase}\")",
"_____no_output_____"
]
],
[
[
"# Masses\n\nMasses that are actually used seem very low, at 0,9 %\n\nWhat should I actually do here?\n\nTotal protein mass: 117633655349 Dalton",
"_____no_output_____"
]
],
[
[
"import numpy as np; np.random.seed(42)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = perc_usages_1.to_frame()\ndf[\"perc_usages_2\"] = perc_usages_2\ndf[\"perc_usages_3\"] = perc_usages_3\ndf.columns = [\"Measurement 1\", \"Measurement 2\", \"Measurement 3\"]\n\nsns.boxplot(x=\"variable\", y=\"value\", data=pd.melt(df[(df > 0) & (df < 100)]))\nplt.xlabel('Measurements')\nplt.ylabel('Usage of measurement in %')\nplt.title('% usage of proteins per ec simulation ')\nplt.savefig(f'{FIGURES}/ec_incorporation_perc_usage_box_ac')\nplt.show()\n\n",
"_____no_output_____"
],
[
"#df['pct'] = df['Location'].div(df.groupby('Hour')['Location'].transform('sum'))\n#g = sns.FacetGrid(df, row=\"pct\", hue=\"pct\", aspect=15, height=.5, palette=pal)\n\nperc_incorporation_pyr = pd.melt(df[(df > 0) & (df < 100)])\n \n# Method 1: on the same Axis\nsns.distplot( df[(df > 0) & (df < 100)].iloc[:,0] , color=\"skyblue\", label=\"1\", kde=False)\nsns.distplot( df[(df > 0) & (df < 100)].iloc[:,1], color=\"red\", label=\"2\", kde=False)\nsns.distplot( df[(df > 0) & (df < 100)].iloc[:,2], color=\"green\", label=\"3\", kde=False)\n",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n\n# standardize data for pca \n# #features = ['sepal length', 'sepal width', 'petal length', 'petal width']# Separating out the features\npca_df_all_proteomics_and_pyr = pd.read_csv(proteomics_dataset, index_col=\"UP\").loc[df.index,:]\npca_df_all_proteomics_and_pyr['pyr_1'] = abs_usages_1\npca_df_all_proteomics_and_pyr = pca_df_all_proteomics_and_pyr.T.dropna(axis='columns')\nx = pca_df_all_proteomics_and_pyr.values\nx = StandardScaler().fit_transform(x)\n\n# run pca\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nprincipalComponents = pca.fit_transform(x)\nprincipalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])\nprincipalDf.index = pca_df_all_proteomics_and_pyr.index\n\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 component PCA with zero values', fontsize = 20)\n\namount = len(principalDf.index)\nfor i in range(amount):\n c = [float(i)/float(amount), 0.0, float(amount-i)/float(amount)] #R,G,B\n ax.scatter(principalDf.loc[principalDf.index[i], 'principal component 1']\n , principalDf.loc[principalDf.index[i], 'principal component 2']\n , color = c \n , s = 50)\nax.scatter(principalDf.loc[\"pyr_1\", 'principal component 1']\n , principalDf.loc[principalDf.index[i], 'principal component 2']\n , color = \"green\"\n , s = 50)\n\n\n#ax.legend(pca_df_all_proteomics_and_pyr.index)\nax.grid()\nplt.savefig(f'{FIGURES}/')\n",
"/Library/Python/3.7/site-packages/pandas/core/indexing.py:1418: FutureWarning:\n\n\nPassing list-likes to .loc or [] with any missing label will raise\nKeyError in the future, you can use .reindex() as an alternative.\n\nSee the documentation here:\nhttps://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike\n\n"
],
[
"# standardize data for pca \n# #features = ['sepal length', 'sepal width', 'petal length', 'petal width']# Separating out the features\npca_df_all_proteomics_and_pyr = pd.read_csv(proteomics_dataset, index_col=\"UP\").loc[df.index,:]\npca_df_all_proteomics_and_pyr['pyr_1'] = abs_usages_1\npca_df_all_proteomics_and_pyr = pca_df_all_proteomics_and_pyr[pca_df_all_proteomics_and_pyr['pyr_1'] > 0]\npca_df_all_proteomics_and_pyr = pca_df_all_proteomics_and_pyr.T.dropna(axis='columns')\nx = pca_df_all_proteomics_and_pyr.values\nx = StandardScaler().fit_transform(x)\n\n# run pca\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nprincipalComponents = pca.fit_transform(x)\nprincipalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])\nprincipalDf.index = pca_df_all_proteomics_and_pyr.index\n\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 component PCA without zero values', fontsize = 20)\n\namount = len(principalDf.index)\nfor i in range(amount):\n c = [float(i)/float(amount), 0.0, float(amount-i)/float(amount)] #R,G,B\n ax.scatter(principalDf.loc[principalDf.index[i], 'principal component 1']\n , principalDf.loc[principalDf.index[i], 'principal component 2']\n , color = c \n , s = 50)\nax.scatter(principalDf.loc[\"pyr_1\", 'principal component 1']\n , principalDf.loc[principalDf.index[i], 'principal component 2']\n , color = \"green\"\n , s = 50)\n\nax.grid()",
"/Library/Python/3.7/site-packages/pandas/core/indexing.py:1418: FutureWarning:\n\n\nPassing list-likes to .loc or [] with any missing label will raise\nKeyError in the future, you can use .reindex() as an alternative.\n\nSee the documentation here:\nhttps://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike\n\n"
],
[
"pd.DataFrame({'ac_1':abs_usages_1, 'ac_2':abs_usages_2, 'ac_3':abs_usages_3}).to_csv(f'{INTERMEDIATE}/proteomics/acetate_usages.csv')\n",
"_____no_output_____"
]
],
[
[
"# Check sensibility\n- try using fluxomics data to compare the fluxes to the values of the non-ec model",
"_____no_output_____"
]
],
[
[
"# load fluxomics data\nfluxes = pd.read_csv(f\"{RAW_EXTERNAL}/chemostat_flux.csv\", index_col = \"D [h-1]\")[\"0.19\"]\n\nfluxes",
"_____no_output_____"
],
[
"ec_model_sol = ec_model_1.optimize()\nec_model_pfba = cobra.flux_analysis.pfba(ec_model_1)",
"_____no_output_____"
],
[
"\n\n# compare the \ntype(ec_model_1.optimize().fluxes)\nf = ec_model_1.optimize()\ng = [i for i in range(0, len(f.fluxes)) if \"EX_glc\" in f.fluxes.index[i]]\nf.fluxes[g]\n\n# find reaction GLC + ATP -> G6P \nglc_metabolite = ec_model_1.metabolites.get_by_id(\"glc__D_c\")\natp_metabolite = ec_model_1.metabolites.get_by_id(\"atp_c\")\nreactions_glc_atp = [i for i in model.reactions\\\n if glc_metabolite in i.metabolites and atp_metabolite in i.metabolites]\nhexokinase = reactions[0]\nprint(\"GLC + ATP -> G6P :\",ec_model_sol[hexokinase.id])\n\n# find flux glc atp \n\n# find reaction G6P -> 6PG + NADPH\ng6p_metabolite = ec_model_1.metabolites.get_by_id(\"g6p_c\")\nnadph_metabolite = ec_model_1.metabolites.get_by_id(\"nadph_c\")\nnadp_reactions = [i for i in model.reactions\\\n if nadph_metabolite in i.metabolites and g6p_metabolite in i.metabolites]\ng6p_nadph = nadp_reactions[0]\nprint(\"G6P -> 6PG + NADPH :\",ec_model_sol[g6p_nadph.id])\n\n\n# find reaction G6P -> F6P\nf6p_metabolite = ec_model_1.metabolites.get_by_id(\"f6p_c\")\ng6p_metabolite = ec_model_1.metabolites.get_by_id(\"g6p_c\")\nf6p_reactions = [i for i in model.reactions\\\n if f6p_metabolite in i.metabolites and g6p_metabolite in i.metabolites]\nf6p_reaction = f6p_reactions[0]\nprint(\"G6P -> F6P :\",ec_model_sol[f6p_reaction.id])\n\n\n# find reaction 6PG -> T3P + PYR\npyr_metabolite = ec_model_1.metabolites.get_by_id(\"pyr_c\")\nsixpgl_metabolite = ec_model_1.metabolites.get_by_id(\"6pgl_c\")\npyr_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites]# and sixpgl_metabolite in i.metabolites]\npyr_reaction = pyr_reactions[0]\nprint(\"6PG -> T3P + PYR :\",ec_model_sol[pyr_reaction.id])\n\n\n# find reaction F6P + ATP -> 2T3P\npyr_reactions = [i for i in model.reactions\\\n if f6p_metabolite in i.metabolites and atp_metabolite in i.metabolites]\nf6p_reaction = pyr_reactions[0]\nprint(\"F6P + ATP -> 2T3P :\",ec_model_sol[f6p_reaction.id])\n\n\n# find reaction PGA -> 
PEP\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\npga_reactions = [i for i in model.reactions\\\n if pep_metabolite in i.metabolites]# and atp_metabolite in i.metabolites]\npga_reaction = pga_reactions[0]\n#ec_model_1.optimize()[pga_reactions.id]\n\n# find reaction PEP -> PYR + ATP\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\npeppyr_reactions = [i for i in model.reactions\\\n if pep_metabolite in i.metabolites and atp_metabolite in i.metabolites]\npeppyr_reaction = peppyr_reactions[1]\nprint(\"PEP -> PYR + ATP :\",ec_model_sol[peppyr_reaction.id])\n\n\n# find reaction PYR -> AcCoA + CO2 + NADH\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\nco2_metabolite = ec_model_1.metabolites.get_by_id(\"co2_c\")\naccoa_metabolite = ec_model_1.metabolites.get_by_id(\"accoa_c\")\npyraccoa_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites and co2_metabolite in i.metabolites and accoa_metabolite in i.metabolites]\n#pyraccoa_reaction = pyraccoa_reactions[0]\n#ec_model_1.optimize()[pyraccoa_reaction.id]\n\n# find reaction FUM -> MAL\nfum_metabolite = ec_model_1.metabolites.get_by_id(\"fum_c\")\nco2_metabolite = ec_model_1.metabolites.get_by_id(\"co2_c\")\naccoa_metabolite = ec_model_1.metabolites.get_by_id(\"accoa_c\")\npyraccoa_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites and co2_metabolite in i.metabolites and accoa_metabolite in i.metabolites]\n#pyraccoa_reaction = pyraccoa_reactions[0]\n#ec_model_1.optimize()[pyraccoa_reaction.id]\n\n\n# find reaction AcCoA -> Acetate + ATP\nac_metabolite = ec_model_1.metabolites.get_by_id(\"ac_c\")\naccoaac_reactions = [i for i in model.reactions\\\n if ac_metabolite in i.metabolites and accoa_metabolite in i.metabolites and atp_metabolite in i.metabolites]\naccoaac_reaction = accoaac_reactions[0]\nprint(\"AcCoA -> Acetate + ATP: \" ,ec_model_sol[accoaac_reaction.id])\n\n# find reaction NADPH -> NADH\nnadh_metabolite = 
ec_model_1.metabolites.get_by_id(\"nadh_c\")\nnadpnadh_reactions = [i for i in model.reactions\\\n if nadh_metabolite in i.metabolites and nadph_metabolite in i.metabolites]\nnadpnadh_reaction = nadpnadh_reactions[0]\n#ec_model_1.optimize()[nadpnadh_reaction.id]",
"GLC + ATP -> G6P : 3.076127514751424\ng6p nadph : 3.076127514751424\nG6P -> F6P : 2.5168621099324184\n6PG -> T3P + PYR : 0.0\nF6P + ATP -> 2T3P : 0.2338547795227729\nPEP -> PYR + ATP : 0.0\nAcCoA -> Acetate + ATP: 0.0\n"
],
[
"\n#E. coli model:\nssl._create_default_https_context = ssl._create_unverified_context\nwget.download(\"https://raw.githubusercontent.com/BenjaSanchez/notebooks/master/e_coli_simulations/eciML1515.xml\")\nunc_model = cobra.io.read_sbml_model(\"eciML1515.xml\")\nos.remove(\"eciML1515.xml\")\n\nunc_model_sol = unc_model.optimize()\n#unc_model_pfba_sol = cobra.flux_analysis.pfba(unc_model_sol)",
"_____no_output_____"
],
[
"# load fluxomics data\nfluxes = pd.read_csv(f\"{RAW_EXTERNAL}/chemostat_flux.csv\", index_col = \"D [h-1]\")[\"0.19\"]\n\n\n# compare the \ntype(ec_model_1.optimize().fluxes)\nf = ec_model_1.optimize()\ng = [i for i in range(0, len(f.fluxes)) if \"EX_glc\" in f.fluxes.index[i]]\nf.fluxes[g]\nunc_model.medium\n\n# find reaction GLC + ATP -> G6P \nglc_metabolite = ec_model_1.metabolites.get_by_id(\"glc__D_c\")\natp_metabolite = ec_model_1.metabolites.get_by_id(\"atp_c\")\nreactions_glc_atp = [i for i in model.reactions\\\n if glc_metabolite in i.metabolites and atp_metabolite in i.metabolites]\nhexokinase = reactions[0]\nprint(\"GLC + ATP -> G6P :\",unc_model_sol[hexokinase.id])\n\n# find flux glc atp \n\n# find reaction G6P -> 6PG + NADPH\ng6p_metabolite = ec_model_1.metabolites.get_by_id(\"g6p_c\")\nnadph_metabolite = ec_model_1.metabolites.get_by_id(\"nadph_c\")\nnadp_reactions = [i for i in model.reactions\\\n if nadph_metabolite in i.metabolites and g6p_metabolite in i.metabolites]\ng6p_nadph = nadp_reactions[0]\nprint(\"G6P -> 6PG + NADPH :\",unc_model_sol[g6p_nadph.id])\n\n\n# find reaction G6P -> F6P\nf6p_metabolite = ec_model_1.metabolites.get_by_id(\"f6p_c\")\ng6p_metabolite = ec_model_1.metabolites.get_by_id(\"g6p_c\")\nf6p_reactions = [i for i in model.reactions\\\n if f6p_metabolite in i.metabolites and g6p_metabolite in i.metabolites]\nf6p_reaction = f6p_reactions[0]\nprint(\"G6P -> F6P :\",unc_model_sol[f6p_reaction.id])\n\n\n# find reaction 6PG -> T3P + PYR\npyr_metabolite = ec_model_1.metabolites.get_by_id(\"pyr_c\")\nsixpgl_metabolite = ec_model_1.metabolites.get_by_id(\"6pgl_c\")\npyr_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites]# and sixpgl_metabolite in i.metabolites]\npyr_reaction = pyr_reactions[0]\nprint(\"6PG -> T3P + PYR :\",unc_model_sol[pyr_reaction.id])\n\n\n# find reaction F6P + ATP -> 2T3P\npyr_reactions = [i for i in model.reactions\\\n if f6p_metabolite in i.metabolites and atp_metabolite in 
i.metabolites]\nf6p_reaction = pyr_reactions[0]\nprint(\"F6P + ATP -> 2T3P :\",unc_model_sol[f6p_reaction.id])\n\n\n# find reaction PGA -> PEP\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\npga_reactions = [i for i in model.reactions\\\n if pep_metabolite in i.metabolites]# and atp_metabolite in i.metabolites]\npga_reaction = pga_reactions[0]\n#ec_model_1.optimize()[pga_reactions.id]\n\n# find reaction PEP -> PYR + ATP\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\npeppyr_reactions = [i for i in model.reactions\\\n if pep_metabolite in i.metabolites and atp_metabolite in i.metabolites]\npeppyr_reaction = peppyr_reactions[1]\nprint(\"PEP -> PYR + ATP :\",unc_model_sol[peppyr_reaction.id])\n\n\n# find reaction PYR -> AcCoA + CO2 + NADH\npep_metabolite = ec_model_1.metabolites.get_by_id(\"pep_c\")\nco2_metabolite = ec_model_1.metabolites.get_by_id(\"co2_c\")\naccoa_metabolite = ec_model_1.metabolites.get_by_id(\"accoa_c\")\npyraccoa_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites and co2_metabolite in i.metabolites and accoa_metabolite in i.metabolites]\n#pyraccoa_reaction = pyraccoa_reactions[0]\n#ec_model_1.optimize()[pyraccoa_reaction.id]\n\n# find reaction FUM -> MAL\nfum_metabolite = ec_model_1.metabolites.get_by_id(\"fum_c\")\nco2_metabolite = ec_model_1.metabolites.get_by_id(\"co2_c\")\naccoa_metabolite = ec_model_1.metabolites.get_by_id(\"accoa_c\")\npyraccoa_reactions = [i for i in model.reactions\\\n if pyr_metabolite in i.metabolites and co2_metabolite in i.metabolites and accoa_metabolite in i.metabolites]\n#pyraccoa_reaction = pyraccoa_reactions[0]\n#ec_model_1.optimize()[pyraccoa_reaction.id]\n\n\n# find reaction AcCoA -> Acetate + ATP\nac_metabolite = ec_model_1.metabolites.get_by_id(\"ac_c\")\naccoaac_reactions = [i for i in model.reactions\\\n if ac_metabolite in i.metabolites and accoa_metabolite in i.metabolites and atp_metabolite in i.metabolites]\naccoaac_reaction = 
accoaac_reactions[0]\nprint(\"AcCoA -> Acetate + ATP: \" ,unc_model_sol[accoaac_reaction.id])\n\n# find reaction NADPH -> NADH\nnadh_metabolite = ec_model_1.metabolites.get_by_id(\"nadh_c\")\nnadpnadh_reactions = [i for i in model.reactions\\\n if nadh_metabolite in i.metabolites and nadph_metabolite in i.metabolites]\nnadpnadh_reaction = nadpnadh_reactions[0]\n#ec_model_1.optimize()[nadpnadh_reaction.id]",
"GLC + ATP -> G6P : 2.355367839801507\nG6P -> 6PG + NADPH : 2.355367839801507\nG6P -> F6P : 7.644632160198492\n6PG -> T3P + PYR : 0.0\nF6P + ATP -> 2T3P : 0.0\nPEP -> PYR + ATP : 0.0\nAcCoA -> Acetate + ATP: 0.0\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a992442cf0f0a8ee69362319524f437be3765a0
| 112,278 |
ipynb
|
Jupyter Notebook
|
demo/mnist_code/mnist-keras.ipynb
|
scailable/sclbl-emnist-demo
|
2dbe3a107ef393a466306d5aebe7a7f75b25e669
|
[
"MIT"
] | null | null | null |
demo/mnist_code/mnist-keras.ipynb
|
scailable/sclbl-emnist-demo
|
2dbe3a107ef393a466306d5aebe7a7f75b25e669
|
[
"MIT"
] | null | null | null |
demo/mnist_code/mnist-keras.ipynb
|
scailable/sclbl-emnist-demo
|
2dbe3a107ef393a466306d5aebe7a7f75b25e669
|
[
"MIT"
] | null | null | null | 59.913554 | 7,229 | 0.591238 |
[
[
[
"### load library",
"_____no_output_____"
]
],
[
[
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)",
"_____no_output_____"
],
[
"from tensorflow import keras\n# from tensorflow.keras.utils import to_categorical\n# from tensorflow.keras.models import Sequential\n# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n# from tensorflow.keras.losses import categorical_crossentropy\n# from tensorflow.keras.optimizers import Adam\n# from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"### Load dataset ",
"_____no_output_____"
]
],
[
[
"train_data_path = '../mnist_input/mnist-digits-train.csv'\ntest_data_path = '../mnist_input/mnist-digits-test.csv'",
"_____no_output_____"
],
[
"train_data = pd.read_csv(train_data_path, header=None)",
"_____no_output_____"
],
[
"train_data.head(10)",
"_____no_output_____"
],
[
"# The classes of this balanced dataset are as follows. Index into it based on class label\nclass_mapping = '0123456789'",
"_____no_output_____"
],
[
"class_mapping[5]",
"_____no_output_____"
],
[
"train_data.shape",
"_____no_output_____"
]
],
[
[
"### Data is flipped",
"_____no_output_____"
]
],
[
[
"num_classes = len(train_data[0].unique())\nrow_num = 8\n\nplt.imshow(train_data.values[row_num, 1:].reshape([28, 28]), cmap='Greys_r')\nplt.show()\n\nimg_flip = np.transpose(train_data.values[row_num,1:].reshape(28, 28), axes=[1,0]) # img_size * img_size arrays\nplt.imshow(img_flip, cmap='Greys_r')\n\nplt.show()",
"_____no_output_____"
],
[
"def show_img(data, row_num):\n img_flip = np.transpose(data.values[row_num,1:].reshape(28, 28), axes=[1,0]) # img_size * img_size arrays\n plt.title('Class: ' + str(data.values[row_num,0]) + ', Label: ' + str(class_mapping[data.values[row_num,0]]))\n plt.imshow(img_flip, cmap='Greys_r')",
"_____no_output_____"
],
[
"show_img(train_data, 149)",
"_____no_output_____"
],
[
"# 10 digits\nnum_classes = 10\nimg_size = 28\n\ndef img_label_load(data_path, num_classes=None):\n data = pd.read_csv(data_path, header=None)\n data_rows = len(data)\n if not num_classes:\n num_classes = len(data[0].unique())\n \n # this assumes square imgs. Should be 28x28\n img_size = int(np.sqrt(len(data.iloc[0][1:])))\n \n # Images need to be transposed. This line also does the reshaping needed.\n imgs = np.transpose(data.values[:,1:].reshape(data_rows, img_size, img_size, 1), axes=[0,2,1,3]) # img_size * img_size arrays\n \n labels = keras.utils.to_categorical(data.values[:,0], num_classes) # one-hot encoding vectors\n \n return imgs/255., labels\n\n",
"_____no_output_____"
]
],
[
[
"### model, compile",
"_____no_output_____"
]
],
[
[
"model = keras.models.Sequential()\n\n# model.add(keras.layers.Reshape((img_size,img_size,1), input_shape=(784,)))\nmodel.add(keras.layers.Conv2D(filters=12, kernel_size=(5,5), strides=2, activation='relu', \n input_shape=(img_size,img_size,1)))\n# model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))\nmodel.add(keras.layers.Dropout(.5))\n\nmodel.add(keras.layers.Conv2D(filters=18, kernel_size=(3,3) , strides=2, activation='relu'))\n# model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))\nmodel.add(keras.layers.Dropout(.5))\n\nmodel.add(keras.layers.Conv2D(filters=24, kernel_size=(2,2), activation='relu'))\n# model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))\n\n# model.add(keras.layers.Conv2D(filters=30, kernel_size=(3,3), activation='relu'))\n\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(units=150, activation='relu'))\nmodel.add(keras.layers.Dense(units=num_classes, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 12, 12, 12) 312 \n_________________________________________________________________\ndropout (Dropout) (None, 12, 12, 12) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 5, 5, 18) 1962 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 5, 5, 18) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 4, 4, 24) 1752 \n_________________________________________________________________\nflatten (Flatten) (None, 384) 0 \n_________________________________________________________________\ndense (Dense) (None, 150) 57750 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 1510 \n=================================================================\nTotal params: 63,286\nTrainable params: 63,286\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"for layer in model.layers:\n print(layer.get_output_at(0).get_shape().as_list())\n",
"[None, 12, 12, 12]\n[None, 12, 12, 12]\n[None, 5, 5, 18]\n[None, 5, 5, 18]\n[None, 4, 4, 24]\n[None, 384]\n[None, 150]\n[None, 10]\n"
]
],
[
[
"### Train",
"_____no_output_____"
]
],
[
[
"X, y = img_label_load(train_data_path)\nprint(X.shape)",
"(240000, 28, 28, 1)\n"
],
[
"data_generator = keras.preprocessing.image.ImageDataGenerator(validation_split=.2)\n## consider using this for more variety\ndata_generator_with_aug = keras.preprocessing.image.ImageDataGenerator(validation_split=.2,\n width_shift_range=.2, height_shift_range=.2,\n rotation_range=60, zoom_range=.2, shear_range=.3)\n\n# if already ran this above, no need to do it again\n# X, y = img_label_load(train_data_path)\n# print(\"X.shape: \", X.shape)\n\ntraining_data_generator = data_generator.flow(X, y, subset='training')\nvalidation_data_generator = data_generator.flow(X, y, subset='validation')\nhistory = model.fit_generator(training_data_generator, \n steps_per_epoch=500, epochs=10, # can change epochs to 5\n validation_data=validation_data_generator)\n",
"WARNING:tensorflow:From <ipython-input-16-690a4fed7a6b>:15: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use Model.fit, which supports generators.\nEpoch 1/10\n500/500 [==============================] - 7s 13ms/step - loss: 0.7529 - accuracy: 0.7559 - val_loss: 0.2267 - val_accuracy: 0.9339\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 2/10\n500/500 [==============================] - 6s 13ms/step - loss: 0.3075 - accuracy: 0.9031 - val_loss: 0.1494 - val_accuracy: 0.9553\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 3/10\n500/500 [==============================] - 6s 13ms/step - loss: 0.2168 - accuracy: 0.9329 - val_loss: 0.1212 - val_accuracy: 0.9640\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 4/10\n500/500 [==============================] - 6s 13ms/step - loss: 0.1763 - accuracy: 0.9456 - val_loss: 0.0984 - val_accuracy: 0.9695\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 5/10\n500/500 [==============================] - 5s 9ms/step - loss: 0.1605 - accuracy: 0.9503 - val_loss: 0.0873 - val_accuracy: 0.9730\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 6/10\n500/500 [==============================] - 5s 9ms/step - loss: 0.1485 - accuracy: 0.9554 - val_loss: 0.0908 - val_accuracy: 0.9711\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 7/10\n500/500 [==============================] - 4s 9ms/step - loss: 0.1364 - accuracy: 0.9572 - val_loss: 0.0743 - val_accuracy: 0.9768\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 8/10\n500/500 [==============================] - 5s 9ms/step - loss: 0.1286 - accuracy: 0.9599 - val_loss: 0.0681 - val_accuracy: 
0.9792\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 9/10\n500/500 [==============================] - 4s 9ms/step - loss: 0.1069 - accuracy: 0.9660 - val_loss: 0.0656 - val_accuracy: 0.9798\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nEpoch 10/10\n500/500 [==============================] - 4s 9ms/step - loss: 0.1052 - accuracy: 0.9671 - val_loss: 0.0605 - val_accuracy: 0.9809\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\n"
],
[
"test_X, test_y = img_label_load(test_data_path)\ntest_data_generator = data_generator.flow(X, y)\n\nmodel.evaluate_generator(test_data_generator)",
"WARNING:tensorflow:From <ipython-input-17-e462b9b9a17f>:4: Model.evaluate_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use Model.evaluate, which supports generators.\n"
]
],
[
[
"### Look at some predictions\n",
"_____no_output_____"
]
],
[
[
"test_data = pd.read_csv(test_data_path, header=None)\nshow_img(test_data, 123)",
"_____no_output_____"
],
[
"X_test, y_test = img_label_load(test_data_path) # loads images and orients for model",
"_____no_output_____"
],
[
"def run_prediction(idx):\n result = np.argmax(model.predict(X_test[idx:idx+1]))\n print('Prediction: ', result, ', Char: ', class_mapping[result])\n print('Label: ', test_data.values[idx,0])\n show_img(test_data, idx)",
"_____no_output_____"
],
[
"import random\n\nfor _ in range(1,10):\n idx = random.randint(0, 47-1)\n run_prediction(idx)",
"Prediction: 2 , Char: 2\nLabel: 2\nPrediction: 6 , Char: 6\nLabel: 6\nPrediction: 6 , Char: 6\nLabel: 6\nPrediction: 5 , Char: 5\nLabel: 5\nPrediction: 6 , Char: 6\nLabel: 6\nPrediction: 6 , Char: 6\nLabel: 6\nPrediction: 3 , Char: 3\nLabel: 3\nPrediction: 5 , Char: 5\nLabel: 5\nPrediction: 5 , Char: 5\nLabel: 5\n"
],
[
"show_img(test_data, 123)\nnp.argmax(y_test[123])",
"_____no_output_____"
]
],
[
[
"### Keras exports",
"_____no_output_____"
]
],
[
[
"with open('model.json', 'w') as f:\n f.write(model.to_json())\nmodel.save_weights('./model.h5')\n\nmodel.save('./full_model.h5')\n#!dir\n# ... or ...\n#!ls -al",
"_____no_output_____"
]
],
[
[
"### Keras to ONNX\n",
"_____no_output_____"
]
],
[
[
"import keras2onnx\n\n# convert to onnx model\nonnx_model = keras2onnx.convert_keras(model, model.name)\n\n# save onnx model\nmodel_file = 'model.onnx'\nkeras2onnx.save_model(onnx_model, model_file)\n",
"tf executing eager_mode: True\ntf.keras model eager_mode: False\nThe ONNX operator number change on the optimization: 25 -> 15\nThe maximum opset needed by this model is only 11.\n"
]
],
[
[
"### Upload the ONNX file at Sclbl.net ...",
"_____no_output_____"
],
[
"### ... then test with Protobuf input",
"_____no_output_____"
]
],
[
[
"import requests\nimport base64\nfrom onnx import numpy_helper\n\n# serialize and base64 encode protobuf input\n\nxc = X_test[127]\n\nprint(X_test[127])\nxc = xc.astype('float32')\ntensor = numpy_helper.from_array(xc)\nserialized = tensor.SerializeToString()\nencoded = base64.b64encode(serialized)\n\n# then test the model on a sclbl.net cloud server\n\nurl = \"https://taskmanager.sclbl.net:8080/task/34e77475-51e7-11eb-962f-9600004e79cc\"\npayload = \"{\\\"input\\\":{\\\"content-type\\\":\\\"json\\\",\\\"location\\\":\\\"embedded\\\",\\\"data\\\":\\\"{\\\\\\\"input\\\\\\\": \\\\\\\"\" + encoded.decode('ascii') + \"\\\\\\\"}\\\"},\\\"output\\\":{\\\"content-type\\\":\\\"json\\\",\\\"location\\\":\\\"echo\\\"},\\\"control\\\":1,\\\"properties\\\":{\\\"language\\\":\\\"WASM\\\"}}\"\n\nresponse = requests.request(\"POST\", url, data = payload)\n\nprint(response.text.encode('utf8'))\nprint(\"Expected result: \" + str(np.argmax(y_test[127])))",
"[[[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.01568627]\n [0.01568627]\n [0.01960784]\n [0.08235294]\n [0.14509804]\n [0.1254902 ]\n [0.03529412]\n [0.01568627]\n [0.01568627]\n [0.00784314]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.01568627]\n [0.08235294]\n [0.19607843]\n [0.44705882]\n [0.49019608]\n [0.50588235]\n [0.6745098 ]\n [0.84313725]\n [0.79607843]\n [0.54901961]\n [0.49803922]\n [0.49019608]\n [0.32156863]\n [0.08627451]\n [0.00784314]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.07843137]\n [0.43137255]\n [0.6745098 ]\n [0.8627451 ]\n [0.96078431]\n [0.97647059]\n [0.98039216]\n [0.98039216]\n [0.98039216]\n [0.97647059]\n [0.97254902]\n [0.98039216]\n [0.97647059]\n [0.90980392]\n [0.66666667]\n [0.25490196]\n [0.00784314]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.00784314]\n [0.30980392]\n [0.88235294]\n [0.96862745]\n [0.96078431]\n [0.87058824]\n [0.85098039]\n [0.84313725]\n [0.6745098 ]\n [0.50588235]\n [0.50588235]\n [0.6745098 ]\n [0.8627451 ]\n [0.91764706]\n [0.99215686]\n [0.97254902]\n [0.54117647]\n [0.03529412]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. 
]\n [0.01176471]\n [0.37254902]\n [0.81568627]\n [0.85098039]\n [0.79607843]\n [0.54901961]\n [0.50588235]\n [0.49019608]\n [0.32156863]\n [0.15294118]\n [0.15294118]\n [0.32156863]\n [0.54509804]\n [0.69411765]\n [0.96862745]\n [0.98823529]\n [0.6745098 ]\n [0.08235294]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.07058824]\n [0.14117647]\n [0.14509804]\n [0.1254902 ]\n [0.03529412]\n [0.01960784]\n [0.01568627]\n [0.00784314]\n [0. ]\n [0. ]\n [0.00784314]\n [0.05098039]\n [0.20784314]\n [0.87058824]\n [0.99607843]\n [0.84313725]\n [0.14509804]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.03529412]\n [0.2 ]\n [0.87058824]\n [0.99607843]\n [0.84313725]\n [0.14509804]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.13333333]\n [0.45098039]\n [0.96078431]\n [0.98823529]\n [0.6745098 ]\n [0.08235294]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.01176471]\n [0.32941176]\n [0.6745098 ]\n [0.98823529]\n [0.97647059]\n [0.49803922]\n [0.01960784]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.03921569]\n [0.50196078]\n [0.81568627]\n [0.99607843]\n [0.95294118]\n [0.44313725]\n [0.01568627]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.01568627]\n [0.30980392]\n [0.8627451 ]\n [0.96862745]\n [0.96862745]\n [0.63921569]\n [0.12941176]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. 
]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.00784314]\n [0.01568627]\n [0.01960784]\n [0.08235294]\n [0.14509804]\n [0.14509804]\n [0.14509804]\n [0.14509804]\n [0.14509804]\n [0.15294118]\n [0.32941176]\n [0.73333333]\n [0.98431373]\n [0.99607843]\n [0.8627451 ]\n [0.20392157]\n [0.00392157]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.00392157]\n [0.07843137]\n [0.13333333]\n [0.32156863]\n [0.49019608]\n [0.50588235]\n [0.6745098 ]\n [0.84313725]\n [0.85098039]\n [0.85098039]\n [0.85098039]\n [0.85098039]\n [0.85098039]\n [0.91372549]\n [0.98823529]\n [0.97647059]\n [0.91764706]\n [0.60392157]\n [0.07843137]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.01960784]\n [0.18431373]\n [0.62352941]\n [0.79215686]\n [0.91372549]\n [0.97647059]\n [0.98039216]\n [0.98823529]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.87058824]\n [0.56862745]\n [0.09411765]\n [0.00392157]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0.00392157]\n [0.18431373]\n [0.76470588]\n [0.96862745]\n [0.98823529]\n [0.96078431]\n [0.87058824]\n [0.85098039]\n [0.85098039]\n [0.85098039]\n [0.85490196]\n [0.87843137]\n [0.98039216]\n [0.99607843]\n [1. ]\n [1. ]\n [0.99607843]\n [0.85098039]\n [0.50588235]\n [0.01960784]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0.03137255]\n [0.37254902]\n [0.90588235]\n [0.98823529]\n [0.96862745]\n [0.81568627]\n [0.54901961]\n [0.49803922]\n [0.49803922]\n [0.49803922]\n [0.53333333]\n [0.63137255]\n [0.94509804]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.99607843]\n [0.87058824]\n [0.55294118]\n [0.04313725]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. 
]\n [0.01176471]\n [0.30980392]\n [0.85490196]\n [0.98039216]\n [0.85490196]\n [0.63921569]\n [0.18431373]\n [0.03529412]\n [0.01568627]\n [0.02745098]\n [0.05882353]\n [0.37647059]\n [0.65882353]\n [0.97254902]\n [0.99607843]\n [0.96470588]\n [0.9372549 ]\n [0.99215686]\n [0.96862745]\n [0.86666667]\n [0.35686275]\n [0.02745098]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0.08235294]\n [0.66666667]\n [0.97254902]\n [0.73333333]\n [0.30980392]\n [0.12941176]\n [0.00392157]\n [0. ]\n [0.02352941]\n [0.25882353]\n [0.54509804]\n [0.86666667]\n [0.96470588]\n [0.99215686]\n [0.87058824]\n [0.50588235]\n [0.42745098]\n [0.86666667]\n [0.99215686]\n [0.99215686]\n [0.79215686]\n [0.13333333]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0.14509804]\n [0.84313725]\n [0.91764706]\n [0.3372549 ]\n [0.01960784]\n [0.00392157]\n [0.01568627]\n [0.13333333]\n [0.45098039]\n [0.7372549 ]\n [0.97254902]\n [0.99607843]\n [0.99215686]\n [0.90588235]\n [0.48235294]\n [0.04705882]\n [0.04313725]\n [0.48235294]\n [0.90588235]\n [0.96862745]\n [0.91372549]\n [0.32156863]\n [0.00784314]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0.14509804]\n [0.85098039]\n [0.91764706]\n [0.40392157]\n [0.16078431]\n [0.2 ]\n [0.45098039]\n [0.69411765]\n [0.95294118]\n [0.98823529]\n [0.99215686]\n [0.90980392]\n [0.81176471]\n [0.37254902]\n [0.07843137]\n [0. ]\n [0. ]\n [0.07843137]\n [0.37254902]\n [0.62352941]\n [0.82352941]\n [0.41568627]\n [0.01176471]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0.14509804]\n [0.84313725]\n [0.96862745]\n [0.69803922]\n [0.50980392]\n [0.54901961]\n [0.8 ]\n [0.91764706]\n [0.99215686]\n [0.98823529]\n [0.95294118]\n [0.68627451]\n [0.49803922]\n [0.13333333]\n [0.01176471]\n [0. ]\n [0. ]\n [0.01176471]\n [0.13333333]\n [0.30196078]\n [0.48235294]\n [0.24705882]\n [0.00784314]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. 
]\n [0.08235294]\n [0.66666667]\n [0.98039216]\n [0.98823529]\n [0.98039216]\n [0.98039216]\n [0.98431373]\n [0.96078431]\n [0.8627451 ]\n [0.6745098 ]\n [0.45098039]\n [0.13333333]\n [0.03921569]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0.00784314]\n [0.01568627]\n [0.00784314]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0.00784314]\n [0.2627451 ]\n [0.66666667]\n [0.84313725]\n [0.85098039]\n [0.84313725]\n [0.6745098 ]\n [0.45098039]\n [0.19607843]\n [0.08235294]\n [0.01568627]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0.00784314]\n [0.08235294]\n [0.14509804]\n [0.14509804]\n [0.14509804]\n [0.08235294]\n [0.01568627]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]\n\n [[0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]\n [0. ]]]\nb'{\"statusCode\":1,\"result\":\"{\\\\\"output\\\\\": [0.0000,0.0000,0.9999,0.0000,0.0000,0.0000,0.0000,0.0000,0.0000,0.0000]}\",\"tid\":\"sclblt-34e77475-51e7-11eb-962f-9600004e79cc-91\",\"message\":\"ComputeNode assigned successfully. Result returned and available at https://taskmanager.sclbl.net:8080/retrieve/sclblt-34e77475-51e7-11eb-962f-9600004e79cc-91\",\"timeOut\":false,\"resultUrl\":\"https://taskmanager.sclbl.net:8080/retrieve/sclblt-34e77475-51e7-11eb-962f-9600004e79cc-91\"}\\n'\nExpected result: 2\n"
]
],
[
[
"### ... or with raw input",
"_____no_output_____"
]
],
[
[
"# serialize and base64 encode raw input\n\nxc = X_test[127]\nxc = xc.astype('float32')\nraw = xc.tobytes();\nencoded = base64.b64encode(raw)\n\n# then test the model on a sclbl.net cloud server\n\nurl = \"https://taskmanager.sclbl.net:8080/task/34e77475-51e7-11eb-962f-9600004e79cc\"\npayload = \"{\\\"input\\\":{\\\"content-type\\\":\\\"json\\\",\\\"location\\\":\\\"embedded\\\",\\\"data\\\":\\\"{\\\\\\\"type\\\\\\\":\\\\\\\"raw\\\\\\\",\\\\\\\"input\\\\\\\": \\\\\\\"\" + encoded.decode('ascii') + \"\\\\\\\"}\\\"},\\\"output\\\":{\\\"content-type\\\":\\\"json\\\",\\\"location\\\":\\\"echo\\\"},\\\"control\\\":1,\\\"properties\\\":{\\\"language\\\":\\\"WASM\\\"}}\"\n\nresponse = requests.request(\"POST\", url, data = payload)\n\nprint(response.text.encode('utf8'))\nprint(\"Expected result: \" + str(np.argmax(y_test[127])))\n\nprint(encoded.decode('ascii'))",
"b'{\"statusCode\":1,\"result\":\"{\\\\\"output\\\\\": [0.0000,0.0000,0.9999,0.0000,0.0000,0.0000,0.0000,0.0000,0.0000,0.0000]}\",\"tid\":\"sclblt-34e77475-51e7-11eb-962f-9600004e79cc-92\",\"message\":\"ComputeNode assigned successfully. Result returned and available at https://taskmanager.sclbl.net:8080/retrieve/sclblt-34e77475-51e7-11eb-962f-9600004e79cc-92\",\"timeOut\":false,\"resultUrl\":\"https://taskmanager.sclbl.net:8080/retrieve/sclblt-34e77475-51e7-11eb-962f-9600004e79cc-92\"}\\n'\nExpected result: 2\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgYCAPIGAgDyhoKA8qaioPZWUFD6BgAA+kZAQPYGAgDyBgIA8gYAAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGAgDypqKg9ychIPuXk5D77+vo+goEBP62sLD/Y11c/zMtLP42MDD///v4++/r6PqWkpD6xsLA9gYAAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKGgoD3d3Nw+rawsP93cXD/29XU/+vl5P/v6ej/7+no/+/p6P/r5eT/5+Hg/+/p6P/r5eT/p6Gg/q6oqP4OCgj6BgAA8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGAADyfnp4+4uFhP/j3dz/29XU/395eP9rZWT/Y11c/rawsP4KBAT+CgQE/rawsP93cXD/r6mo//v19P/n4eD+Ligo/kZAQPQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADBwEA8v76+PtHQUD/a2Vk/zMtLP42MDD+CgQE/+/r6PqWkpD6dnBw+nZwcPqWkpD6Miws/srExP/j3dz/9/Hw/rawsP6moqD0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJGQkD2RkBA+lZQUPoGAAD6RkBA9oaCgPIGAgDyBgAA8AAAAAAAAAACBgAA80dBQPdXUVD7f3l4///5+P9jXVz+VlBQ+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJGQED3NzEw+395eP//+fj/Y11c/lZQUPgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACJiAg+5+bmPvb1dT/9/Hw/rawsP6moqD0
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADBwEA8qaioPq2sLD/9/Hw/+vl5P//+/j6hoKA8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoaAgPYGAAD/R0FA///5+P/Tzcz/j4uI+gYCAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgYCAPJ+enj7d3Fw/+Pd3P/j3dz+koyM/hYQEPgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgYAAPIGAgDyhoKA8qaioPZWUFD6VlBQ+lZQUPpWUFD6VlBQ+nZwcPqmoqD68uzs//Pt7P//+fj/d3Fw/0dBQPoGAgDsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGAgDuhoKA9iYgIPqWkpD77+vo+goEBP62sLD/Y11c/2tlZP9rZWT/a2Vk/2tlZP9rZWT/q6Wk//fx8P/r5eT/r6mo/m5oaP6GgoD0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKGgoDy9vDw+oJ8fP8vKSj/q6Wk/+vl5P/v6ej/9/Hw///5+P//+fj///n4///5+P//+fj///n4///5+P//+fj/f3l4/kpERP8HAwD2BgIA7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGAgDu9vDw+xMNDP/j3dz/9/Hw/9vV1P9/eXj/a2Vk/2tlZP9rZWT/b2lo/4eBgP/v6ej///n4/AACAPwAAgD///n4/2tlZP4KBAT+hoKA8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACBgAA9v76+PujnZz/9/Hw/+Pd3P9HQUD+NjAw///7+Pv/+/j7//v4+iYgIP6KhIT/y8XE///5+P//+fj///n4///5+P9/eXj+OjQ0/sbAwPQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADBwEA8n56ePtvaWj/7+no/29paP6SjIz+9vDw+kZAQPYGAgDzh4OA88fBwPcHAwD6pqCg/+fh4P//+fj/39nY/8O9vP/79fT/493c/3t1dP7e2tj7h4OA8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqaioPauqKj/5+Hg/vLs7P5+enj6FhAQ+gYCAOwAAAADBwMA8hYSEPoyLCz/e3V0/9/Z2P/79fT/f3l4/goEBP9va2j7e3V0//v19P/79fT/Lyko/iYgIPgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJWUFD7Y11c/6+pqP62srD6hoKA8gYCAO4GAgDyJiAg+5+bmPr28PD/5+Hg///5+P/79fT/o52c/9/b2PsHAQD2xsDA99/b2PujnZz/493c/6ulpP6WkpD6BgAA8AAAAAAAAAAAAAAAAAAAAAAAAAACVlBQ+2tlZP+vqaj/Pzs4+paQkPs3MTD7n5uY+srExP/Tzcz/9/Hw//v19P+noaD/Qz08/v76+PqGgoD0AAAAAAAAAAKGgoD2/vr4+oJ8fP9PSUj/V1NQ+wcBAPAAAAAAAAAAAAAAAAAAAAAAAAAAAlZQUPtjXVz/493c/s7IyP4OCAj+NjAw/zcxMP+vqaj/+/X0//fx8P/Tzcz+wry8///7+PomICD7BwEA8AAAAAAAAAADBwEA8iYgIPpuamj739vY+/fx8PoGAADwAAAAAAAAAAAAAAAAAAAAAAAAAAKmoqD2rqio/+/p6P/38fD/7+no/+/p6P/z7ez/
29XU/3dxcP62sLD/n5uY+iYgIPqGgID0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACBgAA8gYCAPIGAADwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACBgAA8h4aGPquqKj/Y11c/2tlZP9jXVz+trCw/5+bmPsnISD6pqKg9gYCAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGAADypqKg9lZQUPpWUFD6VlBQ+qaioPYGAgDwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9925e31c1add9a2bae6de3f430f4225690378c
| 1,037,533 |
ipynb
|
Jupyter Notebook
|
notebooks/controller_comparison_analysis.ipynb
|
epfl-lasa/crowdbot-evaluation-tools
|
0e98c76428f6af5a4caa6b83b91ac05b3ed300ce
|
[
"MIT"
] | 4 |
2022-01-26T13:10:23.000Z
|
2022-03-02T19:42:11.000Z
|
notebooks/controller_comparison_analysis.ipynb
|
epfl-lasa/crowdbot-evaluation-tools
|
0e98c76428f6af5a4caa6b83b91ac05b3ed300ce
|
[
"MIT"
] | null | null | null |
notebooks/controller_comparison_analysis.ipynb
|
epfl-lasa/crowdbot-evaluation-tools
|
0e98c76428f6af5a4caa6b83b91ac05b3ed300ce
|
[
"MIT"
] | null | null | null | 278.607143 | 95,870 | 0.910359 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Libraries-&-settings\" data-toc-modified-id=\"Libraries-&-settings-1\"><span class=\"toc-item-num\">1 </span>Libraries & settings</a></span></li><li><span><a href=\"#Metrics\" data-toc-modified-id=\"Metrics-2\"><span class=\"toc-item-num\">2 </span>Metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#Crowd-related\" data-toc-modified-id=\"Crowd-related-2.1\"><span class=\"toc-item-num\">2.1 </span>Crowd-related</a></span></li><li><span><a href=\"#Path-efficiency-related\" data-toc-modified-id=\"Path-efficiency-related-2.2\"><span class=\"toc-item-num\">2.2 </span>Path efficiency-related</a></span></li><li><span><a href=\"#Control-related\" data-toc-modified-id=\"Control-related-2.3\"><span class=\"toc-item-num\">2.3 </span>Control-related</a></span></li></ul></li><li><span><a href=\"#Pipeline\" data-toc-modified-id=\"Pipeline-3\"><span class=\"toc-item-num\">3 </span>Pipeline</a></span><ul class=\"toc-item\"><li><span><a href=\"#Result-loading\" data-toc-modified-id=\"Result-loading-3.1\"><span class=\"toc-item-num\">3.1 </span>Result loading</a></span></li><li><span><a href=\"#Mean-Std-statistics\" data-toc-modified-id=\"Mean-Std-statistics-3.2\"><span class=\"toc-item-num\">3.2 </span>Mean-Std statistics</a></span></li><li><span><a href=\"#ANOVA-test-for-controller-comparison\" data-toc-modified-id=\"ANOVA-test-for-controller-comparison-3.3\"><span class=\"toc-item-num\">3.3 </span>ANOVA test for controller comparison</a></span></li><li><span><a href=\"#Visualize-with-grouping-by-date\" data-toc-modified-id=\"Visualize-with-grouping-by-date-3.4\"><span class=\"toc-item-num\">3.4 </span>Visualize with grouping by date</a></span><ul class=\"toc-item\"><li><span><a href=\"#Palette-settings\" data-toc-modified-id=\"Palette-settings-3.4.1\"><span class=\"toc-item-num\">3.4.1 </span>Palette settings</a></span></li><li><span><a 
href=\"#Crowd-related-metrics\" data-toc-modified-id=\"Crowd-related-metrics-3.4.2\"><span class=\"toc-item-num\">3.4.2 </span>Crowd-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#4-in-1-plotting\" data-toc-modified-id=\"4-in-1-plotting-3.4.2.1\"><span class=\"toc-item-num\">3.4.2.1 </span>4-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.4.2.2\"><span class=\"toc-item-num\">3.4.2.2 </span>Individual figures</a></span></li></ul></li><li><span><a href=\"#Path-efficiency-related-metrics\" data-toc-modified-id=\"Path-efficiency-related-metrics-3.4.3\"><span class=\"toc-item-num\">3.4.3 </span>Path efficiency-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#2-in-1-plotting\" data-toc-modified-id=\"2-in-1-plotting-3.4.3.1\"><span class=\"toc-item-num\">3.4.3.1 </span>2-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.4.3.2\"><span class=\"toc-item-num\">3.4.3.2 </span>Individual figures</a></span></li></ul></li><li><span><a href=\"#Control-related-metrics\" data-toc-modified-id=\"Control-related-metrics-3.4.4\"><span class=\"toc-item-num\">3.4.4 </span>Control-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#4-in-1-plotting\" data-toc-modified-id=\"4-in-1-plotting-3.4.4.1\"><span class=\"toc-item-num\">3.4.4.1 </span>4-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.4.4.2\"><span class=\"toc-item-num\">3.4.4.2 </span>Individual figures</a></span></li></ul></li></ul></li><li><span><a href=\"#Visualize-without-grouping-by-date\" data-toc-modified-id=\"Visualize-without-grouping-by-date-3.5\"><span class=\"toc-item-num\">3.5 </span>Visualize without grouping by date</a></span><ul class=\"toc-item\"><li><span><a href=\"#Palette-settings\" data-toc-modified-id=\"Palette-settings-3.5.1\"><span 
class=\"toc-item-num\">3.5.1 </span>Palette settings</a></span></li><li><span><a href=\"#Crowd-related-metrics\" data-toc-modified-id=\"Crowd-related-metrics-3.5.2\"><span class=\"toc-item-num\">3.5.2 </span>Crowd-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#4-in-1-plotting\" data-toc-modified-id=\"4-in-1-plotting-3.5.2.1\"><span class=\"toc-item-num\">3.5.2.1 </span>4-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.5.2.2\"><span class=\"toc-item-num\">3.5.2.2 </span>Individual figures</a></span></li></ul></li><li><span><a href=\"#Path-efficiency-related-metrics\" data-toc-modified-id=\"Path-efficiency-related-metrics-3.5.3\"><span class=\"toc-item-num\">3.5.3 </span>Path efficiency-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#2-in-1-plotting\" data-toc-modified-id=\"2-in-1-plotting-3.5.3.1\"><span class=\"toc-item-num\">3.5.3.1 </span>2-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.5.3.2\"><span class=\"toc-item-num\">3.5.3.2 </span>Individual figures</a></span></li></ul></li><li><span><a href=\"#Control-related-metrics\" data-toc-modified-id=\"Control-related-metrics-3.5.4\"><span class=\"toc-item-num\">3.5.4 </span>Control-related metrics</a></span><ul class=\"toc-item\"><li><span><a href=\"#4-in-1-plotting\" data-toc-modified-id=\"4-in-1-plotting-3.5.4.1\"><span class=\"toc-item-num\">3.5.4.1 </span>4-in-1 plotting</a></span></li><li><span><a href=\"#Individual-figures\" data-toc-modified-id=\"Individual-figures-3.5.4.2\"><span class=\"toc-item-num\">3.5.4.2 </span>Individual figures</a></span></li></ul></li></ul></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"# Controller comparison analysis\n\n> Analysis of different control methods on 2021-04-10 and 2021-04-10 data",
"_____no_output_____"
],
[
"## Libraries & settings",
"_____no_output_____"
]
],
[
[
"import math\nimport datetime\nimport collections\nimport sys, os, fnmatch\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nuse_serif_font = True\nif use_serif_font:\n plt.style.use('./styles/serif.mplstyle')\nelse:\n plt.style.use('./styles/sans_serif.mplstyle')\n\nplt.ioff()\nimport seaborn as sns\nsns.set_context(\"paper\", font_scale=1.2, rc={\"lines.linewidth\": 1.3})\n\nfrom qolo.utils.notebook_util import (\n walk,\n values2colors,\n values2color_list,\n violinplot,\n categorical_plot,\n barplot_annotate_brackets,\n import_eval_res,\n)\nfrom qolo.core.crowdbot_data import CrowdBotDatabase, CrowdBotData\nfrom qolo.metrics.metric_qolo_perf import compute_rel_jerk",
"_____no_output_____"
]
],
[
[
"## Metrics",
"_____no_output_____"
],
[
"### Crowd-related\n\n1. Crowd Density (within an area of 2.5, 5m, 10m around the robot):\n2. Minimum distance to pedestrians: \n3. Number of violations to the virtual boundary set to the robot controller ",
"_____no_output_____"
]
],
[
[
"crowd_metrics = (\n 'avg_crowd_density2_5',\n 'std_crowd_density2_5',\n 'max_crowd_density2_5',\n 'avg_crowd_density5',\n 'std_crowd_density5',\n 'max_crowd_density5',\n 'avg_min_dist',\n 'virtual_collision',\n)",
"_____no_output_____"
]
],
[
[
"### Path efficiency-related\n\n1. Relative time to goal (normalized by the goal distance)\n2. Relative path length (normalized by the goal distance in straight line):",
"_____no_output_____"
]
],
[
[
"path_metrics = (\n 'rel_duration2goal',\n 'rel_path_length2goal',\n 'path_length2goal',\n 'duration2goal',\n 'min_dist2goal',\n)",
"_____no_output_____"
]
],
[
[
"### Control-related\n\n1. Agreement\n2. Fluency\n3. Contribution\n4. Relative Jerk (smoothness of the path as added sum of linear and angular jerk)",
"_____no_output_____"
]
],
[
[
"control_metrics = (\n 'rel_jerk',\n 'avg_fluency',\n 'contribution',\n 'avg_agreement',\n)",
"_____no_output_____"
]
],
[
[
"## Pipeline",
"_____no_output_____"
]
],
[
[
"qolo_dataset = CrowdBotData()\n\nbagbase = qolo_dataset.bagbase_dir\noutbase = qolo_dataset.outbase_dir",
"_____no_output_____"
]
],
[
[
"### Result loading",
"_____no_output_____"
]
],
[
[
"chosen_dates = ['0410', '0424']\nchosen_type = ['mds', 'rds', 'shared_control']\n\neval_dirs = []\n\nfor root, dirs, files in walk(outbase, topdown=False, maxdepth=1):\n for dir_ in dirs:\n if any(s in dir_ for s in chosen_dates) and any(s in dir_ for s in chosen_type):\n dir_ = dir_.replace(\"_processed\", \"\")\n eval_dirs.append(dir_)\n print(\"{}/ is available!\".format(dir_))",
"0410_mds/ is available!\n0410_rds/ is available!\n0410_shared_control/ is available!\n0424_mds/ is available!\n0424_rds/ is available!\n0424_shared_control/ is available!\n"
],
[
"eval_res_df = import_eval_res(eval_dirs)",
"Reading results from 0410_mds\nReading results from 0410_rds\nReading results from 0410_shared_control\nReading results from 0424_mds\nReading results from 0424_rds\nReading results from 0424_shared_control\n"
],
[
"eval_res_df.head()",
"_____no_output_____"
]
],
[
[
"### Mean-Std statistics",
"_____no_output_____"
]
],
[
[
"for ctrl in chosen_type:\n print(ctrl, \":\", len(eval_res_df[eval_res_df.control_type == ctrl]))\n\nframes_stat = []\nfor ctrl in chosen_type:\n eval_res_df_ = eval_res_df[eval_res_df.control_type == ctrl]\n stat_df = eval_res_df_.drop(['date'], axis=1).agg(['mean', 'std'])\n if ctrl == 'shared_control':\n stat_df.index = 'sc_'+stat_df.index.values\n else:\n stat_df.index = ctrl+'_'+stat_df.index.values\n frames_stat.append(stat_df)\n \nstat_df_all = pd.concat(frames_stat) # , ignore_index=True\nstat_df_all.index.name = 'Metrics'\nstat_df_all",
"mds : 16\nrds : 20\nshared_control : 16\n"
],
[
"export_metrics = (\n 'avg_crowd_density2_5',\n 'max_crowd_density2_5',\n # 'avg_crowd_density5',\n 'avg_min_dist',\n 'rel_duration2goal',\n 'rel_path_length2goal',\n 'rel_jerk',\n 'contribution',\n 'avg_fluency',\n 'avg_agreement',\n 'virtual_collision',\n)",
"_____no_output_____"
],
[
"export_control_df = stat_df_all[list(export_metrics)]\nmetrics_len = len(export_control_df.loc['mds_mean'])\nmethods = ['MDS', 'RDS', 'shared_control']",
"_____no_output_____"
],
[
"for idxx, method in enumerate(methods):\n str_out = []\n for idx in range(metrics_len):\n avg = \"${:0.2f}\".format(round(export_control_df.iloc[2*idxx,idx],2))\n std = \"{:0.2f}$\".format(round(export_control_df.iloc[2*idxx+1,idx],2))\n str_out.append(avg+\" \\pm \"+std)\n export_control_df.loc[method] = str_out\nexport_contro_str_df = export_control_df.iloc[6:9]\nexport_contro_str_df",
"_____no_output_____"
],
[
"# print(export_contro_str_df.to_latex())\n# print(export_contro_str_df.T.to_latex())",
"_____no_output_____"
]
],
[
[
"### ANOVA test for controller comparison",
"_____no_output_____"
]
],
[
[
"anova_metrics = (\n 'avg_crowd_density2_5',\n 'max_crowd_density2_5',\n 'avg_crowd_density5',\n 'avg_min_dist',\n 'virtual_collision',\n 'rel_duration2goal',\n 'rel_path_length2goal',\n 'rel_jerk',\n 'contribution',\n 'avg_fluency',\n 'avg_agreement',\n)",
"_____no_output_____"
],
[
"mds_anova_ = eval_res_df[eval_res_df.control_type=='mds']\nmds_metrics = mds_anova_[list(anova_metrics)].values\n\nrds_anova_ = eval_res_df[eval_res_df.control_type=='rds']\nrds_metrics = rds_anova_[list(anova_metrics)].values\n\nshared_control_anova_ = eval_res_df[eval_res_df.control_type=='shared_control']\nshared_control_metrics = shared_control_anova_[list(anova_metrics)].values",
"_____no_output_____"
],
[
"fvalue12, pvalue12 = stats.f_oneway(mds_metrics, rds_metrics)\nfvalue23, pvalue23 = stats.f_oneway(mds_metrics, shared_control_metrics)\nfvalue13, pvalue13 = stats.f_oneway(rds_metrics, shared_control_metrics)\n# total\nfvalue, pvalue = stats.f_oneway(mds_metrics, rds_metrics, shared_control_metrics)\n\nstatP_df = pd.DataFrame(\n data=np.vstack((pvalue12, pvalue23, pvalue13, pvalue)), \n index=['mds-rds', 'mds-shared', 'rds-shared', 'total'],\n)\nstatP_df.columns = list(anova_metrics)\nstatP_df.index.name = 'Metrics'\n\nstatF_df = pd.DataFrame(\n data=np.vstack((fvalue12, fvalue23, fvalue13, fvalue)), \n index=['mds-rds', 'mds-shared', 'rds-shared', 'total'],\n)\nstatF_df.columns = list(anova_metrics)\nstatF_df.index.name = 'Metrics'",
"_____no_output_____"
],
[
"statP_df",
"_____no_output_____"
],
[
"statF_df",
"_____no_output_____"
],
[
"# print(statF_df.T.to_latex())\n# print(statP_df.T.to_latex())\n# print(stat_df_all.T.to_latex())",
"_____no_output_____"
]
],
[
[
"### Visualize with grouping by date",
"_____no_output_____"
],
[
"#### Palette settings",
"_____no_output_____"
]
],
[
[
"dates=['0410', '0424']\n\nvalue_unique, color_unique = values2color_list(\n dates, cmap_name='hot', range=(0.55, 0.75)\n )\n\nvalue_unique, point_color_unique = values2color_list(\n dates, cmap_name='hot', range=(0.3, 0.6)\n )\n\n# creating a dictionary with one specific color per group:\nbox_pal = {value_unique[i]: color_unique[i] for i in range(len(value_unique))}\n# original: (0.3, 0.6)\nscatter_pal = {value_unique[i]: point_color_unique[i] for i in range(len(value_unique))}\n# black\n# scatter_pal = {value_unique[i]: (0.0, 0.0, 0.0, 1.0) for i in range(len(value_unique))}\n# gray\n# scatter_pal = {value_unique[i]: (0.3, 0.3, 0.3, 0.8) for i in range(len(value_unique))}\nbox_pal, scatter_pal",
"_____no_output_____"
]
],
[
[
"#### Crowd-related metrics",
"_____no_output_____"
]
],
[
[
"crowd_metrics_df = eval_res_df[['seq', 'control_type'] + list(crowd_metrics) + ['date']]",
"_____no_output_____"
],
[
"for ctrl in chosen_type:\n print(\"###\", ctrl)\n print(\"# mean\")\n print(crowd_metrics_df[crowd_metrics_df.control_type == ctrl].mean(numeric_only=True))\n # print(\"# std\")\n # print(crowd_metrics_df[crowd_metrics_df.control_type == ctrl].std(numeric_only=True))\n print()",
"### mds\n# mean\navg_crowd_density2_5 0.115182\nstd_crowd_density2_5 0.084864\nmax_crowd_density2_5 0.452000\navg_crowd_density5 0.116808\nstd_crowd_density5 0.049867\nmax_crowd_density5 0.269768\navg_min_dist 1.189581\nvirtual_collision 3.500000\ndtype: float64\n\n### rds\n# mean\navg_crowd_density2_5 0.129016\nstd_crowd_density2_5 0.089006\nmax_crowd_density2_5 0.466006\navg_crowd_density5 0.126946\nstd_crowd_density5 0.051916\nmax_crowd_density5 0.284569\navg_min_dist 1.077405\nvirtual_collision 7.050000\ndtype: float64\n\n### shared_control\n# mean\navg_crowd_density2_5 0.116123\nstd_crowd_density2_5 0.089108\nmax_crowd_density2_5 0.509296\navg_crowd_density5 0.126017\nstd_crowd_density5 0.052694\nmax_crowd_density5 0.300007\navg_min_dist 1.195912\nvirtual_collision 4.250000\ndtype: float64\n\n"
],
[
"print(\"# max value in each metrics\")\nprint(crowd_metrics_df.max(numeric_only=True))\nprint(\"# min value in each metrics\")\nprint(crowd_metrics_df.min(numeric_only=True))",
"# max value in each metrics\navg_crowd_density2_5 0.180251\nstd_crowd_density2_5 0.119700\nmax_crowd_density2_5 0.814873\navg_crowd_density5 0.169079\nstd_crowd_density5 0.084297\nmax_crowd_density5 0.394704\navg_min_dist 1.515852\nvirtual_collision 36.000000\ndtype: float64\n# min value in each metrics\navg_crowd_density2_5 0.072533\nstd_crowd_density2_5 0.059217\nmax_crowd_density2_5 0.305577\navg_crowd_density5 0.089543\nstd_crowd_density5 0.030495\nmax_crowd_density5 0.203718\navg_min_dist 0.804015\nvirtual_collision 0.000000\ndtype: float64\n"
]
],
[
[
"##### 4-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\nfig, axes = plt.subplots(2, 2, figsize=(16, 10))\n\ncategorical_plot(\n axes=axes[0,0],\n df=crowd_metrics_df,\n metric='avg_crowd_density2_5',\n category='control_type',\n title='Mean crowd density within 2.5 m',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.0, 0.25],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[0,0].set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\naxes[0,0].tick_params(axis='x', labelsize=16)\naxes[0,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[0,1],\n df=crowd_metrics_df,\n metric='max_crowd_density2_5',\n category='control_type',\n title='Max crowd density within 2.5 m',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.3, 0.90],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[0,1].set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\naxes[0,1].tick_params(axis='x', labelsize=16)\naxes[0,1].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[1,0],\n df=crowd_metrics_df,\n metric='virtual_collision',\n category='control_type',\n title='Virtual collision with Qolo',\n xlabel='',\n ylabel='',\n ylim=[-0.1, 20],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[1,0].set_ylabel(\"Virtual collision\", fontsize=16)\naxes[1,0].tick_params(axis='x', labelsize=16)\naxes[1,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[1,1],\n df=crowd_metrics_df,\n metric='avg_min_dist',\n category='control_type',\n 
title='Min. distance of pedestrians from Qolo',\n    xlabel='',\n    ylabel='Distance [m]',\n    ylim=[0.6, 2.0],\n    kind='box',\n    lgd_labels=['April 10, 2021', 'April 24, 2021'],\n    lgd_font=\"Times New Roman\",\n    group='date',\n    loc='upper left',\n    box_palette=box_pal,\n    scatter_palette=scatter_pal,\n)\n\naxes[1,1].set_ylabel(\"Distance [m]\", fontsize=16)\naxes[1,1].tick_params(axis='x', labelsize=16)\naxes[1,1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_crowd_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig1, control_axes1 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes1,\n df=crowd_metrics_df,\n metric='avg_crowd_density2_5',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.0, 0.25],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes1.set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\ncontrol_axes1.tick_params(axis='x', labelsize=16)\ncontrol_axes1.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_mean_density_2_5_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig2, control_axes2 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes2,\n df=crowd_metrics_df,\n metric='max_crowd_density2_5',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.3, 0.90],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes2.set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\ncontrol_axes2.tick_params(axis='x', labelsize=16)\ncontrol_axes2.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_max_density_2_5_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig3, control_axes3 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes3,\n df=crowd_metrics_df,\n metric='virtual_collision',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[-0.1, 20],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes3.set_ylabel(\"Virtual collision\", fontsize=16)\ncontrol_axes3.tick_params(axis='x', labelsize=16)\ncontrol_axes3.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_virtual_collision_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig4, control_axes4 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes4,\n df=crowd_metrics_df,\n metric='avg_min_dist',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Distance [m]',\n ylim=[0.6, 2.0],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes4.set_ylabel(\"Distance [m]\", fontsize=16)\ncontrol_axes4.tick_params(axis='x', labelsize=16)\ncontrol_axes4.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_mean_min_dist_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"#### Path efficiency-related metrics",
"_____no_output_____"
]
],
[
[
"path_metrics_df = eval_res_df[['seq', 'control_type'] + list(path_metrics) + ['date']]",
"_____no_output_____"
],
[
"print(\"# max value in each metrics\")\nprint(path_metrics_df.max(numeric_only=True))\nprint(\"# min value in each metrics\")\nprint(path_metrics_df.min(numeric_only=True))",
"# max value in each metrics\nrel_duration2goal 0.585570\nrel_path_length2goal 2.808815\npath_length2goal 61.574150\nduration2goal 126.312773\nmin_dist2goal 39.532685\ndtype: float64\n# min value in each metrics\nrel_duration2goal 0.129141\nrel_path_length2goal 1.056256\npath_length2goal 7.429669\nduration2goal 17.730146\nmin_dist2goal 0.146512\ndtype: float64\n"
]
],
[
[
"##### 2-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\npath_fig, path_axes = plt.subplots(1, 2, figsize=(16, 5))\n\ncategorical_plot(\n axes=path_axes[0],\n df=path_metrics_df,\n metric='rel_duration2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\npath_axes[0].set_ylabel(\"Relative time to the goal\", fontsize=16)\npath_axes[0].tick_params(axis='x', labelsize=16)\npath_axes[0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=path_axes[1],\n df=path_metrics_df,\n metric='rel_path_length2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 3.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\npath_axes[1].set_ylabel(\"Relative path length to the goal\", fontsize=16)\npath_axes[1].tick_params(axis='x', labelsize=16)\npath_axes[1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_path_efficiency_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig5, control_axes5 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes5,\n df=path_metrics_df,\n metric='rel_duration2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes5.set_ylabel(\"Relative time to the goal\", fontsize=16)\ncontrol_axes5.tick_params(axis='x', labelsize=16)\ncontrol_axes5.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_time2goal_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig6, control_axes6 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes6,\n df=path_metrics_df,\n metric='rel_path_length2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[1.0, 2.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes6.set_ylabel(\"Relative path length to the goal\", fontsize=16)\ncontrol_axes6.tick_params(axis='x', labelsize=16)\ncontrol_axes6.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_path_length2goal_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"#### Control-related metrics",
"_____no_output_____"
]
],
[
[
"control_metrics_df = eval_res_df[['seq', 'control_type'] + list(control_metrics) + ['date']]",
"_____no_output_____"
],
[
"print(\"# max value in each metrics\")\nprint(control_metrics_df.max(numeric_only=True))\nprint(\"# min value in each metrics\")\nprint(control_metrics_df.min(numeric_only=True))",
"# max value in each metrics\nrel_jerk 0.252785\navg_fluency 0.996798\ncontribution 1.108100\navg_agreement 0.909517\ndtype: float64\n# min value in each metrics\nrel_jerk 0.030936\navg_fluency 0.918531\ncontribution 0.206484\navg_agreement 0.550326\ndtype: float64\n"
]
],
[
[
"##### 4-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig, control_axes = plt.subplots(2, 2, figsize=(16, 12))\n\ncategorical_plot(\n axes=control_axes[0,0],\n df=control_metrics_df,\n metric='avg_fluency',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.90, 1.02],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[0,0].set_ylabel(\"Average control fluency\", fontsize=16)\ncontrol_axes[0,0].tick_params(axis='x', labelsize=16)\ncontrol_axes[0,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[0,1],\n df=control_metrics_df,\n metric='rel_jerk',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0, 0.35],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[0,1].set_ylabel(\"Relative jerk\", fontsize=16)\ncontrol_axes[0,1].tick_params(axis='x', labelsize=16)\ncontrol_axes[0,1].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[1,0],\n df=control_metrics_df,\n metric='contribution',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.2],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[1,0].set_ylabel(\"Contribution\", fontsize=16)\ncontrol_axes[1,0].tick_params(axis='x', labelsize=16)\ncontrol_axes[1,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[1,1],\n df=control_metrics_df,\n metric='avg_agreement',\n category='control_type',\n title='',\n xlabel='',\n 
ylabel='',\n ylim=[0.5, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[1,1].set_ylabel(\"Average agreement\", fontsize=16)\ncontrol_axes[1,1].tick_params(axis='x', labelsize=16)\ncontrol_axes[1,1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_control_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig7, control_axes7 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes7,\n df=control_metrics_df,\n metric='avg_fluency',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.90, 1.02],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes7.set_ylabel(\"Average control fluency\", fontsize=16)\ncontrol_axes7.tick_params(axis='x', labelsize=16)\ncontrol_axes7.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_avg_fluency_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig8, control_axes8 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes8,\n df=control_metrics_df,\n metric='rel_jerk',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0, 0.35],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes8.set_ylabel(\"Relative jerk\", fontsize=16)\ncontrol_axes8.tick_params(axis='x', labelsize=16)\ncontrol_axes8.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_jerk_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig9, control_axes9 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes9,\n df=control_metrics_df,\n metric='contribution',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.2],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes9.set_ylabel(\"Contribution\", fontsize=16)\ncontrol_axes9.tick_params(axis='x', labelsize=16)\ncontrol_axes9.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_contribution_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig10, control_axes10 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes10,\n df=control_metrics_df,\n metric='avg_agreement',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.5, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes10.set_ylabel(\"Average agreement\", fontsize=16)\ncontrol_axes10.tick_params(axis='x', labelsize=16)\ncontrol_axes10.tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/pub/control_boxplot_avg_agreement_group_by_date.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"crowd_metrics_df0424 = crowd_metrics_df[crowd_metrics_df.date=='0424'].sort_values('control_type', ascending=False)\nprint(\"Sequence on 0424\")\nprint(crowd_metrics_df0424['control_type'].value_counts())",
"Sequence on 0424\nshared_control 8\nrds 7\nmds 6\nName: control_type, dtype: int64\n"
],
[
"crowd_metrics_df0410 = crowd_metrics_df[crowd_metrics_df.date=='0410'].sort_values(by=['control_type'], ascending=False, ignore_index=True).reindex()\nprint(\"Sequence on 0410\")\nprint(crowd_metrics_df0410['control_type'].value_counts())",
"Sequence on 0410\nrds 13\nmds 10\nshared_control 8\nName: control_type, dtype: int64\n"
]
],
[
[
"### Visualize without grouping by date",
"_____no_output_____"
],
[
"#### Palette settings",
"_____no_output_____"
]
],
[
[
"control_methods=['mds', 'rds', 'shared_control']\n\nvalue_unique, color_unique = values2color_list(\n eval_res_df['control_type'].values, cmap_name='hot', range=(0.55, 0.75)\n )\n\nvalue_unique, point_color_unique = values2color_list(\n eval_res_df['control_type'].values, cmap_name='hot', range=(0.35, 0.5)\n )\n\n# creating a dictionary with one specific color per group:\nbox_pal = {value_unique[i]: color_unique[i] for i in range(len(value_unique))}\n# original: (0.3, 0.6)\n# scatter_pal = {value_unique[i]: point_color_unique[i] for i in range(len(value_unique))}\n# black\n# scatter_pal = {value_unique[i]: (0.0, 0.0, 0.0, 1.0) for i in range(len(value_unique))}\n# gray\nscatter_pal = {value_unique[i]: (0.3, 0.3, 0.3, 0.8) for i in range(len(value_unique))}\nbox_pal, scatter_pal",
"_____no_output_____"
]
],
[
[
"#### Crowd-related metrics",
"_____no_output_____"
],
[
"##### 4-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\nfig, axes = plt.subplots(2, 2, figsize=(16, 10))\n\ncategorical_plot(\n axes=axes[0,0],\n df=crowd_metrics_df,\n metric='avg_crowd_density2_5',\n category='control_type',\n title='Mean crowd density within 2.5 m',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.05, 0.20],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n #group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[0,0].set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\naxes[0,0].tick_params(axis='x', labelsize=16)\naxes[0,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[0,1],\n df=crowd_metrics_df,\n metric='max_crowd_density2_5',\n category='control_type',\n title='Max crowd density within 2.5 m',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.3, 0.90],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n #group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[0,1].set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\naxes[0,1].tick_params(axis='x', labelsize=16)\naxes[0,1].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[1,0],\n df=crowd_metrics_df,\n metric='virtual_collision',\n category='control_type',\n title='Virtual collision with Qolo',\n xlabel='',\n ylabel='',\n ylim=[-0.1, 20],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n #group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\naxes[1,0].set_ylabel(\"Virtual collision\", fontsize=16)\naxes[1,0].tick_params(axis='x', labelsize=16)\naxes[1,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=axes[1,1],\n df=crowd_metrics_df,\n metric='avg_min_dist',\n category='control_type',\n 
title='Min. distance of pedestrians from Qolo',\n    xlabel='',\n    ylabel='Distance [m]',\n    ylim=[0.6, 1.6],\n    kind='box',\n    lgd_labels=['April 10, 2021', 'April 24, 2021'],\n    lgd_font=\"Times New Roman\",\n    #group='date',\n    loc='upper left',\n    box_palette=box_pal,\n    scatter_palette=scatter_pal,\n)\n\naxes[1,1].set_ylabel(\"Distance [m]\", fontsize=16)\naxes[1,1].tick_params(axis='x', labelsize=16)\naxes[1,1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_crowd.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig1, control_axes1 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes1,\n df=crowd_metrics_df,\n metric='avg_crowd_density2_5',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.05, 0.20],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes1.set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\ncontrol_axes1.tick_params(axis='x', labelsize=16)\ncontrol_axes1.tick_params(axis='y', labelsize=14)\ncontrol_axes1.set_xticks([0,1,2])\ncontrol_axes1.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_mean_density_2_5.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig2, control_axes2 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes2,\n df=crowd_metrics_df,\n metric='max_crowd_density2_5',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Density [1/$m^2$]',\n ylim=[0.2, 0.90],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes2.set_ylabel(\"Density [1/$m^2$]\", fontsize=16)\ncontrol_axes2.tick_params(axis='x', labelsize=16)\ncontrol_axes2.tick_params(axis='y', labelsize=14)\ncontrol_axes2.set_xticks([0,1,2])\ncontrol_axes2.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_max_density_2_5.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig3, control_axes3 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes3,\n df=crowd_metrics_df,\n metric='virtual_collision',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[-0.1, 15],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes3.set_ylabel(\"Virtual collision\", fontsize=16)\ncontrol_axes3.tick_params(axis='x', labelsize=16)\ncontrol_axes3.tick_params(axis='y', labelsize=14)\ncontrol_axes3.set_xticks([0,1,2])\ncontrol_axes3.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_virtual_collision.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig4, control_axes4 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes4,\n df=crowd_metrics_df,\n metric='avg_min_dist',\n category='control_type',\n title='',\n xlabel='',\n ylabel='Distance [m]',\n ylim=[0.6, 1.6],\n kind='box',\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes4.set_ylabel(\"Distance [m]\", fontsize=16)\ncontrol_axes4.tick_params(axis='x', labelsize=16)\ncontrol_axes4.tick_params(axis='y', labelsize=14)\ncontrol_axes4.set_xticks([0,1,2])\ncontrol_axes4.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_mean_min_dist.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"#### Path efficiency-related metrics",
"_____no_output_____"
],
[
"##### 2-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\npath_fig, path_axes = plt.subplots(1, 2, figsize=(16, 5))\n\ncategorical_plot(\n axes=path_axes[0],\n df=path_metrics_df,\n metric='rel_duration2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\npath_axes[0].set_ylabel(\"Relative time to the goal\", fontsize=16)\npath_axes[0].tick_params(axis='x', labelsize=16)\npath_axes[0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=path_axes[1],\n df=path_metrics_df,\n metric='rel_path_length2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[1.0, 2.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\npath_axes[1].set_ylabel(\"Relative path length to the goal\", fontsize=16)\npath_axes[1].tick_params(axis='x', labelsize=16)\npath_axes[1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_path_efficiency.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig5, control_axes5 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes5,\n df=path_metrics_df,\n metric='rel_duration2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes5.set_ylabel(\"Relative time to the goal\", fontsize=16)\ncontrol_axes5.tick_params(axis='x', labelsize=16)\ncontrol_axes5.tick_params(axis='y', labelsize=14)\ncontrol_axes5.set_xticks([0,1,2])\ncontrol_axes5.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_time2goal.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig6, control_axes6 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes6,\n df=path_metrics_df,\n metric='rel_path_length2goal',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[1.0, 2.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes6.set_ylabel(\"Relative path length to the goal\", fontsize=16)\ncontrol_axes6.tick_params(axis='x', labelsize=16)\ncontrol_axes6.tick_params(axis='y', labelsize=14)\ncontrol_axes6.set_xticks([0,1,2])\ncontrol_axes6.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_path_length2goal.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"#### Control-related metrics",
"_____no_output_____"
],
[
"##### 4-in-1 plotting",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig, control_axes = plt.subplots(2, 2, figsize=(16, 12))\n\ncategorical_plot(\n axes=control_axes[0,0],\n df=control_metrics_df,\n metric='avg_fluency',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.90, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[0,0].set_ylabel(\"Average control fluency\", fontsize=16)\ncontrol_axes[0,0].tick_params(axis='x', labelsize=16)\ncontrol_axes[0,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[0,1],\n df=control_metrics_df,\n metric='rel_jerk',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0, 0.3],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[0,1].set_ylabel(\"Relative jerk\", fontsize=16)\ncontrol_axes[0,1].tick_params(axis='x', labelsize=16)\ncontrol_axes[0,1].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[1,0],\n df=control_metrics_df,\n metric='contribution',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.2],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[1,0].set_ylabel(\"Contribution\", fontsize=16)\ncontrol_axes[1,0].tick_params(axis='x', labelsize=16)\ncontrol_axes[1,0].tick_params(axis='y', labelsize=14)\n\ncategorical_plot(\n axes=control_axes[1,1],\n df=control_metrics_df,\n metric='avg_agreement',\n category='control_type',\n title='',\n xlabel='',\n 
ylabel='',\n ylim=[0.5, 1.0],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes[1,1].set_ylabel(\"Average agreement\", fontsize=16)\ncontrol_axes[1,1].tick_params(axis='x', labelsize=16)\ncontrol_axes[1,1].tick_params(axis='y', labelsize=14)\n\nplt.savefig(\"./pdf/comp_control.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
],
[
[
"##### Individual figures",
"_____no_output_____"
]
],
[
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig7, control_axes7 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes7,\n df=control_metrics_df,\n metric='avg_fluency',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.90, 1.06],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes7.set_ylabel(\"Average control fluency\", fontsize=16)\ncontrol_axes7.tick_params(axis='x', labelsize=16)\ncontrol_axes7.tick_params(axis='y', labelsize=14)\ncontrol_axes7.set_xticks([0,1,2])\ncontrol_axes7.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\n# significance\nbars = [0, 1, 2]\nheights = [0.99, 1.0, 1.03]\nbarplot_annotate_brackets(0, 1, 3.539208e-04, bars, heights, line_y=1.00)\nbarplot_annotate_brackets(0, 2, 4.194127e-03, bars, heights, line_y=1.03)\nbarplot_annotate_brackets(1, 2, 7.744226e-10, bars, heights, line_y=1.015)\n\nplt.savefig(\"./pdf/pub/control_boxplot_avg_fluency.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig8, control_axes8 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes8,\n df=control_metrics_df,\n metric='rel_jerk',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0, 0.30],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes8.set_ylabel(\"Relative jerk\", fontsize=16)\ncontrol_axes8.tick_params(axis='x', labelsize=16)\ncontrol_axes8.tick_params(axis='y', labelsize=14)\ncontrol_axes8.set_xticks([0,1,2])\ncontrol_axes8.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\n# significance\nbars = [0, 1, 2]\nheights = [0.99, 1.0, 1.03]\nbarplot_annotate_brackets(0, 1, 1.022116e-02, bars, heights, line_y=0.265)\nbarplot_annotate_brackets(0, 2, 2.421626e-01, bars, heights, line_y=0.30)\nbarplot_annotate_brackets(1, 2, 2.126847e-07, bars, heights, line_y=0.19)\n\nplt.savefig(\"./pdf/pub/control_boxplot_rel_jerk.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig9, control_axes9 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes9,\n df=control_metrics_df,\n metric='contribution',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.0, 1.4],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes9.set_ylabel(\"Contribution\", fontsize=16)\ncontrol_axes9.tick_params(axis='x', labelsize=16)\ncontrol_axes9.tick_params(axis='y', labelsize=14)\ncontrol_axes9.set_xticks([0,1,2])\ncontrol_axes9.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\n# significance\nbars = [0, 1, 2]\nheights = [0.99, 1.0, 1.03]\nbarplot_annotate_brackets(0, 1, 1.701803e-10, bars, heights, line_y=1.15)\nbarplot_annotate_brackets(0, 2, 1.271729e-01, bars, heights, line_y=1.2)\nbarplot_annotate_brackets(1, 2, 3.495410e-09, bars, heights, line_y=1.25)\n\nplt.savefig(\"./pdf/pub/control_boxplot_contribution.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
],
[
"mpl.rcParams['font.family'] = ['serif']\nmpl.rcParams['font.serif'] = ['Times New Roman']\nmpl.rcParams['mathtext.fontset'] = 'cm'\n\ncontrol_fig10, control_axes10 = plt.subplots(figsize=(6, 5))\n\ncategorical_plot(\n axes=control_axes10,\n df=control_metrics_df,\n metric='avg_agreement',\n category='control_type',\n title='',\n xlabel='',\n ylabel='',\n ylim=[0.5, 1.1],\n lgd_labels=['April 10, 2021', 'April 24, 2021'],\n lgd_font=\"Times New Roman\",\n kind='box',\n # group='date',\n loc='upper left',\n box_palette=box_pal,\n scatter_palette=scatter_pal,\n)\n\ncontrol_axes10.set_ylabel(\"Average agreement\", fontsize=16)\ncontrol_axes10.tick_params(axis='x', labelsize=16)\ncontrol_axes10.tick_params(axis='y', labelsize=14)\ncontrol_axes10.set_xticks([0,1,2])\ncontrol_axes10.set_xticklabels(['MDS','RDS','SC'], fontsize=16)\n\n# significance\nbars = [0, 1, 2]\nheights = [0.99, 1.0, 1.03]\nbarplot_annotate_brackets(0, 1, 5.248126e-02, bars, heights, line_y=0.82)\nbarplot_annotate_brackets(0, 2, 4.394447e-12, bars, heights, line_y=1.0)\nbarplot_annotate_brackets(1, 2, 3.542947e-15, bars, heights, line_y=0.94)\n\nplt.savefig(\"./pdf/pub/control_boxplot_avg_agreement.pdf\", dpi=300)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.close()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a99290fbe9a1cfece6db518bddd700269e5f988
| 18,123 |
ipynb
|
Jupyter Notebook
|
starter_code/VacationPy.ipynb
|
NitrogenHamster/PythonAPI
|
6414601543f974bd5133d4826ca06a87cdc8fdab
|
[
"ADSL"
] | null | null | null |
starter_code/VacationPy.ipynb
|
NitrogenHamster/PythonAPI
|
6414601543f974bd5133d4826ca06a87cdc8fdab
|
[
"ADSL"
] | null | null | null |
starter_code/VacationPy.ipynb
|
NitrogenHamster/PythonAPI
|
6414601543f974bd5133d4826ca06a87cdc8fdab
|
[
"ADSL"
] | null | null | null | 30.054726 | 156 | 0.391326 |
[
[
[
"# VacationPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\nfrom api_keys import g_key\n",
"_____no_output_____"
]
],
[
[
"### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame",
"_____no_output_____"
]
],
[
[
"cities = pd.read_csv(\"weather.csv\", encoding=\"utf-8\")\ncities = cities.drop(['Unnamed: 0'], axis=1)\ncities.head()",
"_____no_output_____"
]
],
[
[
"### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.",
"_____no_output_____"
]
],
[
[
"humidity = cities[\"Humidity\"].astype(float)\nlocations = cities[[\"Lat\", \"Lng\"]]",
"_____no_output_____"
],
[
"gmaps.configure(api_key=g_key)\nfig = gmaps.figure()\nheat_layer = gmaps.heatmap_layer(locations, weights=humidity,dissipating=False, max_intensity=humidity.max(),point_radius=5)\nfig.add_layer(heat_layer)\nfig",
"_____no_output_____"
]
],
[
[
"### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.",
"_____no_output_____"
]
],
[
[
"narrowed_city_df = cities.loc[(cities[\"Max Tempture\"] > 70) & (cities[\"Max Tempture\"] < 80) & (cities[\"Cloud density\"] == 0), :]\nnarrowed_city_df = narrowed_city_df.dropna(how='any')\nnarrowed_city_df.reset_index(inplace=True)\ndel narrowed_city_df['index']\nnarrowed_city_df.head()",
"_____no_output_____"
]
],
[
[
"### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.",
"_____no_output_____"
]
],
[
[
"hotels = []\n\nfor index, row in narrowed_city_df.iterrows():\n lat = row['Lat']\n lng = row['Lng']\n\n params = {\n \"location\": f\"{lat},{lng}\",\n \"radius\": 5000,\n \"types\" : \"hotel\",\n \"key\": g_key\n }\n \n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n requested = requests.get(base_url, params=params)\n jsn = requested.json()\n \n try:\n hotels.append(jsn['results'][0]['name'])\n except:\n hotels.append(\"\")\n \nnarrowed_city_df[\"Hotel Name\"] = hotels\nnarrowed_city_df = narrowed_city_df.dropna(how='any')\nnarrowed_city_df.head()",
"_____no_output_____"
],
[
"# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel Name}</dd>\n<dt>City</dt><dd>{City Name}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in narrowed_city_df.iterrows()]\nlocations = narrowed_city_df[[\"Lat\", \"Lng\"]]",
"_____no_output_____"
],
[
"# Add marker layer ontop of heat map\nmarkers = gmaps.marker_layer(locations)\nfig.add_layer(markers)\nfig\n# Display Map",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a99778c894f9f9559fcda0e06cd07082e82b669
| 17,020 |
ipynb
|
Jupyter Notebook
|
original_code/ch13.ipynb
|
jieunjeon/CatchingDudeoji
|
6cbb7b8d484ff21eac2d2640a862decfb5e3b9be
|
[
"MIT"
] | 6 |
2021-07-16T11:16:00.000Z
|
2021-08-31T09:23:27.000Z
|
original_code/ch13.ipynb
|
jieunjeon/CatchingDudeoji
|
6cbb7b8d484ff21eac2d2640a862decfb5e3b9be
|
[
"MIT"
] | 3 |
2021-07-27T03:14:17.000Z
|
2021-08-02T10:44:42.000Z
|
original_code/ch13.ipynb
|
jieunjeon/CatchingDudeoji
|
6cbb7b8d484ff21eac2d2640a862decfb5e3b9be
|
[
"MIT"
] | 22 |
2021-07-18T01:25:01.000Z
|
2021-08-20T10:28:31.000Z
| 20.630303 | 76 | 0.47309 |
[
[
[
"# Introduction to Modeling Libraries ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nnp.random.seed(12345)\nimport matplotlib.pyplot as plt\nplt.rc('figure', figsize=(10, 6))\nPREVIOUS_MAX_ROWS = pd.options.display.max_rows\npd.options.display.max_rows = 20\nnp.set_printoptions(precision=4, suppress=True)",
"_____no_output_____"
]
],
[
[
"## Interfacing Between pandas and Model Code",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\ndata = pd.DataFrame({\n 'x0': [1, 2, 3, 4, 5],\n 'x1': [0.01, -0.01, 0.25, -4.1, 0.],\n 'y': [-1.5, 0., 3.6, 1.3, -2.]})\ndata\ndata.columns\ndata.values",
"_____no_output_____"
],
[
"df2 = pd.DataFrame(data.values, columns=['one', 'two', 'three'])\ndf2",
"_____no_output_____"
],
[
"model_cols = ['x0', 'x1']\ndata.loc[:, model_cols].values",
"_____no_output_____"
],
[
"data['category'] = pd.Categorical(['a', 'b', 'a', 'a', 'b'],\n categories=['a', 'b'])\ndata",
"_____no_output_____"
],
[
"dummies = pd.get_dummies(data.category, prefix='category')\ndata_with_dummies = data.drop('category', axis=1).join(dummies)\ndata_with_dummies",
"_____no_output_____"
]
],
[
[
"## Creating Model Descriptions with Patsy",
"_____no_output_____"
],
[
"y ~ x0 + x1",
"_____no_output_____"
]
],
[
[
"data = pd.DataFrame({\n 'x0': [1, 2, 3, 4, 5],\n 'x1': [0.01, -0.01, 0.25, -4.1, 0.],\n 'y': [-1.5, 0., 3.6, 1.3, -2.]})\ndata\nimport patsy\ny, X = patsy.dmatrices('y ~ x0 + x1', data)",
"_____no_output_____"
],
[
"y\nX",
"_____no_output_____"
],
[
"np.asarray(y)\nnp.asarray(X)",
"_____no_output_____"
],
[
"patsy.dmatrices('y ~ x0 + x1 + 0', data)[1]",
"_____no_output_____"
],
[
"coef, resid, _, _ = np.linalg.lstsq(X, y)",
"_____no_output_____"
],
[
"coef\ncoef = pd.Series(coef.squeeze(), index=X.design_info.column_names)\ncoef",
"_____no_output_____"
]
],
[
[
"### Data Transformations in Patsy Formulas",
"_____no_output_____"
]
],
[
[
"y, X = patsy.dmatrices('y ~ x0 + np.log(np.abs(x1) + 1)', data)\nX",
"_____no_output_____"
],
[
"y, X = patsy.dmatrices('y ~ standardize(x0) + center(x1)', data)\nX",
"_____no_output_____"
],
[
"new_data = pd.DataFrame({\n 'x0': [6, 7, 8, 9],\n 'x1': [3.1, -0.5, 0, 2.3],\n 'y': [1, 2, 3, 4]})\nnew_X = patsy.build_design_matrices([X.design_info], new_data)\nnew_X",
"_____no_output_____"
],
[
"y, X = patsy.dmatrices('y ~ I(x0 + x1)', data)\nX",
"_____no_output_____"
]
],
[
[
"### Categorical Data and Patsy",
"_____no_output_____"
]
],
[
[
"data = pd.DataFrame({\n 'key1': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'b'],\n 'key2': [0, 1, 0, 1, 0, 1, 0, 0],\n 'v1': [1, 2, 3, 4, 5, 6, 7, 8],\n 'v2': [-1, 0, 2.5, -0.5, 4.0, -1.2, 0.2, -1.7]\n})\ny, X = patsy.dmatrices('v2 ~ key1', data)\nX",
"_____no_output_____"
],
[
"y, X = patsy.dmatrices('v2 ~ key1 + 0', data)\nX",
"_____no_output_____"
],
[
"y, X = patsy.dmatrices('v2 ~ C(key2)', data)\nX",
"_____no_output_____"
],
[
"data['key2'] = data['key2'].map({0: 'zero', 1: 'one'})\ndata\ny, X = patsy.dmatrices('v2 ~ key1 + key2', data)\nX\ny, X = patsy.dmatrices('v2 ~ key1 + key2 + key1:key2', data)\nX",
"_____no_output_____"
]
],
[
[
"## Introduction to statsmodels",
"_____no_output_____"
],
[
"### Estimating Linear Models",
"_____no_output_____"
]
],
[
[
"import statsmodels.api as sm\nimport statsmodels.formula.api as smf",
"_____no_output_____"
],
[
"def dnorm(mean, variance, size=1):\n if isinstance(size, int):\n size = size,\n return mean + np.sqrt(variance) * np.random.randn(*size)\n\n# For reproducibility\nnp.random.seed(12345)\n\nN = 100\nX = np.c_[dnorm(0, 0.4, size=N),\n dnorm(0, 0.6, size=N),\n dnorm(0, 0.2, size=N)]\neps = dnorm(0, 0.1, size=N)\nbeta = [0.1, 0.3, 0.5]\n\ny = np.dot(X, beta) + eps",
"_____no_output_____"
],
[
"X[:5]\ny[:5]",
"_____no_output_____"
],
[
"X_model = sm.add_constant(X)\nX_model[:5]",
"_____no_output_____"
],
[
"model = sm.OLS(y, X)",
"_____no_output_____"
],
[
"results = model.fit()\nresults.params",
"_____no_output_____"
],
[
"print(results.summary())",
"_____no_output_____"
],
[
"data = pd.DataFrame(X, columns=['col0', 'col1', 'col2'])\ndata['y'] = y\ndata[:5]",
"_____no_output_____"
],
[
"results = smf.ols('y ~ col0 + col1 + col2', data=data).fit()\nresults.params\nresults.tvalues",
"_____no_output_____"
],
[
"results.predict(data[:5])",
"_____no_output_____"
]
],
[
[
"### Estimating Time Series Processes",
"_____no_output_____"
]
],
[
[
"init_x = 4\n\nimport random\nvalues = [init_x, init_x]\nN = 1000\n\nb0 = 0.8\nb1 = -0.4\nnoise = dnorm(0, 0.1, N)\nfor i in range(N):\n new_x = values[-1] * b0 + values[-2] * b1 + noise[i]\n values.append(new_x)",
"_____no_output_____"
],
[
"MAXLAGS = 5\nmodel = sm.tsa.AR(values)\nresults = model.fit(MAXLAGS)",
"_____no_output_____"
],
[
"results.params",
"_____no_output_____"
]
],
[
[
"## Introduction to scikit-learn",
"_____no_output_____"
]
],
[
[
"train = pd.read_csv('datasets/titanic/train.csv')\ntest = pd.read_csv('datasets/titanic/test.csv')\ntrain[:4]",
"_____no_output_____"
],
[
"train.isnull().sum()\ntest.isnull().sum()",
"_____no_output_____"
],
[
"impute_value = train['Age'].median()\ntrain['Age'] = train['Age'].fillna(impute_value)\ntest['Age'] = test['Age'].fillna(impute_value)",
"_____no_output_____"
],
[
"train['IsFemale'] = (train['Sex'] == 'female').astype(int)\ntest['IsFemale'] = (test['Sex'] == 'female').astype(int)",
"_____no_output_____"
],
[
"predictors = ['Pclass', 'IsFemale', 'Age']\nX_train = train[predictors].values\nX_test = test[predictors].values\ny_train = train['Survived'].values\nX_train[:5]\ny_train[:5]",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression()",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"y_predict = model.predict(X_test)\ny_predict[:10]",
"_____no_output_____"
]
],
[
[
"(y_true == y_predict).mean()",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegressionCV\nmodel_cv = LogisticRegressionCV(10)\nmodel_cv.fit(X_train, y_train)",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score\nmodel = LogisticRegression(C=10)\nscores = cross_val_score(model, X_train, y_train, cv=4)\nscores",
"_____no_output_____"
]
],
[
[
"## Continuing Your Education",
"_____no_output_____"
]
],
[
[
"pd.options.display.max_rows = PREVIOUS_MAX_ROWS",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a99a27c36a03a9210b76f8b9a94931247d9e5e7
| 55,107 |
ipynb
|
Jupyter Notebook
|
examples/Notebooks/flopy3_export.ipynb
|
hzhang4/usg-cln
|
7bc6a58f73dade866c8d24838305058d4ea2e568
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
examples/Notebooks/flopy3_export.ipynb
|
hzhang4/usg-cln
|
7bc6a58f73dade866c8d24838305058d4ea2e568
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
examples/Notebooks/flopy3_export.ipynb
|
hzhang4/usg-cln
|
7bc6a58f73dade866c8d24838305058d4ea2e568
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | 36.762508 | 681 | 0.516649 |
[
[
[
"# FloPy\n\n### Demo of netCDF and shapefile export capabilities within the flopy export module. ",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nimport datetime\n\n# run installed version of flopy or add local path\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join('..', '..'))\n sys.path.append(fpth)\n import flopy\n\nprint(sys.version)\nprint('flopy version: {}'.format(flopy.__version__))",
"3.8.11 (default, Aug 6 2021, 08:56:27) \n[Clang 10.0.0 ]\nflopy version: 3.3.5\n"
]
],
[
[
"Load our old friend...the Freyberg model",
"_____no_output_____"
]
],
[
[
"nam_file = \"freyberg.nam\"\nmodel_ws = os.path.join(\"..\", \"data\", \"freyberg_multilayer_transient\")\nml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False)",
"/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/mbase.py:352: DeprecationWarning: xul/yul have been deprecated. Use xll/yll instead.\n warnings.warn(\n"
]
],
[
[
"We can see the ``Modelgrid`` instance has generic entries, as does ``start_datetime``",
"_____no_output_____"
]
],
[
[
"ml.modelgrid",
"_____no_output_____"
],
[
"ml.modeltime.start_datetime",
"_____no_output_____"
]
],
[
[
"Setting the attributes of the ``ml.modelgrid`` is easy:",
"_____no_output_____"
]
],
[
[
"proj4_str = \"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\"\nml.modelgrid.set_coord_info(xoff=123456.7, yoff=765432.1, angrot=15.0, proj4=proj4_str)\nml.dis.start_datetime = '7/4/1776'",
"_____no_output_____"
],
[
"ml.modeltime.start_datetime",
"_____no_output_____"
]
],
[
[
"### Some netCDF export capabilities:\n\n#### Export the whole model (inputs and outputs)",
"_____no_output_____"
]
],
[
[
"# make directory\npth = os.path.join('data', 'netCDF_export')\nif not os.path.exists(pth):\n os.makedirs(pth)",
"_____no_output_____"
],
[
"fnc = ml.export(os.path.join(pth, ml.name+'.in.nc'))\nhds = flopy.utils.HeadFile(os.path.join(model_ws,\"freyberg.hds\"))\nflopy.export.utils.output_helper(os.path.join(pth, ml.name+'.out.nc'), ml, {\"hds\":hds})",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"#### export a single array to netcdf or shapefile",
"_____no_output_____"
]
],
[
[
"# export a 2d array\nml.dis.top.export(os.path.join(pth, 'top.nc'))\nml.dis.top.export(os.path.join(pth, 'top.shp'))",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\nwrote data/netCDF_export/top.shp\n"
]
],
[
[
"#### sparse export of stress period data for a boundary condition package \n* excludes cells that aren't in the package (aren't in `package.stress_period_data`) \n* by default, stress periods with duplicate parameter values (e.g., stage, conductance, etc.) are omitted\n(`squeeze=True`); only stress periods with different values are exported \n* argue `squeeze=False` to export all stress periods",
"_____no_output_____"
]
],
[
[
"ml.drn.stress_period_data.export(os.path.join(pth, 'drn.shp'), sparse=True)",
"wrote data/netCDF_export/drn.shp\n"
]
],
[
[
"#### Export a 3d array",
"_____no_output_____"
]
],
[
[
"#export a 3d array\nml.upw.hk.export(os.path.join(pth, 'hk.nc'))\nml.upw.hk.export(os.path.join(pth, 'hk.shp'))",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\nwrote data/netCDF_export/hk.shp\n"
]
],
[
[
"#### Export a number of things to the same netCDF file",
"_____no_output_____"
]
],
[
[
"# export lots of things to the same nc file\nfnc = ml.dis.botm.export(os.path.join(pth, 'test.nc'))\nml.upw.hk.export(fnc)\nml.dis.top.export(fnc)\n\n# export transient 2d\nml.rch.rech.export(fnc)\n",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"### Export whole packages to a netCDF file",
"_____no_output_____"
]
],
[
[
"# export mflist\nfnc = ml.wel.export(os.path.join(pth, 'packages.nc'))\nml.upw.export(fnc)\nfnc.nc",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"### Export the whole model to a netCDF",
"_____no_output_____"
]
],
[
[
"fnc = ml.export(os.path.join(pth, 'model.nc'))\nfnc.nc",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"## Export output to netcdf\n\nFloPy has utilities to export model outputs to a netcdf file. Valid output types for export are MODFLOW binary head files, formatted head files, cell budget files, seawat concentration files, and zonebudget output.\n\nLet's use output from the Freyberg model as an example of these functions",
"_____no_output_____"
]
],
[
[
"# load binary head and cell budget files\nfhead = os.path.join(model_ws, 'freyberg.hds')\nfcbc = os.path.join(model_ws, 'freyberg.cbc')\n\nhds = flopy.utils.HeadFile(fhead)\ncbc = flopy.utils.CellBudgetFile(fcbc)\n\nexport_dict = {\"hds\": hds,\n \"cbc\": cbc}\n\n# export head and cell budget outputs to netcdf\nfnc = flopy.export.utils.output_helper(os.path.join(pth, \"output.nc\"), ml, export_dict)\nfnc.nc",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"### Exporting zonebudget output\n\nzonebudget output can be exported with other modflow outputs, and is placed in a seperate group which allows the user to post-process the zonebudget output before exporting.\n\nHere are two examples on how to export zonebudget output with a binary head and cell budget file\n\n__Example 1__: No postprocessing of the zonebudget output",
"_____no_output_____"
]
],
[
[
"# load the zonebudget output file\nzonbud_ws = os.path.join(\"..\", \"data\", \"zonbud_examples\")\nfzonbud = os.path.join(zonbud_ws, \"freyberg_mlt.2.csv\")\nzon_arrays = flopy.utils.zonbud.read_zbarray(os.path.join(zonbud_ws, \"zonef_mlt.zbr\"))\n\nzbout = flopy.utils.ZoneBudgetOutput(fzonbud, ml.dis, zon_arrays)\nzbout",
"/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:2944: PendingDeprecationWarning: Deprecation planned for version 3.3.5, use ZoneBudget.read_zone_file()\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3129: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5,Use ZoneBudget.read_output(<file>, pivot=True) or ZoneBudget6.get_budget(<file>, pivot=True)\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3186: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3174: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3186: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3174: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3198: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n"
],
[
"export_dict = {'hds': hds,\n 'cbc': cbc}\n\nfnc = flopy.export.utils.output_helper(os.path.join(pth, \"output_with_zonebudget.nc\"),\n ml, export_dict)\n\nfnc = zbout.export(fnc, ml)\nfnc.nc",
"initialize_geometry::proj4_str = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\ninitialize_geometry::self.grid_crs = +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +type=crs\ninitialize_geometry::nc_crs = epsg:4326\ntransforming coordinates using = proj=noop ellps=GRS80\n"
]
],
[
[
"A budget_zones variable has been added to the root group and a new zonebudget group has been added to the netcdf file which hosts all of the budget data",
"_____no_output_____"
],
[
"__Example 2__: postprocessing zonebudget output then exporting",
"_____no_output_____"
]
],
[
[
"# load the zonebudget output and get the budget information\nzbout = flopy.utils.ZoneBudgetOutput(fzonbud, ml.dis, zon_arrays)\ndf = zbout.dataframe\ndf",
"/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3129: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5,Use ZoneBudget.read_output(<file>, pivot=True) or ZoneBudget6.get_budget(<file>, pivot=True)\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3198: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n"
]
],
[
[
"Let's calculate a yearly volumetric budget from the zonebudget data",
"_____no_output_____"
]
],
[
[
"# get a dataframe of volumetric budget information\nvol_df = zbout.volumetric_flux()\n\n# add a year field to the dataframe using datetime\nstart_date = ml.modeltime.start_datetime\nstart_date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\")\nnzones = len(zbout.zones) - 1\n\nyear = [start_date.year] * nzones\nfor totim in vol_df.totim.values[:-nzones]:\n t = start_date + datetime.timedelta(days=totim)\n year.append(t.year)\n\nvol_df['year'] = year\nprint(vol_df)\n# calculate yearly volumetric change using pandas\ntotim_df = vol_df.groupby(['year', 'zone'], as_index=False)['totim'].max()\nyearly = vol_df.groupby(['year', 'zone'], as_index=False)[['STORAGE', 'CONSTANT_HEAD', 'OTHER_ZONES',\n 'ZONE_1', 'ZONE_2', 'ZONE_3']].sum()\nyearly['totim'] = totim_df['totim']\nyearly",
"/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3261: PendingDeprecationWarning: ZoneBudgetOutput.volumetric_flux() will be deprecated in version 3.3.5,\n warnings.warn(\n"
]
],
[
[
"And finally, export the pandas dataframe to netcdf",
"_____no_output_____"
]
],
[
[
"# process the new dataframe into a format that is compatible with netcdf exporting\nzbncf = zbout.dataframe_to_netcdf_fmt(yearly, flux=False)\n\n# export to netcdf\nexport_dict = {\"hds\": hds,\n \"cbc\": cbc,\n \"zbud\": zbncf}\n\nfnc = flopy.export.utils.output_helper(os.path.join(pth, \"output_with_zonebudget.2.nc\"),\n ml, export_dict)\nfnc.nc",
"/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3292: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:3174: PendingDeprecationWarning: ZoneBudgetOutput will be deprecated in version 3.3.5\n warnings.warn(\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a99bfad12be29413ec19862457e7ed5ef2558d6
| 78,339 |
ipynb
|
Jupyter Notebook
|
pytorch_multiproject/utils/visualize_word2vec.ipynb
|
JohnnyTh/pytorch_multiproject
|
07a0c204adccfa34597615fe005718ec6a223624
|
[
"MIT"
] | 3 |
2019-10-23T10:11:44.000Z
|
2021-10-07T12:08:37.000Z
|
pytorch_multiproject/utils/visualize_word2vec.ipynb
|
JohnnyTh/pytorch_multiproject
|
07a0c204adccfa34597615fe005718ec6a223624
|
[
"MIT"
] | null | null | null |
pytorch_multiproject/utils/visualize_word2vec.ipynb
|
JohnnyTh/pytorch_multiproject
|
07a0c204adccfa34597615fe005718ec6a223624
|
[
"MIT"
] | null | null | null | 127.796085 | 56,392 | 0.816107 |
[
[
[
"import torch\n# Check if pytorch is using GPU:\nprint('Used device name: {}'.format(torch.cuda.get_device_name(0)))",
"Used device name: Tesla P100-PCIE-16GB\n"
]
],
[
[
"Import your google drive if necessary.",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"import sys\nimport os\nROOT_DIR = 'your_dir'\nsys.path.insert(0, ROOT_DIR)\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport torch\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.manifold import TSNE\n% matplotlib inline",
"_____no_output_____"
]
],
[
[
"After trraining preprocessing the data and training the model, load all the needed files.",
"_____no_output_____"
]
],
[
[
"resources_dir = os.path.join(ROOT_DIR, 'resources', '')\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nvocabulary = pickle.load(open(os.path.join(os.path.join(resources_dir, 'vocabulary'), 'vocabulary.pickle'), 'rb'))\nword2vec_path = 'your_path/idx2vec.pickle'\nword2idx = pickle.load(open(os.path.join(os.path.join(resources_dir, 'word2idx'), 'word2idx.pickle'), 'rb'))\nidx2word = pickle.load(open(os.path.join(os.path.join(resources_dir, 'idx2word'), 'idx2word.pickle'), 'rb'))\nword_count = pickle.load(open(os.path.join(os.path.join(resources_dir, 'word_counts'), 'word_counts.pickle'), 'rb'))\n\nembeddings_weigths = pickle.load(open(word2vec_path, 'rb'))\nembeddings_weigths = torch.tensor(embeddings_weigths).to(device)",
"_____no_output_____"
],
[
"embeddings_weigths[1]",
"_____no_output_____"
]
],
[
[
"Define the cosine similarity between two vectors.\n",
"_____no_output_____"
]
],
[
[
"def cosine_sim(x_vector, y_vector):\n dot_prod = torch.dot(x_vector.T, y_vector)\n vector_norms = torch.sqrt(torch.sum(x_vector**2)) * torch.sqrt(torch.sum(y_vector**2))\n similarity = dot_prod / vector_norms\n return similarity",
"_____no_output_____"
]
],
[
[
"Plot results from t-SNE for a group of selected words.",
"_____no_output_____"
]
],
[
[
"test_words = ['frodo', 'gandalf', 'gimli', 'saruman', 'sauron', 'aragorn', 'ring', 'bilbo', \n 'shire', 'gondor', 'sam', 'pippin', 'baggins', 'legolas',\n 'gollum', 'elrond', 'isengard', 'king', 'merry', 'elf']\ntest_idx = [word2idx[word] for word in test_words]\ntest_embds = embeddings_weigths[test_idx]\n\ntsne = TSNE(perplexity=5, n_components=2, init='pca', n_iter=10000, random_state=12, \n verbose=1)\ntest_embds_2d = tsne.fit_transform(test_embds.cpu().numpy())\n\nplt.figure(figsize = (9, 9), dpi=120)\nfor idx, word in enumerate(test_words):\n\n plt.scatter(test_embds_2d[idx][0], test_embds_2d[idx][1])\n plt.annotate(word, xy = (test_embds_2d[idx][0], test_embds_2d[idx][1]), \\\n ha='right',va='bottom')\n\nplt.show()",
"[t-SNE] Computing 16 nearest neighbors...\n[t-SNE] Indexed 20 samples in 0.001s...\n[t-SNE] Computed neighbors for 20 samples in 0.002s...\n[t-SNE] Computed conditional probabilities for sample 20 / 20\n[t-SNE] Mean sigma: 1.050818\n[t-SNE] KL divergence after 250 iterations with early exaggeration: 56.196255\n[t-SNE] KL divergence after 2950 iterations: 0.471334\n"
]
],
[
[
"Compute cosine similarities for a group of selected words.",
"_____no_output_____"
]
],
[
[
"words = ['frodo', 'gandalf', 'gimli', 'saruman', 'sauron', 'aragorn', 'ring', 'bilbo', \n 'shire', 'gondor', 'sam', 'pippin', 'baggins', 'legolas',\n 'gollum', 'elrond', 'isengard', 'king', 'merry', 'elf']\nwords_idx = [word2idx[word] for word in words]\nembeddings_words = [embeddings_weigths[idx] for idx in words_idx]\n\ntop_num = 5\n\nt = tqdm(embeddings_words)\nt.set_description('Checking words for similarities')\n\nsimilarities = {}\nfor idx_1, word_1 in enumerate(t):\n key_word = words[idx_1]\n similarities[key_word] = []\n for idx_2, word_2 in enumerate(embeddings_weigths):\n # the first two elements in vocab are padding word and unk word\n if idx_2 > 1:\n similarity = float(cosine_sim(word_1, word_2))\n if word2idx[key_word] != idx_2:\n similarities[key_word].append([idx2word[idx_2], similarity])\n similarities[key_word].sort(key= lambda x: x[1])\n similarities[key_word] = similarities[key_word][:-top_num-1:-1]\n\nfor key in similarities:\n for item in similarities[key]:\n item[1] = round(item[1], 4)",
"_____no_output_____"
]
],
[
[
"Format the results and convert them into a pandas dataframe.\n",
"_____no_output_____"
]
],
[
[
"formated_sim = {}\nfor key in similarities:\n temp_list = []\n for items in similarities[key]:\n string = '\"{}\": {}'.format(items[0], items[1])\n temp_list.append(string)\n formated_sim[key] = temp_list\n\ndf = pd.DataFrame(data=formated_sim)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a99c239a2d11d95260fed0549546dc74b0c683a
| 645,398 |
ipynb
|
Jupyter Notebook
|
notebooks/NBA_Statistics.ipynb
|
TimothyHelton/nba_stats
|
a0192e0acad8119407fee0efd6f06f7b1e2af30c
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/NBA_Statistics.ipynb
|
TimothyHelton/nba_stats
|
a0192e0acad8119407fee0efd6f06f7b1e2af30c
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/NBA_Statistics.ipynb
|
TimothyHelton/nba_stats
|
a0192e0acad8119407fee0efd6f06f7b1e2af30c
|
[
"BSD-3-Clause"
] | null | null | null | 543.721988 | 257,744 | 0.923841 |
[
[
[
"# NBA Statistics\n---\nTimothy Helton\n\nThis notebook generates figures describing National Basketball Association (NBA) players likelyhood of being a member of the Hall of Fame.\n\nTo see the full project please click\n[**here**](https://timothyhelton.github.io/nba_stats.html).\n\n---\nNOTE: This notebook uses code found in the\n[**nba_stats**](https://github.com/TimothyHelton/nba_stats)\npackage.\n\nTo execute all the cells do one of the following items:\n- Install the nba_stats package to the active Python interpreter.\n- Add nba_stats/nba_stats to the PYTHON_PATH system variable.",
"_____no_output_____"
],
[
"---\n## Imports",
"_____no_output_____"
]
],
[
[
"import logging\nimport sys\nimport os\nimport warnings\n\nimport bokeh.io as bkio\nimport pandas as pd\n\nfrom nba_stats import players\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\nbkio.output_notebook()\n%matplotlib inline\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"---\n## Python Version",
"_____no_output_____"
]
],
[
[
"print(f'Python Version: {sys.version}')",
"Python Version: 3.6.2 |Anaconda custom (x86_64)| (default, Jul 20 2017, 13:14:59) \n[GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]\n"
]
],
[
[
"---\n## Set Logging Level\nTo display the logging statments use logging.INFO.",
"_____no_output_____"
]
],
[
[
"players.logger.setLevel(logging.CRITICAL)",
"_____no_output_____"
]
],
[
[
"---\n## Load Data",
"_____no_output_____"
]
],
[
[
"nba = players.Statistics()",
"_____no_output_____"
]
],
[
[
"---\n## Generate Hall of Fame Figures",
"_____no_output_____"
],
[
"##### NBA Player Hall of Fame Percentage",
"_____no_output_____"
]
],
[
[
"nba.hof_percent_plot()",
"_____no_output_____"
]
],
[
[
"##### Basketball Hall of Fame Categories",
"_____no_output_____"
]
],
[
[
"nba.hof_category_plot()",
"_____no_output_____"
]
],
[
[
"##### Basketball Player Hall of Fame Subcategories",
"_____no_output_____"
]
],
[
[
"nba.hof_player_breakdown_plot()",
"_____no_output_____"
]
],
[
[
"##### NBA Player Birth Locations Histogram",
"_____no_output_____"
]
],
[
[
"nba.hof_birth_loc_plot()",
"_____no_output_____"
]
],
[
[
"##### NBA Player Birth Locations Map",
"_____no_output_____"
]
],
[
[
"try:\n os.remove('hof_birth_map.html')\nexcept FileNotFoundError:\n pass\n\nnba.hof_birth_map_plot()",
"_____no_output_____"
]
],
[
[
"##### NBA Hall of Fame Players College Attendence",
"_____no_output_____"
]
],
[
[
"nba.hof_college_plot()",
"_____no_output_____"
]
],
[
[
"---\n## Features",
"_____no_output_____"
]
],
[
[
"nba.hof_correlation_plot()",
"_____no_output_____"
]
],
[
[
"---\n## Data Subsets\n\nSubsets of the season statistics dataset are determined isolating records with complete features (no missing data).\n\nA total of 21 subsets were identified, and stored in the *feature_subset* attribute dictionary with the record count as keys.\n\nEach subset is a named tuple with the following fields.\n- data: original data\n- feature_names: names of included features\n- x_test: x test dataset\n- x_train: x training dataset\n- y_test: y test dataset\n- y_train: y training dataset\n\nThe test and train data has the following qualities.\n- test set size is 20% of the subset\n- training set is balanced with 500 random entries for both the Hall of Fame and Regular players\n - alter this parameter using the *training_size* attribute\n- the random seed is set to 0 by default\n - alter this parameter using the *seed* attribute\n- none of the test entries are included in the training dataset",
"_____no_output_____"
]
],
[
[
"subsets = nba.feature_subset.keys()\nprint(f'Number of Subsets: {len(subsets)}')",
"Number of Subsets: 21\n"
]
],
[
[
"---\n## Principle Component Analysis (PCA)\n\nThe PCA for each of the subsets is calculated in the player.Statistics constructor.\n\nThe *pca* attribute is a dictionary with keys of features count for each subset.\n\nEach PCA subset is a numedtuple with the following fields:\n- cut_off: number of components that have a positive $2^{nd}$ derivative for the scree plot\n- feature_names: names of included features\n- fit: PCA model fit of the training datasets\n- model: PCA model\n- n_components: number of components\n- subset: original subset data\n- var_pct: variance percentage\n- var_pct_cum: variance percentage cumulative sum\n- variance: DataFrame combining var_pct and var_pct_cum\n- x_test: x test dataset\n- x_train: x training dataset\n- y_test: y test dataset\n- y_train: y training dataset",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\npca_subsets = pd.DataFrame(list(zip(nba.pca.keys(), nba.feature_subset.keys())),\n columns=['Features', 'Records'])\npca_subsets.index = pca_subsets.index.rename('PCA Model')\npca_subsets",
"_____no_output_____"
]
],
[
[
"---\n## Model Evaluations",
"_____no_output_____"
],
[
"##### Comparision Plot",
"_____no_output_____"
]
],
[
[
"nba.evaluation_plot()",
"_____no_output_____"
]
],
[
[
"##### Chose Optimal Feature Subset\n\nThe random seed value is not set and calculations for all subsets are run 100 times with the top test score being tallied.\n\n**Note**:\n\nModels with 47 or 48 features result in the highest predictive classification scores.\nThe variation is due to the stochastic nature of the calculation.\nWith the seed values set to the default of 0 the optimal features to use will be 47.",
"_____no_output_____"
]
],
[
[
"# nba.optimal_features_plot(evaluations=100)",
"_____no_output_____"
]
],
[
[
"##### PCA Plot for Optimal Feature Subset",
"_____no_output_____"
]
],
[
[
"nba.classify_players(nba.pca[47], model='LR')\nnba.pca_plot(nba.pca[47])",
"_____no_output_____"
]
],
[
[
"##### Confusion Matrix for Optimal Feature Subset",
"_____no_output_____"
]
],
[
[
"players.confusion_plot(nba.classify[47].confusion)",
"_____no_output_____"
]
],
[
[
"##### Evaluate All Players in a Subset",
"_____no_output_____"
]
],
[
[
"nba.evaluate_all_players(feature_qty=47, model='LR')",
"\n\nAll Players Individual Seasons: 47 Features\nMean Score: 0.552\n precision recall f1-score support\n\n 0 0.99 0.55 0.70 16971\n 1 0.05 0.73 0.09 515\n\navg / total 0.96 0.55 0.69 17486\n\nConfusion Matrix\n 0 1\n0 9282 7689\n1 141 374\n\n\nAll Players Categorize: 47 Features\nHall of Fame Accuracy: 0.025\nRegular Players: 0.984\n\n\nPlayers Predicted to be in the Hall of Fame: 47 Features\n fame_seasons\nplayer \nEddie Johnson 27\nMike Dunleavy 24\nVince Carter 22\nChauncey Billups 22\nGerald Henderson 22\nJason Kidd 22\nTim Thomas 21\nTim Hardaway 21\nAndre Miller 21\nDale Ellis 20\nSam Cassell 20\nKobe Bryant 20\nRay Allen 20\nMark Jackson 20\nSteve Blake 19\nPaul Pierce 19\nAllen Iverson 19\nJoe Johnson 19\nBeno Udrih 19\nMike Miller 18\nMike Bibby 18\nPeja Stojakovic 18\nKyle Korver 18\nRicky Pierce 18\nJason Terry 18\nSteve Nash 18\nSteve Smith 18\nJamal Crawford 18\nMichael Finley 18\n"
]
],
[
[
"###### Note:\nAt first glance the Classification Report and Confusion Matrix appear to be in disagreement. scikit learn performs the following steps to generate the Classification Report.\n1. Calculate the precision and recall for each class.\n1. Use values from step 1 to calculate the F1 score.\n1. Calculated a weighted average for precision, recall, and the F1 score.",
"_____no_output_____"
]
],
[
[
"nba.evaluate_all_players(feature_qty=47, model='LR')",
"\n\nAll Players Individual Seasons: 47 Features\nMean Score: 0.552\n precision recall f1-score support\n\n 0 0.99 0.55 0.70 16971\n 1 0.05 0.73 0.09 515\n\navg / total 0.96 0.55 0.69 17486\n\nConfusion Matrix\n 0 1\n0 9282 7689\n1 141 374\n\n\nAll Players Categorize: 47 Features\nHall of Fame Accuracy: 0.025\nRegular Players: 0.984\n\n\nPlayers Predicted to be in the Hall of Fame: 47 Features\n fame_seasons\nplayer \nEddie Johnson 27\nMike Dunleavy 24\nVince Carter 22\nChauncey Billups 22\nGerald Henderson 22\nJason Kidd 22\nTim Thomas 21\nTim Hardaway 21\nAndre Miller 21\nDale Ellis 20\nSam Cassell 20\nKobe Bryant 20\nRay Allen 20\nMark Jackson 20\nSteve Blake 19\nPaul Pierce 19\nAllen Iverson 19\nJoe Johnson 19\nBeno Udrih 19\nMike Miller 18\nMike Bibby 18\nPeja Stojakovic 18\nKyle Korver 18\nRicky Pierce 18\nJason Terry 18\nSteve Nash 18\nSteve Smith 18\nJamal Crawford 18\nMichael Finley 18\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a99c87f6d1af269820231ffdb2efc104e5f2377
| 2,127 |
ipynb
|
Jupyter Notebook
|
Data-Science-HYD-2k19/.ipynb_checkpoints/FUNCTIONS-checkpoint.ipynb
|
Sanjay9921/Python
|
05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
|
[
"MIT"
] | null | null | null |
Data-Science-HYD-2k19/.ipynb_checkpoints/FUNCTIONS-checkpoint.ipynb
|
Sanjay9921/Python
|
05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
|
[
"MIT"
] | null | null | null |
Data-Science-HYD-2k19/.ipynb_checkpoints/FUNCTIONS-checkpoint.ipynb
|
Sanjay9921/Python
|
05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
|
[
"MIT"
] | null | null | null | 18.179487 | 59 | 0.447109 |
[
[
[
"#The advantage of the functions is: Reproducibilty\n\ndef hello():\n print(\"Hello world\")\n\nhello()",
"Hello world\n"
],
[
"def greeting(name):\n print(\"Hello: %s, how are you?\"%name)\n \n\ngreeting(\"Sanjay\")",
"Hello: Sanjay, how are you?\n"
],
[
"def concat_strings(str1,str2):\n str3 = str1+str2\n print(str3)\n\nconcat_strings(\"Sanjay\",\" Prabhu\")",
"Sanjay Prabhu\n"
],
[
"def is_prime(n):\n for i in range(2,n):\n if n%i==0:\n print(\"Not a prime\")\n break\n else:\n print(\"The number %d is a prime.\"%n)\n\nis_prime(4)\nis_prime(13)",
"Not a prime\nThe number 13 is a prime.\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a99cd1b6c3576430e07bc32db9989513a0a8184
| 27,292 |
ipynb
|
Jupyter Notebook
|
ml-exercises/feature_crosses.ipynb
|
StevenPZChan/machine-learning
|
a27483b0fbfcc1a0558b30f331461eb5fbf12fc6
|
[
"MIT"
] | null | null | null |
ml-exercises/feature_crosses.ipynb
|
StevenPZChan/machine-learning
|
a27483b0fbfcc1a0558b30f331461eb5fbf12fc6
|
[
"MIT"
] | null | null | null |
ml-exercises/feature_crosses.ipynb
|
StevenPZChan/machine-learning
|
a27483b0fbfcc1a0558b30f331461eb5fbf12fc6
|
[
"MIT"
] | null | null | null | 38.712057 | 707 | 0.623882 |
[
[
[
"# Feature Crosses\nContinuing on the previous exercise, we will improve our linear regression model with the addition of more synthetic features.\n\nFirst, let's define the input and create the data loading code.",
"_____no_output_____"
]
],
[
[
"import math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ncalifornia_housing_dataframe = pd.read_csv(\"https://storage.googleapis.com/ml_universities/california_housing_train.csv\", sep=\",\")\n\ncalifornia_housing_dataframe = california_housing_dataframe.reindex(\n np.random.permutation(california_housing_dataframe.index))",
"_____no_output_____"
],
[
"def preprocess_features(california_housing_dataframe):\n \"\"\"Prepares input features from California housing data set.\n\n Args:\n california_housing_dataframe: A Pandas DataFrame expected to contain data\n from the California housing data set.\n Returns:\n A DataFrame that contains the features to be used for the model, including\n synthetic features.\n \"\"\"\n selected_features = california_housing_dataframe[\n [\"latitude\",\n \"longitude\",\n \"housing_median_age\",\n \"total_rooms\",\n \"total_bedrooms\",\n \"population\",\n \"households\",\n \"median_income\"]]\n processed_features = selected_features.copy()\n # Create a synthetic feature.\n processed_features[\"rooms_per_person\"] = (\n california_housing_dataframe[\"total_rooms\"] /\n california_housing_dataframe[\"population\"])\n return processed_features\n\ndef preprocess_targets(california_housing_dataframe):\n \"\"\"Prepares target features (i.e., labels) from California housing data set.\n\n Args:\n california_housing_dataframe: A Pandas DataFrame expected to contain data\n from the California housing data set.\n Returns:\n A DataFrame that contains the target feature.\n \"\"\"\n output_targets = pd.DataFrame()\n # Scale the target to be in units of thousands of dollars.\n output_targets[\"median_house_value\"] = (\n california_housing_dataframe[\"median_house_value\"] / 1000.0)\n return output_targets",
"_____no_output_____"
],
[
"training_examples = preprocess_features(california_housing_dataframe.head(12000))\ntraining_examples.describe()",
"_____no_output_____"
],
[
"training_targets = preprocess_targets(california_housing_dataframe.head(12000))\ntraining_targets.describe()",
"_____no_output_____"
],
[
"validation_examples = preprocess_features(california_housing_dataframe.tail(5000))\nvalidation_examples.describe()",
"_____no_output_____"
],
[
"validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))\nvalidation_targets.describe()",
"_____no_output_____"
]
],
[
[
"### Feature engineering\n\nCreating relevant features greatly improves ML models, especially for simple models like regression. We learned in a previous exercise that two (or more) independent features often do not provide as much information as a feature derived from them.\n\nWe have already used a synthetic feature in our example: `rooms_per_person`.\n\nWe can create simple synthetic features by performing operations on certain columns. However, this may become tedious for complex operations like bucketizing or crossing bucketized features. Feature columns are powerful abstractions that make it easy to add synthetic features.",
"_____no_output_____"
]
],
[
[
"longitude = tf.contrib.layers.real_valued_column(\"longitude\")\nlatitude = tf.contrib.layers.real_valued_column(\"latitude\")\nhousing_median_age = tf.contrib.layers.real_valued_column(\"housing_median_age\")\nhouseholds = tf.contrib.layers.real_valued_column(\"households\")\nmedian_income = tf.contrib.layers.real_valued_column(\"median_income\")\nrooms_per_person = tf.contrib.layers.real_valued_column(\"rooms_per_person\")\n\nfeature_columns = set([\n longitude,\n latitude,\n housing_median_age,\n households,\n median_income,\n rooms_per_person])",
"_____no_output_____"
]
],
[
[
"#### The input function\n\nPreviously, we passed data to the estimator using Pandas `DataFrame` objects. A more flexible, but more complex, way to pass data is through the input function.\n\nOne particularity of the estimators API is that input functions are responsible for splitting the data into batches, so the `batch_size` arg is ignored when using `input_fn`. The batch size will be determined by the number of rows that the input function returns (see below).\n\nInput functions return [Tensor](https://www.tensorflow.org/versions/master/api_docs/python/framework.html#Tensor) objects, which are the core data types used in TensorFlow. More specifically, input functions must return the following `(features, label)` tuple:\n* `features`: A `dict` mapping `string` values (the feature name) to `Tensor` values of shape `(n, 1)` where `n` is the number of data rows (and therefore batch size) returned by the input function.\n* `label`: A `Tensor` of shape `(n, 1)`, representing the corresponding labels.\n\nAs a side note, the input functions usually create a queue that reads the data sequentially, but this is an advanced topic not covered here. This makes them a necessity if your data is too large to be preloaded into memory.\n\nFor simplicity, our function will convert the entire `DataFrame` to a `Tensor`. This means we'll use a batch size of `12000` (and respectively `5000` for validation) - somewhat on the large size, but that will work fine with our small model. This will make training somewhat slower, but thanks to vector optimizations the performance penalty won't be that bad.\n\nHere's the necessary input function:",
"_____no_output_____"
]
],
[
[
"def input_function(examples_df, targets_df, single_read=False):\n \"\"\"Converts a pair of examples/targets `DataFrame`s to `Tensor`s.\n \n The `Tensor`s are reshaped to `(N,1)` where `N` is number of examples in the `DataFrame`s.\n \n Args:\n examples_df: A `DataFrame` that contains the input features. All its columns will be\n transformed into corresponding input feature `Tensor` objects.\n targets_df: A `DataFrame` that contains a single column, the targets corresponding to\n each example in `examples_df`.\n single_read: A `bool` that indicates whether this function should stop after reading\n through the dataset once. If `False`, the function will loop through the data set.\n This stop mechanism is used by the estimator's `predict()` to limit the number of\n values it reads.\n Returns:\n A tuple `(input_features, target_tensor)`:\n input_features: A `dict` mapping string values (the column name of the feature) to\n `Tensor`s (the actual values of the feature).\n target_tensor: A `Tensor` representing the target values.\n \"\"\"\n features = {}\n for column_name in examples_df.keys():\n batch_tensor = tf.to_float(\n tf.reshape(tf.constant(examples_df[column_name].values), [-1, 1]))\n if single_read:\n features[column_name] = tf.train.limit_epochs(batch_tensor, num_epochs=1)\n else:\n features[column_name] = batch_tensor\n target_tensor = tf.to_float(\n tf.reshape(tf.constant(targets_df[targets_df.keys()[0]].values), [-1, 1]))\n\n return features, target_tensor",
"_____no_output_____"
]
],
[
[
"For an example, the code below shows the output of the input function when passed a few sample records from the California housing data set.\n\nThis snippet is for illustrative purposes only. It is not required for training the model, but you may find it useful to visualize the effect of various feature crosses.",
"_____no_output_____"
]
],
[
[
"def sample_from_input_function(input_fn):\n \"\"\"Returns a few samples from the given input function.\n \n Args:\n input_fn: An input function, that meets the `Estimator`'s contract for\n input functions.\n Returns:\n A `DataFrame` that contains a small number of records that are returned\n by this function.\n \"\"\"\n \n examples, target = input_fn()\n \n example_samples = {\n name: tf.strided_slice(values, [0, 0], [5, 1]) for name, values in examples.items()\n }\n target_samples = tf.strided_slice(target, [0, 0], [5, 1])\n \n with tf.Session() as sess:\n example_sample_values, target_sample_values = sess.run(\n [example_samples, target_samples])\n \n results = pd.DataFrame()\n for name, values in example_sample_values.items():\n results[name] = pd.Series(values.reshape(-1))\n results['target'] = target_sample_values.reshape(-1)\n\n return results",
"_____no_output_____"
],
[
"samples = sample_from_input_function(\n lambda: input_function(training_examples, training_targets))\nsamples",
"_____no_output_____"
]
],
[
[
"### FTRL optimization algorithm\n\nHigh dimensional linear models benefit from using a variant of gradient-based optimization called FTRL. This algorithm has the benefit of scaling the learning rate differently for different coefficients, which can be useful if some features rarely take non-zero values (it also is well suited to support L1 regularization). We can apply FTRL using the [FtrlOptimizer](https://www.tensorflow.org/versions/master/api_docs/python/train.html#FtrlOptimizer).",
"_____no_output_____"
]
],
[
[
"def train_model(\n learning_rate,\n steps,\n feature_columns,\n training_examples,\n training_targets,\n validation_examples,\n validation_targets):\n \"\"\"Trains a linear regression model.\n \n In addition to training, this function also prints training progress information,\n as well as a plot of the training and validation loss over time.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. A training step\n consists of a forward and backward pass using a single batch.\n feature_columns: A `set` specifying the input feature columns to use.\n training_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for training.\n training_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for training.\n validation_examples: A `DataFrame` containing one or more columns from\n `california_housing_dataframe` to use as input features for validation.\n validation_targets: A `DataFrame` containing exactly one column from\n `california_housing_dataframe` to use as target for validation.\n \n Returns:\n A `LinearRegressor` object trained on the training data.\n \"\"\"\n\n periods = 10\n steps_per_period = steps / periods\n\n # Create a linear regressor object.\n linear_regressor = tf.contrib.learn.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=tf.train.FtrlOptimizer(learning_rate=learning_rate),\n gradient_clip_norm=5.0\n )\n \n training_input_function = lambda: input_function(\n training_examples, training_targets)\n training_input_function_for_predict = lambda: input_function(\n training_examples, training_targets, single_read=True)\n validation_input_function_for_predict = lambda: input_function(\n validation_examples, validation_targets, single_read=True)\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print \"Training 
model...\"\n print \"RMSE (on training data):\"\n training_rmse = []\n validation_rmse = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.fit(\n input_fn=training_input_function,\n steps=steps_per_period\n )\n # Take a break and compute predictions.\n training_predictions = list(linear_regressor.predict(\n input_fn=training_input_function_for_predict))\n validation_predictions = list(linear_regressor.predict(\n input_fn=validation_input_function_for_predict))\n # Compute training and validation loss.\n training_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(training_predictions, training_targets))\n validation_root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(validation_predictions, validation_targets))\n # Occasionally print the current loss.\n print \" period %02d : %0.2f\" % (period, training_root_mean_squared_error)\n # Add the loss metrics from this period to our list.\n training_rmse.append(training_root_mean_squared_error)\n validation_rmse.append(validation_root_mean_squared_error)\n print \"Model training finished.\"\n\n \n # Output a graph of loss metrics over periods.\n plt.ylabel(\"RMSE\")\n plt.xlabel(\"Periods\")\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(training_rmse, label=\"training\")\n plt.plot(validation_rmse, label=\"validation\")\n plt.legend()\n\n return linear_regressor",
"_____no_output_____"
],
[
"_ = train_model(\n learning_rate=1.0,\n steps=500,\n feature_columns=feature_columns,\n training_examples=training_examples,\n training_targets=training_targets,\n validation_examples=validation_examples,\n validation_targets=validation_targets)",
"_____no_output_____"
]
],
[
[
"### One-hot encoding for discrete features\n\nDiscrete (i.e. strings, enumerations, integers) features are usually converted into families of binary features before training a logistic regression model.\n\nFor example, suppose we created a synthetic feature that can take any of the values `0`, `1` or `2`, and that we have a few training points:\n\n| # | feature_value |\n|---|---------------|\n| 0 | 2 |\n| 1 | 0 |\n| 2 | 1 |\n\nFor each possible categorical value, we make a new **binary** feature of **real values** that can take one of just two possible values: 1.0 if the example has that value, and 0.0 if not. In the example above, the categorical feature would be converted into three features, and the training points now look like:\n\n| # | feature_value_0 | feature_value_1 | feature_value_2 |\n|---|-----------------|-----------------|-----------------|\n| 0 | 0.0 | 0.0 | 1.0 |\n| 1 | 1.0 | 0.0 | 0.0 |\n| 2 | 0.0 | 1.0 | 0.0 |",
"_____no_output_____"
],
[
"### Bucketized (binned) features\n\nBucketization is also known as binning.\n\nWe can bucketize `population` into the following 3 buckets (for instance):\n- `bucket_0` (`< 5000`): corresponding to less populated blocks\n- `bucket_1` (`5000 - 25000`): corresponding to mid populated blocks\n- `bucket_2` (`> 25000`): corresponding to highly populated blocks\n\nGiven the preceding bucket definitions, the following `population` vector:\n\n [[10001], [42004], [2500], [18000]]\n\nbecomes the following bucketized feature vector:\n\n [[1], [2], [0], [1]]\n\nThe feature values are now the bucket indices. Note that these indices are considered to be discrete features. Typically, these will be further converted in one-hot representations as above, but this is done transparently.\n\nTo define bucketized features, use `bucketized_column`, which requires the boundaries separating each bucket. The function in the cell below will calculate these boundaries based on quantiles, so that each bucket contains an equal number of elements.",
"_____no_output_____"
]
],
[
[
"def get_quantile_based_boundaries(feature_values, num_buckets):\n boundaries = np.arange(1.0, num_buckets) / num_buckets\n quantiles = feature_values.quantile(boundaries)\n return [quantiles[q] for q in quantiles.keys()]\n\n# Divide households into 7 buckets.\nbucketized_households = tf.contrib.layers.bucketized_column(\n households, boundaries=get_quantile_based_boundaries(\n california_housing_dataframe[\"households\"], 7))\n\n# Divide longitude into 10 buckets.\nbucketized_longitude = tf.contrib.layers.bucketized_column(\n longitude, boundaries=get_quantile_based_boundaries(\n california_housing_dataframe[\"longitude\"], 10))",
"_____no_output_____"
]
],
[
[
"### Task 1: Train the model on bucketized feature columns.\n**Bucketize all the real valued features in our example, train the model and see if the results improve.**\n\nIn the preceding code block, two real valued columns (namely `households` and `longitude`) have been transformed into bucketized feature columns. Your task is to bucketize the rest of the columns, then run the code to train the model. There are various heuristics to find the range of the buckets. This exercise uses a quantile-based technique, which chooses the bucket boundaries in such a way that each bucket has the same number of examples.",
"_____no_output_____"
]
],
[
[
"#\n# Your code here: bucketize the following columns below, following the example above.\n#\nbucketized_latitude = \nbucketized_housing_median_age = \nbucketized_median_income =\nbucketized_rooms_per_person =\n\nbucketized_feature_columns=set([\n bucketized_longitude,\n bucketized_latitude,\n bucketized_housing_median_age,\n bucketized_households,\n bucketized_median_income,\n bucketized_rooms_per_person])\n\n_ = train_model(\n learning_rate=1.0,\n steps=500,\n feature_columns=bucketized_feature_columns,\n training_examples=training_examples,\n training_targets=training_targets,\n validation_examples=validation_examples,\n validation_targets=validation_targets)",
"_____no_output_____"
]
],
[
[
"### Feature crosses\n\nCrossing two (or more) features is a clever way to learn non-linear relations using a linear model. In our problem, if we just use the feature `latitude` for learning, the model might learn that city blocks at a particular latitude (or within a particular range of latitudes since we have bucketized it) are more likely to be expensive than others. Similarly for the feature `longitude`. However, if we cross `longitude` by `latitude`, the crossed feature represents a well defined city block. If the model learns that certain city blocks (within range of latitudes and longitudes) are more likely to be more expensive than others, it is a stronger signal than two features considered individually.\n\nCurrently, the feature columns API only supports discrete features for crosses. To cross two continuous values, like `latitude` or `longitude`, we cab bucketize them.\n\nIf we cross the `latitude` and `longitude` features (supposing, for example, that `longitude` was bucketized into `2` buckets, while `latitude` has `3` buckets), we actually get six crossed binary features. Each of these features will get its own separate weight when we train the model.",
"_____no_output_____"
],
[
"### Task 2: Train the model using feature crosses.\n\n**Add a feature cross of `longitude` and `latitude` to your model, train it, and determine whether the results improve.**",
"_____no_output_____"
]
],
[
[
"long_x_lat = tf.contrib.layers.crossed_column(\n set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)\n\n#\n# Your code here: Create a feature column set that includes the cross.\n#\nfeature_columns_with_cross = \n\n_ = train_model(\n learning_rate=1.0,\n steps=500,\n feature_columns=feature_columns_with_cross,\n training_examples=training_examples,\n training_targets=training_targets,\n validation_examples=validation_examples,\n validation_targets=validation_targets)",
"_____no_output_____"
]
],
[
[
"### Optional Challenge: Try out more synthetic features.\n\nSo far, we've tried simple bucketized columns and feature crosses, but there are many more combinations that could potentially improve the results. For example, you could cross multiple columns. What happens if you vary the number of buckets? What other synthetic features can you think of? Do they improve the model?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a99cd822c235ea064363af84e76d574ffa0874a
| 8,132 |
ipynb
|
Jupyter Notebook
|
demos/Lecture14-Demos-Blank.ipynb
|
annabellegrimes/CPEN-400Q
|
044d521f8109567ec004a9c882898f9e2eb5a19e
|
[
"MIT"
] | 6 |
2022-01-12T22:57:13.000Z
|
2022-03-15T21:20:59.000Z
|
demos/Lecture14-Demos-Blank.ipynb
|
annabellegrimes/CPEN-400Q
|
044d521f8109567ec004a9c882898f9e2eb5a19e
|
[
"MIT"
] | null | null | null |
demos/Lecture14-Demos-Blank.ipynb
|
annabellegrimes/CPEN-400Q
|
044d521f8109567ec004a9c882898f9e2eb5a19e
|
[
"MIT"
] | 3 |
2022-02-04T07:48:01.000Z
|
2022-03-22T21:40:06.000Z
| 20.587342 | 50 | 0.534678 |
[
[
[
"import pennylane as qml\nfrom pennylane import numpy as np\nfrom lecture14_helpers import *\n\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs",
"_____no_output_____"
],
[
"n_samples = 100\n\nX, y = make_blobs(\n n_samples=n_samples, \n centers=[[0.1, 0.2], [0.25, 0.4]], \n cluster_std=0.05, \n n_features=2,\n random_state=6\n)",
"_____no_output_____"
]
],
[
[
"## Model 1: amplitude embedding",
"_____no_output_____"
],
[
"<img src=\"fig/model1.png\" width=\"500px\">",
"_____no_output_____"
],
[
"## Model 2: angle embedding",
"_____no_output_____"
],
[
"<img src=\"fig/model2.png\" width=\"500px\">",
"_____no_output_____"
],
[
"## Model 3: multi-layer angle embedding",
"_____no_output_____"
],
[
"<img src=\"fig/model3.png\" width=\"500px\">",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a99d7e1644198d3947ddaa932a4bcf58b2a8900
| 480,733 |
ipynb
|
Jupyter Notebook
|
notebooks/cloud_top_pressure_retrieval.ipynb
|
lilianhee/quantnn
|
1fc04411c69f089650b642fb3d0025724248cf2e
|
[
"MIT"
] | null | null | null |
notebooks/cloud_top_pressure_retrieval.ipynb
|
lilianhee/quantnn
|
1fc04411c69f089650b642fb3d0025724248cf2e
|
[
"MIT"
] | 3 |
2022-01-11T08:41:03.000Z
|
2022-02-11T14:25:09.000Z
|
notebooks/cloud_top_pressure_retrieval.ipynb
|
lilianhee/quantnn
|
1fc04411c69f089650b642fb3d0025724248cf2e
|
[
"MIT"
] | 5 |
2020-12-11T03:18:32.000Z
|
2022-02-14T10:32:09.000Z
| 812.048986 | 287,616 | 0.939501 |
[
[
[
"# MODIS Cloud Top Pressure Retrieval\n\nThis notebook demsontrates the application of QRNNs to retrieve cloud-top pressure (CTP) from MODIS infrared observations. A similar retrieval will be used in the next version of the EUMETSAT PPS package, for the production\nof near-real time (NRT) Meteorological data to support Nowcasting activities.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"from quantnn.models.keras.xception import XceptionNet\n\nmodel = XceptionNet(15, 101)",
"_____no_output_____"
]
],
[
[
"## Downloading the data",
"_____no_output_____"
]
],
[
[
"from quantnn.examples.modis_ctp import download_data\ndownload_data()",
"_____no_output_____"
]
],
[
[
"## Loading and preparing the training data",
"_____no_output_____"
]
],
[
[
"import pathlib\nfrom quantnn.normalizer import Normalizer\ntraining_data = np.load(\"data/ctp_training_data.npz\")\nx_train, y_train = training_data[\"x\"], training_data[\"y\"]",
"_____no_output_____"
],
[
"from quantnn.normalizer import Normalizer\nnormalizer = Normalizer(x_train)\nx_train = normalizer(x_train)",
"_____no_output_____"
]
],
[
[
"## Defining a neural network model",
"_____no_output_____"
]
],
[
[
"quantiles = [0.01, 0.05, 0.15, 0.25, 0.35, 0.45, 0.5, 0.55, 0.65, 0.75, 0.85, 0.95, 0.99]",
"_____no_output_____"
],
[
"import torch\nimport torch.nn as nn\n\nn_layers = 4\nn_neurons = 256\n\n# First block\nlayers = [nn.Linear(16, n_neurons), nn.BatchNorm1d(n_neurons), nn.ReLU(), ]\n# Center blocks\nfor _ in range(n_layers):\n layers.extend([nn.Linear(n_neurons, n_neurons), nn.BatchNorm1d(n_neurons), nn.ReLU()])\n# Final block\nlayers.append(nn.Linear(n_neurons, len(quantiles)))\n\nmodel = nn.Sequential(*layers)",
"_____no_output_____"
]
],
[
[
"## Training the neural network",
"_____no_output_____"
]
],
[
[
"from quantnn import QRNN\nqrnn = QRNN(quantiles=quantiles,\n model=model)",
"_____no_output_____"
],
[
"from torch.utils.data import TensorDataset, DataLoader\nx_tensor = torch.tensor(x_train).float()\ny_tensor = torch.tensor(y_train).float()\ntraining_data = TensorDataset(x_tensor, y_tensor)\ntraining_loader = DataLoader(training_data,\n batch_size=256,\n shuffle=True,\n num_workers=4)",
"_____no_output_____"
],
[
"n_epochs = 10\noptimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs)\nqrnn.train(training_loader,\n optimizer=optimizer,\n scheduler=scheduler,\n n_epochs=n_epochs)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs)\nqrnn.train(training_loader,\n optimizer=optimizer,\n scheduler=scheduler,\n n_epochs=n_epochs)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001)\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs)\nresults = qrnn.train(training_loader,\n optimizer=optimizer,\n scheduler=scheduler,\n n_epochs=n_epochs)",
"Epoch 1 / 10: train. loss = 34.9570, lr. = 0.1000, time = 78.039981 s\nEpoch 2 / 10: train. loss = 32.6822, lr. = 0.0976, time = 79.268005 s\nEpoch 3 / 10: train. loss = 32.1679, lr. = 0.0905, time = 109.817379 s\nEpoch 4 / 10: train. loss = 31.8007, lr. = 0.0794, time = 100.716209 s\nEpoch 5 / 10: train. loss = 31.4462, lr. = 0.0655, time = 102.477865 s\nEpoch 6 / 10: train. loss = 31.1048, lr. = 0.0500, time = 101.54901 s\nEpoch 7 / 10: train. loss = 30.7651, lr. = 0.0345, time = 96.753168 s\nEpoch 8 / 10: train. loss = 30.4444, lr. = 0.0206, time = 84.953588 s\nEpoch 9 / 10: train. loss = 30.1356, lr. = 0.0095, time = 91.85199 s\nEpoch 10 / 10: train. loss = 29.9127, lr. = 0.0024, time = 88.990988 s\nEpoch 1 / 10: train. loss = 29.6596, lr. = 0.0100, time = 64.579947 s\nEpoch 2 / 10: train. loss = 29.5332, lr. = 0.0098, time = 62.689981 s\nEpoch 3 / 10: train. loss = 29.3946, lr. = 0.0090, time = 62.506368 s\nEpoch 4 / 10: train. loss = 29.2401, lr. = 0.0079, time = 66.526714 s\nEpoch 5 / 10: train. loss = 29.0724, lr. = 0.0065, time = 65.567004 s\nEpoch 6 / 10: train. loss = 28.8954, lr. = 0.0050, time = 66.706429 s\nEpoch 7 / 10: train. loss = 28.7136, lr. = 0.0035, time = 61.993951 s\nEpoch 8 / 10: train. loss = 28.5384, lr. = 0.0021, time = 61.589714 s\nEpoch 9 / 10: train. loss = 28.3947, lr. = 0.0010, time = 70.683748 s\nEpoch 10 / 10: train. loss = 28.2996, lr. = 0.0002, time = 62.438731 s\nEpoch 1 / 10: train. loss = 28.2705, lr. = 0.0010, time = 59.03849 s\nEpoch 2 / 10: train. loss = 28.2641, lr. = 0.0010, time = 62.203731 s\nEpoch 3 / 10: train. loss = 28.2585, lr. = 0.0009, time = 59.684058 s\nEpoch 4 / 10: train. loss = 28.2531, lr. = 0.0008, time = 64.237426 s\nEpoch 5 / 10: train. loss = 28.2478, lr. = 0.0007, time = 57.994276 s\nEpoch 6 / 10: train. loss = 28.2434, lr. = 0.0005, time = 58.905007 s\nEpoch 7 / 10: train. loss = 28.2390, lr. = 0.0003, time = 62.977709 s\nEpoch 8 / 10: train. loss = 28.2359, lr. 
= 0.0002, time = 58.249768 s\nEpoch 9 / 10: train. loss = 28.2335, lr. = 0.0001, time = 58.4039 s\nEpoch 10 / 10: train. loss = 28.2319, lr. = 0.0000, time = 63.900762 s\n"
]
],
[
[
"## Applying the CTP retrieval\n\nTo validate the CTP retrieval, we will apply the retrieval to observations of Hurricane Nicole of the 2016 Hurricane season and compare the results to the cloud-top pressure determined by the CALIOP lidar on the CALIPSO satellite, which is also used as reference to generate the training data.\n",
"_____no_output_____"
]
],
[
[
"validation_data = np.load(\"data/ctp_validation_data.npz\")\n\n# Overview over full MODIS observations.\nlons_rgb = validation_data[\"longitude_rgb\"]\nlats_rgb = validation_data[\"latitude_rgb\"]\nmodis_rgb = validation_data[\"modis_rgb\"]\nmodis_bt_11 = validation_data[\"bt_11_rgb\"]\nmodis_bt_12 = validation_data[\"bt_12_rgb\"]\n\n# Caliop obervations used as reference\nlons_c = validation_data[\"longitude\"]\nlats_c = validation_data[\"latitude\"]\nctp_c = validation_data[\"ctp\"]\ninput_data = validation_data[\"input_data\"]",
"_____no_output_____"
]
],
[
[
"### Hurricane Nicole\n\nThe plot below shows an overview of the scene the we will be using to validate the retrieval. The scene depicts an overpass of the CALIOP lidar over Hurricane Nicole from the 2016 Hurrican season. The line plotted ontop of the true-color image in panel (a) displays the swath of the CALIOP lidar. As you can see, it passed directly through the eye of the Hurricane.\n\nPanel (b) and (c) show the MODIS observations that are used as input for the retrieval. The two channels are located in the far infrared region and thus measure thermal emission from the atmosphere. Nicole's high clouds are visible as cold regions in the image since the radiation is emitted higher up in the atmosphere, where it is colder.`",
"_____no_output_____"
]
],
[
[
"import cartopy.crs as ccrs\nfrom matplotlib.gridspec import GridSpec\n\nf = plt.figure(figsize=(14, 4))\ngs = GridSpec(2, 3, height_ratios=[1.0, 0.05])\n\nax = plt.subplot(gs[0, 0], projection=ccrs.PlateCarree())\ncolors = modis_rgb[:-1, :-1].reshape(-1, 4) / 256.0\nl = plt.plot(lons_c, lats_c, c=\"r\")\nax.pcolormesh(lons_rgb, lats_rgb, lons_rgb, color=colors)\nax.set_xticks(np.linspace(-85, -55, 7))\nax.set_xlabel(\"Longitude [$^\\circ\\ E$]\")\nax.set_yticks(np.linspace(20, 40, 6))\nax.set_ylabel(\"Latitude [$^\\circ\\ N$]\")\nax.set_title(\"(a) MODIS true color\")\nax = plt.subplot(gs[1, 0])\nax.set_axis_off()\nax.legend(handles=l, labels=[\"CALIOP swath\"], loc=\"center\")\n\nax = plt.subplot(gs[0, 1], projection=ccrs.PlateCarree())\nm = ax.pcolormesh(lons_rgb, lats_rgb, modis_bt_11)\nax.set_xticks(np.linspace(-85, -55, 7))\nax.set_xlabel(\"Longitude [$^\\circ\\ E$]\")\nax.set_yticks(np.linspace(20, 40, 6))\nax.set_title(\"(b) MODIS $11\\mu$\")\nax = plt.subplot(gs[1, 1])\nplt.colorbar(m, cax=ax, orientation=\"horizontal\", label=\"Brightness temperature\")\n\nax = plt.subplot(gs[0, 2], projection=ccrs.PlateCarree())\nimg = ax.pcolormesh(lons_rgb, lats_rgb, modis_bt_12)\nax.set_xticks(np.linspace(-85, -55, 7))\nax.set_xlabel(\"Longitude [$^\\circ\\ E$]\")\nax.set_title(\"(c) MODIS $12\\mu$\")\nax = plt.subplot(gs[1, 2])\nplt.colorbar(m, cax=ax, orientation=\"horizontal\", label=\"Brightness temperature\")\n\nf.canvas.draw()\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"### Running the retrieval\n\nThe validation data comes with pre-processed observation along the CALOP swath. Evaluating the retrieval therfore only requires normalizing the data (using the same normalizer that was used during training) and evaluating the network prediction.",
"_____no_output_____"
]
],
[
[
"y_pred = qrnn.predict(normalizer(input_data))\n\n# CALIOP reference data\ny_ref = ctp_c[:, 0]\ny_ref[y_ref < 0.0] = np.nan",
"WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
]
],
[
[
"The plot below shows the QRNN-predicted cloud-top pressure as confidence intervals together with the reference data from the CALIOP LIDAR (black markers). Although there is considerable uncertainty in the retrieval, all reference values lie withing the predicted intervals.\n\nHowever, the cloud-top pressure of the hurricane seems to be rather consistently underestimating the reference pressure, which indicates that the the uncertainty estimates are not very well calibrated in this region. This is expected, to some extent, because the QRNN learned to predicted uncertainty based on the a-priori distribution of cloud-top pressures in the training data, which is quite different from those of the Hurricane.",
"_____no_output_____"
]
],
[
[
"from quantnn.plotting import plot_confidence_intervals\n\nf, ax = plt.subplots(1, 1)\nplot_confidence_intervals(ax, lats_c, y_pred, qrnn.quantiles)\nax.scatter(lats_c, y_ref, c=\"k\", marker=\".\", s=2)\n\nax.set_xlim([lats_c.min(), lats_c.max()])\nax.set_ylim([0, 1000])\nax.invert_yaxis()\nax.set_ylabel(\"Cloud-top pressure [hPA]\")\nax.set_xlabel(\"Latitude [$^\\circ\\ N$]\")",
"_____no_output_____"
]
],
[
[
"## Comparison to XGBoost\n\nWe conclude this example by comparing the QRNN performance to that of another machine learning method: gradient-boosted regression trees.",
"_____no_output_____"
]
],
[
[
"import xgboost as xgb\nxgb_retrieval = xgb.XGBRegressor(n_estimators=100,\n reg_lambda=1,\n gamma=0,\n max_depth=3)\nxgb_retrieval.fit(x_train, y_train)",
"_____no_output_____"
],
[
"from quantnn import posterior_mean\ny_pred_xgb = xgb_retrieval.predict(normalizer(input_data))\ny_pred_qrnn = posterior_mean(y_pred.numpy(), qrnn.quantiles)",
"_____no_output_____"
],
[
"f, ax = plt.subplots(1, 1)\nplot_confidence_intervals(ax, lats_c, y_pred, qrnn.quantiles)\nax.scatter(lats_c, y_pred_xgb, c=\"grey\", marker=\".\", s=2)\nax.scatter(lats_c, y_pred_qrnn, c=\"navy\", marker=\".\", s=2)\nax.scatter(lats_c, y_ref, c=\"k\", marker=\".\", s=2)\n\nax.set_xlim([lats_c.min(), lats_c.max()])\nax.set_ylim([0, 1000])\nax.invert_yaxis()\nax.set_ylabel(\"Cloud-top pressure [hPA]\")\nax.set_xlabel(\"Latitude [$^\\circ\\ N$]\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a99de466a95a44f63dd08b501dd94cedc32b007
| 24,646 |
ipynb
|
Jupyter Notebook
|
notes/ECO/ch1/1.ipynb
|
JingjieYang/1-IB
|
3479227de77d3163c0095622fdea4bc0c8ba1c71
|
[
"MIT"
] | 1 |
2018-02-18T06:24:47.000Z
|
2018-02-18T06:24:47.000Z
|
notes/ECO/ch1/1.ipynb
|
Pyxidatol-C/1-IB
|
3479227de77d3163c0095622fdea4bc0c8ba1c71
|
[
"MIT"
] | null | null | null |
notes/ECO/ch1/1.ipynb
|
Pyxidatol-C/1-IB
|
3479227de77d3163c0095622fdea4bc0c8ba1c71
|
[
"MIT"
] | null | null | null | 64.857895 | 633 | 0.679786 |
[
[
[
"`2017-09-11 Monday`\n# Programme sur les 2 ans\nIntroduction ($\\sum\\text{termes techniques}$)\n\n1. Microéconomie\n2. Macroéconomie\n3. Ouverture internationale\n4. Économie du développement\n\n# Chapitre 1: Les fondements de l'Economie\n## Introduction\nL'économie cherche à résoudre les problèmes de satisfaction, des besoins fondamentaux des individus.\n\n## 1. La rareté\nLa rareté est un concept de base qui sert à mesurer notre capacité à satisfaire nos besoins fondamentaux.\n\nPour les économistes, tous les biens et services (B&S) qui ont un prix sont relativement (plus ou moins) rares. En effet, les B&S sont rares par rapport à la demande des personnes. \nEx. des bus, du pétrole ...\n\nLes B&S qui satisfont nos besoins n'existent pas en quantité suffisante c'est-à-dire en quantité rare ou limitée. Notons qu'on parle bien d'une rareté des ressources.\n\nLes économistes utilisent le terme «rareté» d'une façon différente de celle de l'usage quotidien. Cette notion constitue le postulat de base d'un grand nombre de théories économiques.\n\nLa rareté n'est pas une hypothèse mais une réalité universelle et atemporelle: presque tout et rare.\n\nLes bus ne sont pas rares à Paris mais pour les économistes, ils le sont car peu de personnes peuvent acquérir (acheter) un bus.\n\n### Définition\nLa rareté est une tension entre les besoins et les ressources disponibles pour les satisfaire.\n\n- L'économie est la science de la rareté.\n- L'économie étudie la manière dont la société gère ses ressources rares.\n- Cette science étudie comment les ressources rares sont utilisées pour satisfaire les besoins des hommes qui vivent en société.\n\nLa rareté mesure le caractère limité des ressources de la société.\n\n### Exemple\nVenezuela, aujourd'hui (**cadre spatial et cadre temporel**): rareté en terme de nourriture\n\n## 2. Le choix\nles économistes tentent de résoudre le problème de rareté en faisant des choix. \nLes personnes n'ont pas des revenus infinis. 
Ils doivent faire des choix pour acheter des B&S. \nIls doivent prendre de nombreuses décisions pour bien utiliser leurs ressources limitées. \nEn général, pour obtenir un B&S, il faut renoncer à un autre que l'on aime. Prendre une décision revient donc à comparer deux objectifs. \nEx. 2 T-shirts \nComme la plupart des individus vivent en société, ils sont confrontés à d'autres types de choix. \n\nL'exemple traditionnel en Économie oppose le beurre au canon. Plus l'on dépense en Défense Nationale (canon) pour protéger notre territoire, moins il restera à dépenser pour améliorer notre niveau de la vie à l'intérieur (beurre). *(Samuelson)* \nDans les sociétés contemporaines, un choix devenu vital est celui qui oppose l'environnement propre et niveau de revenu. Ex. les lois qui contraignent les entreprises à réduire leur niveau de pollution génèrent une augmentation des coûts de production des B&S. En effet, les entreprises(e) en question gagnent moins de revenus, payent des salaires inférieurs à leurs employés et augmentent le prix de leurs produits. Finalement, si les lois antipollution nous procurent un environnement plus salubre (sain), elles le font au prix d'une baisse des revenus des propriétaires, des employés et des clients des firmes polluantes.\n\nLa société doit souvent choisir entre efficacité et justice. \nElle se réfère à la taille du gâteau. \nLa justice consiste à distribuer équitablement entre les membres de la société les produits de ces ressources. Elle se réfère à la façon de la partager. \nSavoir que l'on doit faire des choix ne nous renseigne pas sur les décisions qui seront ou devront.\n\n## 3. Le coût d'opportunité\nParce que l'on doit faire des choix, prendre une décision implique d'être capable de comparer des coûts et des bénéfices des diverses options possibles. \nEx. du choix de l'étudiant d'une année supplémentaire à la fac. \n\nToute décision induit (suppose) un coût appelé le coût d'opportunité. 
\nLe _coût d'opportunité_ est le gain maximum que l'on aurait plus obtenir dans le meilleur emploi alternatif possible d'une ressource. Par ex., les revenus que l'ont emploient pour un voyage ne peuvent pas être utilisés pour un placement financier.\n\n### Définition\n_Le coût d'opportunité d'un B&S est la quantité d'autres B&S à laquelle il faut renoncer pour produire une unité supplémentaire de ce B&S._ \nLe coût d'opportunité est la meilleure alternative prévue d'avance quand une décision économique est prise. \nClassification des B&S en fonction du coût d'opportunité: lorsqu'un B&S a un coût d'opportunité car il est relativement rare, alors il a un prix et il est classé comme un B&S économique. Par contre, lorsqu'un B&S est disponible en abondance et gratuit et dont la production ne nécessite aucun travail humain, alors il est classé comme un B&S libre ou naturel. Ex. l'air est abondant, tout le monde peut en avoir autant qu'il veut.\n\n## 4. Les questions fondamentales\nToute société humaine, que ce soit une nation industrialisée avancée, une économie à planification centrale ou une nation tribale isolée, est inévitablement confrontée à trois problème sou questions fondamentales. \nToute société doit trouver un moyen de déterminer *quelles* marchandises sont produites, *comment* elles le sont et pour *qui* elles le sont.\n\n### Que produire?\nQuelles marchandises sont produites et en quelles quantités? A quel moment la production sera-t-elle mise en œuvre?\n\nLa production est l'activité consistant à créer des B&S propres à satisfaire des besoins individuels ou collectifs.\n\nProduire = utiliser conjointement (combiner) des ressources non directement aptes à satisfaire nos besoins en vue d'obtenir des B&S.\n\nLa production est la somme de la production marchande et la production non-marchande. \nLa production marchande est la production des B&S destinée à être vendue sur un marché. 
\nLa production non-marchande représente les services gratuits ou quasi-gratuits réalisés avec des facteurs de production obtenus sur le marché.\n\n### Comment les B&S sont-ils produits?\nUne société (un pays) détermine qui effectuera la production, avec quelles ressources et à l'aide de quelles techniques de production. \nL'électricité est-elle produite à partir du pétrole, du charbon ou du soleil? Les usines fonctionnent-elles avec des hommes ou des robots?\n\n### Pour qui les biens sont-ils produits?\nQui profitera des fruits de l'activité économique? La répartition du revenu et de la richesse est-elle impartiale et équitable? Comment le PIBB est-il partagé entre les différents ménages? Y a-t-il beaucoup de pauvres et quelques riches? A qui vont les revenus élevés? L'université doit-elle être pour tous ou pour ceux qui peuvent payer l'enseignement?\n\nToute l'activité économique qui permettra de répondre à ces questions est organisée à l'aide de trois opérations: la production, l'échange et la consommation.\n\n## 5. Les facteurs de production\nIl y a quatre ressources ou moyens qui permettent à une économie de produire ses produits et donc de répondre à ces trois questions fondamentales. \nToute société doit faire des choix concernant les moyens de production et les B&S produits pour l'Economie.\n\n### a) La terre (T)\nLa terre ou ressources naturelles inclut beaucoup d'éléments. Cela comprend tout ce qui est sous la terre comme l'or, le pétrole, le gaz naturel, etc., et tout ce qui est au-dessus de la terre qui est cultivé comme le riz, le blé.\n\n### b) Le travail (L ou W)\nLe travail est un facteur humain. Il désigne dans le pays industrialisés (PI) une activité humaine rémunérée qui donne lieu à une contrepartie monétaire ou en nature. \nLes personnes ou actifs mobilisent leurs capacités physiques ou intellectuelles pour obtenir un B&S qui répondent à des besoins déterminés. 
\nA noter l'importance de l'organisation du travail qui représente la façon dont l'activité est répartie entre les différents salariés de l'entreprise.\n\n\tPopulation (P.) totale\n\t| - P. inactive\n\t| - P. active = P.AO + P.AI\n\t | - P. A. Occupée\n\t | - P. A. Inoccupée = chômeurs\n\t \n\n### c) Le capital\nLe capital provient de l'investissement(*=achat*) en capital technique et en capital humain.\n\n- Le capital technique est composé des moyens matériels comme les machines, les routes. \nPlus précisément, il y a le stock de biens manufacturés comme les usines et les machines d'une part \net le stock du pays comme les routes, les chemins de fer, les ports et les aéroports, les communications d'autre part.\n- Le capital humain qui représente la valeur de la force du travail comme l'éducation. \nLe capital humain est l'ensemble des capacités intellectuelles et professionnelles d'un individu qui lui assurent des revenus monétaires futurs. \nCf. Gary Becker, prix Nobel d'Economie, 1992, est à l'origine de cette expression. \n\nOn distingue aussi deux formes de capital: \n\n- Le capital fixe qui sert plusieurs fois, à plusieurs cycles de production.\n- Le capital circulant qui disparaît dès la 1ère utilisation dans le processus de production.\n\nEx. La production de transport a besoin de capital fixe comme le camion et de capital circulant comme l'essence.\n\nNB: Facteurs complémentaires et substituables\n\n- Facteurs complémentaires: l'usage d'un facteur rend nécessaire l'usage de l'autre\n- Facteurs substituables: l'usage de l'un peut être remplacé par l'usage d'un autre facteur. 
On distingue le facteur à forte intensité capitalistique quand il y a peu de travail et beaucoup de capital du facteur à faible capacité capitalistique quand il y a peu de capital et beaucoup de travail.\n\nNe pas confondre la _capital technique_ qui incorpore un certain progrès technique (machines récentes) du _capital physique_ qui représente les biens produits dans le passé et qui sont des moyens de la production présente et future (bâtiments, matériel, machines, produits semi-finis, matières premières) et du _capital financier_ qui regroupe les actifs qui rapportent un intérêt.\n\n### d) Le management\nC'est l'ensemble des connaissances concernant l'organisation et la gestion d'une entreprise. \nÀ noter l'importance de l'organisation du travail qui représente la façon dont l'activité est répartie entre les différents salariés de l'entreprise.\n\n\t(quantité++ && temps++) OU (quantité-- && temps--) ?\n\nProductivité = efficacité de production = $\\frac{Quantité}{temps}$\n\n## 6. Les courbes de possibilité de production\nL'ensemble de nos actes quotidiens, notre vie quotidienne dépend des actions de milliers de personnes que nous ne rencontrerons jamais mais qui ont contribué à produire tout ce dont nous jouissons chaque jour. \nL'Economie coordonne les activités de millions de personnes aux goûts et aux talents différents. Il y a donc une interdépendance économique. \nLes économistes pour montrer les concepts de rareté, de choix, de coût d'opportunité utilisent la(es) courbe(s) des possibilités de production appelée(s) plus précisément la frontière des possibilités de production soit la `fpp`.\n\nLa `fpp` montre les quantités maximales de production qui peuvent être obtenues par l'économie, compte tenu des connaissances technologiques et de la quantité de moyens de production disponibles. On parle de production potentielle.\n\nLa `fpp` exprime l'ensemble des combinaisons de biens et services accessible pour une société donnée. \nEx. 
p.6 \nAutre exemple celui de Samuelson avec les canons et le beurre. \nLes pays ne disposent pas de moyens illimités des divers produits. Ils sont contraints par les ressources et la technologie disponibles. \n\n### Tableau des possibilités de production\n| Cas | Beurre | Canon |\n|-----|--------|-------|\n|A |0 |15 |\n|B |1 |14 |\n|C |2 |12 |\n|D |3 |9 |\n|E |4 |5 |\n|F |5 |9 |\n|G |5 |0 |\n\n\n\n- C, D: production sous-maximale\n- E, F: situation impossible à moment donné\n\n### Représentations avec la fpp\n[illustrations](../fpp.ipynb)\n\n## 7. L'utilité\n> Maximisation du bien-être, comportement économique rationnel\nIls cherchent à maximiser leur utilité (càd le degré de satisfaction que leurs procurent leurs achats) compte tenu de leurs ressources et des prix fixés sur le marché.\n\nL'utilité mesure le degré de satisfaction que l'on tire d'un produit.\n\nCette notion de l'utilité provient de l'Ecole des Marginalistes. L'école se base sur l'utilité marginale pour décrire la valeur économique d'un produit. \n\nL'utilité mesure la satisfaction des consommateurs et les goûts de préférence entre plusieurs biens. Les ménages ou les consommateurs cherchent à maximiser leur utilité compte tenu de leur ressource (revenu) et des prix fixés par le marché. Ils recherchent à répartir leur budget entre tous les biens et services disponibles. La théorie du consommateur traite de toutes ces décisions prise par le consommateur. Les choix de consommation dépendent de nos besoins des goûts, des prix, des revenus (préférence).\n\n### Définition\nL'utilité désigne la satisfaction ou le plaisir retiré par un individu de la consommation d'un B&S.\n\n### Exemple\n\t\\ 1 / \\ 2 / \\ 3 / \\ 4 / \\ 5 /\n\tU maximale ... U minimale\n\nL'utilité marginale mesure la variation de la satisfaction liée à la consommation d'une unité supplémentaire d'un B&S. 
L'utilité marginale mesure la variation de l'utilité totale pour une variation très petite de la quantité consommée ($U_m$).\n\nSupposons:\n\n- La consommation de 1 café procure une utilité, $U_1$ = 10\n- et que celle de 2 cafés procure une utilité, $U_2$ = 15\n- ($U_3 = 17$)\n\nL'utilité totale augmente (sinon on ne prendra pas un autre café), mais moins fortement qu'avec le 1^er café. L'utilité marginale correspond au $U_m = U_2 - U_1 = 15 - 10 = 5$\n\nL'intérêt de cette notion d'utilité marginale et du raisonnement \"à la marge\" est de mettre en évidence la Loi de l'$U_m$ décroissante. L'utilité continue à croître puisque la consommation correspond à une utilité, mais elle augmente de moins en moins vite. \n\nNB: les prix relatifs = rapport entre les prix de deux biens ou de plusieurs biens\n\n## 8. Microéconomie et macroéconomie\nPour faciliter l'étude de l'économie, on la divise en deux branches. \n\nLa microéconomie étudie une partie de l'économie, la macroéconomie s'intéresse au fonctionnement de l'économie prise dans son ensemble.\n\nLa microéconomie étudie les comportements des individus, des consommateurs et des producteurs. Elle s'intéresse à la façon dont les choix des uns et des autres s'ajustent au travers de l'équilibre de la demande et de l'offre sur chaque marché des B&S.\n\nLa macroéconomie étudie et cherche des solutions aux grands problèmes économique comme l'inflation, le chômage, la croissance et le développement.\n\nRemarque: même si on distingue les deux sciences, en réalité on peut retrouver le comportement microéconomique dans la macroéconomie.\n\n## 9. Economie positive et économie normative\nL'économie combine des considérations normatives et des constatations positives.\n\n**L'économie positive** s'intéresse à l'explication objective ou scientifique du fonctionnement de l'économie. \n**L'économie normative** fournit des recommandations pour améliorer la situation économique: ces avis reposent sur des opinions. 
Ces opinions normatives peuvent aussi être à la base d'hypothèses simplificatrices nécessaires à la construction de modèles et méthodes.\n\n- Jean: Le salaire minimum légal est une des causes de chômage\n - économie positive / scientifique => descriptif\n- Paula: Le gouvernement devrait augmenter le salaire minimum légal\n - économie normative => prescriptif\n\n## 10. Circuit économique\nL'activité économique est le résultat d'innombrables opérations effectuées par une multitude d'unités élémentaires telles l'entreprise, les ménages, etc.) \nComme il est impossible de décrire tous ces mouvements particuliers, on regroupe ces unités élémentaires en grandes catégories (cf. les acteurs économiques) afin de schématiser les opérations économiques réalisées. \nLe circuit économique désigne une façon simplifiée de représenter l'activité économique. \nIl représente donc le fonctionnement d'une économie sous la forme de flux orientés reliant des agents économiques (entreprises, ménages, Etat etc.), des marchés (marché du travail) ou des opérations (consommation, production, etc.) \nFrançois Quesnay fut un des premiers à utiliser cette approche avec son «Tableau économique» en 1758. \nCf. les représentations p.9 et en cours\n\n\n\n## 11. 
Système de rationnement: économique planifiée à l'opposé de l'économie du marché libre\nLe rationnement désigne une situation de marché dans laquelle les prix ne peuvent pas se fixer librement par le jeu de l’offre et de la demande, ce qui conduit à une limitation soit de la quantité offerte soit de la quantité demandée des b&s.\n\n(L'Etat décide sur les 3 questions fondamentales: que produire, comment produire, pour qui produire)\n\n### a) Economie planifiée\nC’est une économie où des agents économiques(=Etat) mettent en place un processus consistant à fixer les prix, pour un horizon de moyen terme (entre 3 et 5 ans) des grandeurs économiques et des mutations ou changements qualitative associées à l’évolution de ces grandeurs (modifications de la structure de consommation, de production ...).\nOn oppose planification impérative comme celle soviétique de la planification indicative comme en France (elle est née dans un contexte de pénurie).\nDans la planification impérative, les objectifs s’opposent aux agents économiques tout particulièrement aux entreprises qui sont tenues d’appliquer les objectifs fixés par le Plan.\n\n### b) Economie du marché libre\n#### Définition\n_Système économique qui accorde un rôle central aux mécanismes de marché pour assurer a régulation des activités techniques économiques._\n\n#### Exemple\nLes économies occidentales\n\nL’économie est alors considérée comme un ensemble de marchés assurant automatiquement l’équilibre entre les offres et les demandes de b&s économiques. \nCette représentation de l’économie est appelée libérale car la régulation ne doit pas être perturbée par les interventions de l’Etat.\n\n## 12. La croissance économique (Cr)\nLa croissance (Cr) désigne l’augmentation _durable_(=long terme / 10+ années) de la production d’une économie.\n\nC’est un phénomène quantitatif que l’on peut mesurer par le taux de Cr. du PIB càd le taux du produit intérieur brut. Il est donné en monnaie constante ou en volume ou en réel. 
\nLa Cr. réelle est la hausse du PIB après avoir éliminé la hausse due à l’inflation (I°) en %. \n\n> Variable économique (ex. **PIB**)\n>\n> - avec inflation\n> - prix courant\n> - en valeur\n> - en normal\n> - sans inflation\n> - prix constant\n> - en volume\n> - en réel\n\n\nEx. En 2017 (e), le taux de Cr des pays avancés s’élève à 1,6%, celui de la zone euro à 1,7%, et celui des pays émergeants et en développement à 4,2%.\n\nÉcart entre taux de Cr. de la zone euro et celui des pays émergeants: \n\n 4.2% - 1.7% = 2.5 POINTS de %\n\nA ne pas confondre avec l’expansion qui est aussi une hausse de la production d’un pays mais de courte durée soit une année.\n\nA ne pas confondre aussi Cr. et développement. En effet, ces termes sont proches mais distincts. \nCf. la section 2\n\n## 13. Le développement économique (Dt)\nSelon la définition de François Perroux, le développement est:\n\n- une combinaison de changements mentaux et sociaux\n- aptes à faire croître \n- cumulativement et durablement (une génération = 20 ans)\n- le produit réel global.\n\nC’est un phénomène qualitatif. \nIl est mesuré par l’indice de développement humain, IDH. \nCf. la section 4\n\n## 14. 
Le développement durable\nLe Dvt durable est un nouveau mode de développement (Dvt) officiellement proposé comme objectif à leur état-membre par la CNUCED (Conférence des Nations unies sur l’environnement et le Dvt) et la Banque mondiale par le rapport de la commission Brundtland.\n\nIl y a une volonté de concilier le bien-être des générations présentes avec la sauvegarde de l’environnement pour les générations futures.\n\n### Définition\nLe développement durable est une forme de développement qui répond aux besoins du présent (des générations actuelles) sans compromettre la capacité de répondre aux besoins des générations futures.\n\nAuteurs :\n\n- Adam SMITH (1723-1790)\n- Karl MARX (1818-1883)\n- Dr.Gro BRUNDTLAND (1939-..)\n\n## Complément: STOCK et FLUX\n### Définition\n- Le `stock` désigne une ou des grandeurs disponibles à un moment donné\n- Le `flux` désigne un mouvement de grandeur qui est transporté pendant une période\n\n### Exemple\nAvec 1 entreprise:\n\n- capital technique: **S**\n- investissement: **F**\n\n### Schéma\n =||=\n --------o - flux (Investissement)\n ------o |\n | |\n *\n \n *\n\n \\**************/\n \\************/ - stock\n \\__________ *|\n |*| - capital, usage\n |*|\n \\__\n \n\n",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown"
]
] |
4a99e8e2043653dc80b3dd495d850472669cf310
| 1,770 |
ipynb
|
Jupyter Notebook
|
Determinant_of_Matrix.ipynb
|
timothyolano/Linear-Algebra-58019
|
78894c2e1c1f35f21392ad73a16c5f76e48a0890
|
[
"Apache-2.0"
] | null | null | null |
Determinant_of_Matrix.ipynb
|
timothyolano/Linear-Algebra-58019
|
78894c2e1c1f35f21392ad73a16c5f76e48a0890
|
[
"Apache-2.0"
] | null | null | null |
Determinant_of_Matrix.ipynb
|
timothyolano/Linear-Algebra-58019
|
78894c2e1c1f35f21392ad73a16c5f76e48a0890
|
[
"Apache-2.0"
] | null | null | null | 25.285714 | 251 | 0.464972 |
[
[
[
"<a href=\"https://colab.research.google.com/github/timothyolano/Linear-Algebra-58019/blob/main/Determinant_of_Matrix.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"#Write a Python code to show as its solution. Save it as \"Determinant of Matrix\" in your GitHub repository.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nA = np.array([[1,2,-1],[4,6,-2],[-1,3,3]])\nprint(A)\nB = round(np.linalg.det(A))\nprint(B)\n",
"[[ 1 2 -1]\n [ 4 6 -2]\n [-1 3 3]]\n-14\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a99f98f8df77184dd20751f4ba86af9726acca3
| 23,716 |
ipynb
|
Jupyter Notebook
|
examples/devel/nsga2_constrained.ipynb
|
whitegr/xopt
|
4109cbae22523d753ea117aba5f5a6c807ef932a
|
[
"Apache-2.0"
] | 10 |
2021-10-12T18:39:50.000Z
|
2022-03-30T04:44:20.000Z
|
examples/devel/nsga2_constrained.ipynb
|
roussel-ryan/xopt
|
6a6d8aee9b9219689fc111eafef1cc26bb9b9159
|
[
"Apache-2.0"
] | 9 |
2019-10-02T00:45:16.000Z
|
2021-09-13T00:27:52.000Z
|
examples/devel/nsga2_constrained.ipynb
|
roussel-ryan/xopt
|
6a6d8aee9b9219689fc111eafef1cc26bb9b9159
|
[
"Apache-2.0"
] | 4 |
2019-10-02T00:40:46.000Z
|
2021-09-12T03:27:27.000Z
| 56.199052 | 142 | 0.615745 |
[
[
[
"# Notebook version of NSGA-II constrained, without scoop",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"#!/usr/bin/env python\n\n# This file is part of DEAP.\n#\n# DEAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n#\n# DEAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.\n\nimport array\nimport random\nimport json\n\nimport time\n\nimport numpy\n\nfrom math import sqrt, cos, atan\n\n#from scoop import futures\n\nfrom deap import algorithms\n#from deap import base\nfrom deap import benchmarks\nfrom deap.benchmarks.tools import diversity, convergence\nfrom deap import creator\nfrom deap import base, tools\nfrom xopt import fitness_with_constraints # Chris' custom routines\n\nfrom deap.benchmarks.tools import diversity, convergence, hypervolume\n\n\ncreator.create(\"FitnessMin\", fitness_with_constraints.FitnessWithConstraints, weights=(-1.0, -1.0, 1.0, 1.0))\ncreator.create(\"Individual\", array.array, typecode='d', fitness=creator.FitnessMin)\n\ntoolbox = base.Toolbox()\n\ndef uniform(low, up, size=None):\n try:\n return [random.uniform(a, b) for a, b in zip(low, up)]\n except TypeError:\n return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]\n\nNDIM = 2\nN_CONSTRAINTS = 2\n\n#BOUND_LOW, BOUND_UP = [0.1, 0.0] , [1.0, 1.0]\ndef CONSTR(individual):\n #time.sleep(.01)\n x1=individual[0]\n x2=individual[1]\n objectives = (x1, (1.0+x2)/x1)\n constraints = (x2+9*x1-6.0, -x2+9*x1-1.0)\n return (objectives, constraints)\n\nBOUND_LOW, BOUND_UP = [0.0, 0.0], [3.14159, 3.14159] \ndef TNK(individual): \n x1=individual[0]\n 
x2=individual[1]\n objectives = (x1, x2)\n constraints = (x1**2+x2**2-1.0 - 0.1*cos(16*atan(x1/x2)), 0.5-(x1-0.5)**2-(x2-0.5)**2 )\n return (objectives, constraints, (x1, x2))\n\n#BOUND_LOW, BOUND_UP = [-20.0, -20.0], [20.0, 20.0] \ndef SRN(individual): \n x1=individual[0]\n x2=individual[1]\n objectives = ( (x1-2.0)**2 + (x2-1.0)**2+2.0, 9*x1-(x2-1.0)**2 )\n constraints = (225.0-x1**2-x2**2, -10.0 -x1 - 3*x2 )\n return (objectives, constraints)\n\n\n\n\ntoolbox.register(\"attr_float\", uniform, BOUND_LOW, BOUND_UP, NDIM)\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.attr_float)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n# scoop map function\n#toolbox.register('map', futures.map)\ntoolbox.register('map', map) \n \n#toolbox.register(\"evaluate\", CONSTR)\ntoolbox.register(\"evaluate\", TNK)\n#toolbox.register(\"evaluate\", SRN)\n\ntoolbox.register(\"mate\", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)\ntoolbox.register(\"mutate\", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)\ntoolbox.register(\"select\", tools.selNSGA2)\n\ndef main(seed=None):\n random.seed(seed)\n\n NGEN = 50\n MU = 100\n CXPB = 0.9\n\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean, axis=0)\n stats.register(\"std\", numpy.std, axis=0)\n stats.register(\"min\", numpy.min, axis=0)\n stats.register(\"max\", numpy.max, axis=0)\n \n logbook = tools.Logbook()\n logbook.header = \"gen\", \"evals\", \"std\", \"min\", \"avg\", \"max\"\n \n pop = toolbox.population(n=MU)\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in pop if not ind.fitness.valid]\n evaluate_result = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, evaluate_result):\n ind.fitness.values = fit[0]\n ind.fitness.cvalues = fit[1]\n ind.fitness.n_constraints = len(fit[1])\n\n\n # This is just to 
assign the crowding distance to the individuals\n # no actual selection is done\n pop = toolbox.select(pop, len(pop))\n \n record = stats.compile(pop)\n logbook.record(gen=0, evals=len(invalid_ind), **record)\n print(logbook.stream)\n\n # Begin the generational process\n for gen in range(1, NGEN):\n # Vary the population\n offspring = tools.selTournamentDCD(pop, len(pop))\n offspring = [toolbox.clone(ind) for ind in offspring]\n \n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() <= CXPB:\n toolbox.mate(ind1, ind2)\n \n toolbox.mutate(ind1)\n toolbox.mutate(ind2)\n del ind1.fitness.values, ind2.fitness.values\n \n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit[0]\n ind.fitness.cvalues = fit[1]\n ind.fitness.n_constraints = len(fit[1])\n # Allow for additional info to be saved (for example, a dictionary of properties)\n if len(fit) > 2:\n ind.fitness.info = fit[2]\n\n # Select the next generation population\n pop = toolbox.select(pop + offspring, MU)\n record = stats.compile(pop)\n logbook.record(gen=gen, evals=len(invalid_ind), **record)\n print(logbook.stream, hypervolume(pop, [1.0,1.0]))\n \n\n return pop, logbook",
"_____no_output_____"
],
[
"#if __name__ == \"__main__\":\n# #optimal_front = json.load(open(\"pareto_front/zdt4_front.json\"))\n# # Use 500 of the 1000 points in the json file\n# #optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))\n \npop, stats = main()\npop.sort(key=lambda x: x.fitness.values)\n\nprint(stats)\n#print(\"Convergence: \", convergence(pop, optimal_front))\n#print(\"Diversity: \", diversity(pop, optimal_front[0], optimal_front[-1]))\n\nimport matplotlib.pyplot as plt\nimport numpy\n \nfront = numpy.array([ind.fitness.values for ind in pop])\n#optimal_front = numpy.array(optimal_front)\n#plt.scatter(optimal_front[:,0], optimal_front[:,1], c=\"r\")\nplt.scatter(front[:,0], front[:,1], c=\"b\")\nplt.axis(\"tight\")\nplt.show()",
"gen\tevals\tstd \tmin \tavg \tmax \n0 \t100 \t[0.91649251 0.90040956]\t[0.00696173 0.117216 ]\t[1.48194486 1.5918278 ]\t[3.12493056 3.13810868]\n1 \t100 \t[0.68643602 0.65040504]\t[0.00696173 0.00176618]\t[1.03872441 1.19646164]\t[2.39417415 2.33098936] 0.027153985982854886\n2 \t100 \t[0.45751492 0.489784 ]\t[0.00525888 0.00176618]\t[0.79131658 1.01278825]\t[1.62948529 1.64482547] 0.05690554854043568\n3 \t100 \t[0.35546585 0.37348248]\t[0.00330041 0.06687579]\t[0.70692979 0.90707228]\t[1.3723356 1.35730776] 0.09328966366554098\n4 \t100 \t[0.26436076 0.33677161]\t[0.17021773 0.1341936 ]\t[0.74487848 0.78720988]\t[1.13599652 1.15030495] 0.11946232579149695\n5 \t100 \t[0.29191083 0.34347156]\t[0.11201606 0.06659987]\t[0.69565158 0.7457581 ]\t[1.13599652 1.14659998] 0.1612740624979951\n6 \t100 \t[0.30949881 0.32869486]\t[0.11201606 0.06659987]\t[0.70776412 0.68634416]\t[1.10507591 1.1205152 ] 0.17126313653667863\n7 \t100 \t[0.3279433 0.33734022]\t[0.11201606 0.06659987]\t[0.70526607 0.64877159]\t[1.09434503 1.14841224] 0.1816956572794289\n8 \t100 \t[0.31370032 0.3328357 ]\t[0.11201606 0.06659987]\t[0.70421359 0.62729417]\t[1.09037915 1.10377208] 0.19015975950656525\n9 \t100 \t[0.30677724 0.32013623]\t[0.11201606 0.06659987]\t[0.66450502 0.66270874]\t[1.08282441 1.10377208] 0.19602769569527798\n10 \t100 \t[0.32072185 0.34304009]\t[0.11201606 0.06659987]\t[0.65437482 0.63693876]\t[1.05820304 1.10377208] 0.19885157354827465\n11 \t100 \t[0.31544381 0.33448986]\t[0.11201606 0.06659987]\t[0.67742997 0.61196273]\t[1.05624949 1.0809787 ] 0.1997018501889775\n12 \t100 \t[0.31182269 0.32840865]\t[0.08167453 0.06659987]\t[0.6797464 0.60896368]\t[1.05624949 1.07663992] 0.2000273502104228\n13 \t100 \t[0.31476306 0.32130671]\t[0.08167453 0.06659987]\t[0.63870841 0.65274927]\t[1.05624949 1.07663992] 0.20492466502937334\n14 \t100 \t[0.3039031 0.32377162]\t[0.08167453 0.06327784]\t[0.67570163 0.61516669]\t[1.05624949 1.07663992] 0.20497255380356125\n15 \t100 \t[0.29622827 
0.31317175]\t[0.08167453 0.06327784]\t[0.65394293 0.64662824]\t[1.05624949 1.07663992] 0.20538633310962517\n16 \t100 \t[0.28460199 0.29703107]\t[0.07984592 0.04993859]\t[0.64444158 0.6655724 ]\t[1.05282197 1.06841742] 0.20588408953322104\n17 \t100 \t[0.29340178 0.29235333]\t[0.07801028 0.04993859]\t[0.61928286 0.68148796]\t[1.05282197 1.06841742] 0.2066367664918389\n18 \t100 \t[0.29203774 0.3014975 ]\t[0.06294778 0.04993859]\t[0.65557759 0.63920895]\t[1.05282197 1.05688769] 0.20802921558992385\n19 \t100 \t[0.29353507 0.29010449]\t[0.06294778 0.04993859]\t[0.63166145 0.66695694]\t[1.05282197 1.05688769] 0.20878722415090736\n20 \t100 \t[0.29975756 0.28756193]\t[0.06294778 0.04993859]\t[0.59733185 0.69480141]\t[1.04525061 1.05221432] 0.2103558295001688\n21 \t100 \t[0.29324228 0.28902884]\t[0.06294778 0.04993859]\t[0.60562114 0.68793437]\t[1.04525061 1.05221432] 0.2118750956879809\n22 \t100 \t[0.27511368 0.2776117 ]\t[0.06294778 0.04993859]\t[0.6251355 0.68134673]\t[1.04525061 1.05221432] 0.21201370535041347\n23 \t100 \t[0.28096301 0.27734877]\t[0.06294778 0.04993859]\t[0.63225258 0.67137814]\t[1.04525061 1.05221432] 0.21295205643021475\n24 \t100 \t[0.28295582 0.28048153]\t[0.06294778 0.04993859]\t[0.63202807 0.66673154]\t[1.04525061 1.05221432] 0.21380489583294315\n25 \t100 \t[0.28472157 0.2694035 ]\t[0.05976438 0.04993859]\t[0.62099057 0.68271681]\t[1.04525061 1.04663717] 0.2139601714672758\n26 \t100 \t[0.28244692 0.27897769]\t[0.05976438 0.04993859]\t[0.63116506 0.67025971]\t[1.04525061 1.04663717] 0.21414275306691677\n27 \t100 \t[0.28977461 0.27684764]\t[0.05976438 0.04993859]\t[0.62020921 0.67617821]\t[1.03801526 1.04663717] 0.21499775294200957\n28 \t100 \t[0.29408325 0.27816392]\t[0.05976438 0.04993859]\t[0.61525224 0.67736976]\t[1.03801526 1.04663717] 0.21563322708980795\n29 \t100 \t[0.29983011 0.28717199]\t[0.05976438 0.04993859]\t[0.62397867 0.66075616]\t[1.03801526 1.04663717] 0.2157835022295358\n30 \t100 \t[0.30126657 0.29124463]\t[0.05976438 
0.04993859]\t[0.60862581 0.67241129]\t[1.03801526 1.04663717] 0.21763077355822322\n31 \t100 \t[0.31095561 0.30971152]\t[0.05976438 0.04993859]\t[0.60481558 0.66442509]\t[1.03801526 1.04663717] 0.21840761404683237\n32 \t100 \t[0.31075476 0.31415463]\t[0.05976438 0.04993859]\t[0.62158773 0.64276711]\t[1.03801526 1.04628497] 0.21897919266265037\n33 \t100 \t[0.31566177 0.32231067]\t[0.05976438 0.04993859]\t[0.6424449 0.61616491]\t[1.03801526 1.04512073] 0.21949622498009735\n34 \t100 \t[0.3142915 0.32266054]\t[0.05976438 0.04993859]\t[0.64146176 0.61699266]\t[1.03801526 1.042178 ] 0.21957361669870606\n35 \t100 \t[0.31677012 0.333243 ]\t[0.05976438 0.04993859]\t[0.68187979 0.56977387]\t[1.03801526 1.042178 ] 0.21957471238185008\n36 \t100 \t[0.31512235 0.32684806]\t[0.05792684 0.04993859]\t[0.66810376 0.59309455]\t[1.03801526 1.03447633] 0.22021047194106522\n37 \t100 \t[0.30978599 0.32778643]\t[0.05792684 0.04993859]\t[0.66856012 0.59123482]\t[1.03801526 1.03447633] 0.22069529531553936\n38 \t100 \t[0.30558267 0.3132096 ]\t[0.05792684 0.049564 ]\t[0.66320769 0.60263687]\t[1.03801526 1.03447633] 0.2209460521040062\n39 \t100 \t[0.2911209 0.30647227]\t[0.05792684 0.049564 ]\t[0.68013681 0.59336189]\t[1.03801526 1.03447633] 0.22122611842032722\n40 \t100 \t[0.300672 0.31567227]\t[0.05792684 0.049564 ]\t[0.66499083 0.60086178]\t[1.03801526 1.03447633] 0.22133390366921118\n41 \t100 \t[0.29654647 0.31645457]\t[0.05792684 0.049564 ]\t[0.67369903 0.59211784]\t[1.03801526 1.03372254] 0.22135074469473287\n42 \t100 \t[0.30056543 0.31126819]\t[0.05792684 0.049564 ]\t[0.6535928 0.61438007]\t[1.03801526 1.03372254] 0.22194820542951846\n43 \t100 \t[0.30045772 0.30741863]\t[0.05792684 0.049564 ]\t[0.65514227 0.61487181]\t[1.03801526 1.03372254] 0.22206926529620352\n44 \t100 \t[0.30168472 0.31211171]\t[0.05792684 0.049564 ]\t[0.65624715 0.61086859]\t[1.03801526 1.03372254] 0.222073577045853\n45 \t100 \t[0.30685963 0.31361139]\t[0.05792684 0.049564 ]\t[0.64928815 0.61390077]\t[1.03801526 
1.03372254] 0.22207584665822735\n46 \t100 \t[0.30428189 0.31569888]\t[0.05792684 0.049564 ]\t[0.65834286 0.604876 ]\t[1.03801526 1.03372254] 0.22207896502196084\n47 \t100 \t[0.30655985 0.31348607]\t[0.05792684 0.049564 ]\t[0.6483969 0.61489568]\t[1.03801526 1.03372254] 0.2221826671600712\n48 \t100 \t[0.30331887 0.31502778]\t[0.05792684 0.04953291]\t[0.65848199 0.60561883]\t[1.03801526 1.03372254] 0.22221425485892382\n49 \t100 \t[0.30420692 0.31586446]\t[0.05792684 0.04953291]\t[0.66049776 0.60482911]\t[1.03801526 1.03372254] 0.22223407844949816\ngen\tevals\tstd \tmin \tavg \tmax \n0 \t100 \t[0.91649251 0.90040956]\t[0.00696173 0.117216 ]\t[1.48194486 1.5918278 ]\t[3.12493056 3.13810868]\n1 \t100 \t[0.68643602 0.65040504]\t[0.00696173 0.00176618]\t[1.03872441 1.19646164]\t[2.39417415 2.33098936]\n2 \t100 \t[0.45751492 0.489784 ]\t[0.00525888 0.00176618]\t[0.79131658 1.01278825]\t[1.62948529 1.64482547]\n3 \t100 \t[0.35546585 0.37348248]\t[0.00330041 0.06687579]\t[0.70692979 0.90707228]\t[1.3723356 1.35730776]\n4 \t100 \t[0.26436076 0.33677161]\t[0.17021773 0.1341936 ]\t[0.74487848 0.78720988]\t[1.13599652 1.15030495]\n5 \t100 \t[0.29191083 0.34347156]\t[0.11201606 0.06659987]\t[0.69565158 0.7457581 ]\t[1.13599652 1.14659998]\n6 \t100 \t[0.30949881 0.32869486]\t[0.11201606 0.06659987]\t[0.70776412 0.68634416]\t[1.10507591 1.1205152 ]\n7 \t100 \t[0.3279433 0.33734022]\t[0.11201606 0.06659987]\t[0.70526607 0.64877159]\t[1.09434503 1.14841224]\n8 \t100 \t[0.31370032 0.3328357 ]\t[0.11201606 0.06659987]\t[0.70421359 0.62729417]\t[1.09037915 1.10377208]\n9 \t100 \t[0.30677724 0.32013623]\t[0.11201606 0.06659987]\t[0.66450502 0.66270874]\t[1.08282441 1.10377208]\n10 \t100 \t[0.32072185 0.34304009]\t[0.11201606 0.06659987]\t[0.65437482 0.63693876]\t[1.05820304 1.10377208]\n11 \t100 \t[0.31544381 0.33448986]\t[0.11201606 0.06659987]\t[0.67742997 0.61196273]\t[1.05624949 1.0809787 ]\n12 \t100 \t[0.31182269 0.32840865]\t[0.08167453 0.06659987]\t[0.6797464 
0.60896368]\t[1.05624949 1.07663992]\n13 \t100 \t[0.31476306 0.32130671]\t[0.08167453 0.06659987]\t[0.63870841 0.65274927]\t[1.05624949 1.07663992]\n14 \t100 \t[0.3039031 0.32377162]\t[0.08167453 0.06327784]\t[0.67570163 0.61516669]\t[1.05624949 1.07663992]\n15 \t100 \t[0.29622827 0.31317175]\t[0.08167453 0.06327784]\t[0.65394293 0.64662824]\t[1.05624949 1.07663992]\n16 \t100 \t[0.28460199 0.29703107]\t[0.07984592 0.04993859]\t[0.64444158 0.6655724 ]\t[1.05282197 1.06841742]\n17 \t100 \t[0.29340178 0.29235333]\t[0.07801028 0.04993859]\t[0.61928286 0.68148796]\t[1.05282197 1.06841742]\n18 \t100 \t[0.29203774 0.3014975 ]\t[0.06294778 0.04993859]\t[0.65557759 0.63920895]\t[1.05282197 1.05688769]\n19 \t100 \t[0.29353507 0.29010449]\t[0.06294778 0.04993859]\t[0.63166145 0.66695694]\t[1.05282197 1.05688769]\n20 \t100 \t[0.29975756 0.28756193]\t[0.06294778 0.04993859]\t[0.59733185 0.69480141]\t[1.04525061 1.05221432]\n21 \t100 \t[0.29324228 0.28902884]\t[0.06294778 0.04993859]\t[0.60562114 0.68793437]\t[1.04525061 1.05221432]\n22 \t100 \t[0.27511368 0.2776117 ]\t[0.06294778 0.04993859]\t[0.6251355 0.68134673]\t[1.04525061 1.05221432]\n23 \t100 \t[0.28096301 0.27734877]\t[0.06294778 0.04993859]\t[0.63225258 0.67137814]\t[1.04525061 1.05221432]\n24 \t100 \t[0.28295582 0.28048153]\t[0.06294778 0.04993859]\t[0.63202807 0.66673154]\t[1.04525061 1.05221432]\n25 \t100 \t[0.28472157 0.2694035 ]\t[0.05976438 0.04993859]\t[0.62099057 0.68271681]\t[1.04525061 1.04663717]\n26 \t100 \t[0.28244692 0.27897769]\t[0.05976438 0.04993859]\t[0.63116506 0.67025971]\t[1.04525061 1.04663717]\n27 \t100 \t[0.28977461 0.27684764]\t[0.05976438 0.04993859]\t[0.62020921 0.67617821]\t[1.03801526 1.04663717]\n28 \t100 \t[0.29408325 0.27816392]\t[0.05976438 0.04993859]\t[0.61525224 0.67736976]\t[1.03801526 1.04663717]\n29 \t100 \t[0.29983011 0.28717199]\t[0.05976438 0.04993859]\t[0.62397867 0.66075616]\t[1.03801526 1.04663717]\n30 \t100 \t[0.30126657 0.29124463]\t[0.05976438 0.04993859]\t[0.60862581 
0.67241129]\t[1.03801526 1.04663717]\n31 \t100 \t[0.31095561 0.30971152]\t[0.05976438 0.04993859]\t[0.60481558 0.66442509]\t[1.03801526 1.04663717]\n32 \t100 \t[0.31075476 0.31415463]\t[0.05976438 0.04993859]\t[0.62158773 0.64276711]\t[1.03801526 1.04628497]\n33 \t100 \t[0.31566177 0.32231067]\t[0.05976438 0.04993859]\t[0.6424449 0.61616491]\t[1.03801526 1.04512073]\n34 \t100 \t[0.3142915 0.32266054]\t[0.05976438 0.04993859]\t[0.64146176 0.61699266]\t[1.03801526 1.042178 ]\n35 \t100 \t[0.31677012 0.333243 ]\t[0.05976438 0.04993859]\t[0.68187979 0.56977387]\t[1.03801526 1.042178 ]\n36 \t100 \t[0.31512235 0.32684806]\t[0.05792684 0.04993859]\t[0.66810376 0.59309455]\t[1.03801526 1.03447633]\n37 \t100 \t[0.30978599 0.32778643]\t[0.05792684 0.04993859]\t[0.66856012 0.59123482]\t[1.03801526 1.03447633]\n38 \t100 \t[0.30558267 0.3132096 ]\t[0.05792684 0.049564 ]\t[0.66320769 0.60263687]\t[1.03801526 1.03447633]\n39 \t100 \t[0.2911209 0.30647227]\t[0.05792684 0.049564 ]\t[0.68013681 0.59336189]\t[1.03801526 1.03447633]\n40 \t100 \t[0.300672 0.31567227]\t[0.05792684 0.049564 ]\t[0.66499083 0.60086178]\t[1.03801526 1.03447633]\n41 \t100 \t[0.29654647 0.31645457]\t[0.05792684 0.049564 ]\t[0.67369903 0.59211784]\t[1.03801526 1.03372254]\n42 \t100 \t[0.30056543 0.31126819]\t[0.05792684 0.049564 ]\t[0.6535928 0.61438007]\t[1.03801526 1.03372254]\n43 \t100 \t[0.30045772 0.30741863]\t[0.05792684 0.049564 ]\t[0.65514227 0.61487181]\t[1.03801526 1.03372254]\n44 \t100 \t[0.30168472 0.31211171]\t[0.05792684 0.049564 ]\t[0.65624715 0.61086859]\t[1.03801526 1.03372254]\n45 \t100 \t[0.30685963 0.31361139]\t[0.05792684 0.049564 ]\t[0.64928815 0.61390077]\t[1.03801526 1.03372254]\n46 \t100 \t[0.30428189 0.31569888]\t[0.05792684 0.049564 ]\t[0.65834286 0.604876 ]\t[1.03801526 1.03372254]\n47 \t100 \t[0.30655985 0.31348607]\t[0.05792684 0.049564 ]\t[0.6483969 0.61489568]\t[1.03801526 1.03372254]\n48 \t100 \t[0.30331887 0.31502778]\t[0.05792684 0.04953291]\t[0.65848199 
0.60561883]\t[1.03801526 1.03372254]\n49 \t100 \t[0.30420692 0.31586446]\t[0.05792684 0.04953291]\t[0.66049776 0.60482911]\t[1.03801526 1.03372254]\n"
],
[
"pop[0]",
"_____no_output_____"
],
[
"[float(x) for x in pop[0]]",
"_____no_output_____"
],
[
"pop[0].fitness.info",
"_____no_output_____"
]
],
[
[
"# Hypervolume",
"_____no_output_____"
]
],
[
[
"from deap.benchmarks.tools import diversity, convergence, hypervolume",
"_____no_output_____"
],
[
"print(\"Final population hypervolume is %f\" % hypervolume(pop, [1.0,1.0]))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9a121c09687c949f738c3b7c02af0e10135802
| 37,075 |
ipynb
|
Jupyter Notebook
|
06_EPO/e-TeenAstronomyCafe/09_Exoplanet_Spectra/FER_Espectros_de_los_Exoplanetas.ipynb
|
noaodatalab/notebooks_default
|
3001f40c0de05445e65e205fdb3806f85e91dbfe
|
[
"BSD-3-Clause"
] | null | null | null |
06_EPO/e-TeenAstronomyCafe/09_Exoplanet_Spectra/FER_Espectros_de_los_Exoplanetas.ipynb
|
noaodatalab/notebooks_default
|
3001f40c0de05445e65e205fdb3806f85e91dbfe
|
[
"BSD-3-Clause"
] | null | null | null |
06_EPO/e-TeenAstronomyCafe/09_Exoplanet_Spectra/FER_Espectros_de_los_Exoplanetas.ipynb
|
noaodatalab/notebooks_default
|
3001f40c0de05445e65e205fdb3806f85e91dbfe
|
[
"BSD-3-Clause"
] | null | null | null | 34.746954 | 1,184 | 0.614107 |
[
[
[
"# Encontrando los Ingredientes de Otros Mundos\n\nDidier Queloz y Michel Mayor encontraron el primer exoplaneta orbitando una estrella similar al Sol, lo que les valió el [premio Nobel de Física 2019](https://www.nobelprize.org/prizes/physics/2019/summary/). Desde entonces, el número de planetas conocidos ha crecido exponencialmente. Ahora, los astrónomos van más allá de solo hecho de descubrir planetas fuera del Sistema Solar, tienen el desafio de aprender sobre sus atmósferas. En este \"simulador\" obtendremos espectros de sistemas de exoplanetas para entender de qué están hechas sus atmósferas.\n\n___",
"_____no_output_____"
],
[
"# Table of Contents\n\n* [Cómo Usar esta Guía](#Cómo-Usar-esta-Guía)\n* [Configuración Previa a la Actividad](#Configuración-Previa-a-la-Actividad)\n* [Actividad 1: Introducción - Curvas de Luz del Planeta](#Actividad-1:-Introducción---Curvas-de-Luz-del-Planeta)\n* [Actividad 2: Radio del Planeta](#Actividad-2:-Radio-del-Planeta)\n* [Actividad 3: Un espectro Planetario](#Actividad-3:-Un-espectro-Planetario)\n* [Actividad 4: Ejemplos de Atmósferas de Planetas](#Actividad-4:-Ejemplos-de-Atmósferas-de-Planetas)\n* [Actividad 5: Atmósferas de Planetas Misteriosos](#Actividad-5:-Atmósferas-de-Planetas-Misteriosos)\n* [Actividad 6: Conclusiones](#Actividad-6:-Conclusiones)\n\n___",
"_____no_output_____"
],
[
"# Cómo Usar esta Guía\n\nLa página web en la que se encuentra es en realidad una aplicación llamada Jupyter Notebook, muy parecida a las de su teléfono. Esta aplicación consta de celdas.\n\nUna celda de *entrada/input* parece un cuadro gris claro con `[ ]` a su izquierda. Cada una de las celdas de entrada contiene código: instrucciones para hacer que la computadora haga algo.\n\nPara activar o seleccionar una celda, haga clic en cualquier lugar dentro de ella.\n\n\\\\\n<div class='alert alert-info'>\n <font size='3'><b>Seleccione la celda de abajo y lea su contenido.</b></font>\n</div>",
"_____no_output_____"
]
],
[
[
"# El texto que sigue a un \"#\" es un comentario.\n# Los comentarios no afectan su código de ninguna manera.\n# Lea siempre los comentarios en la parte superior de cada celda con la que interactúe.\n# Los comentarios se utilizarán para describir lo que realmente está haciendo el código de la celda.",
"_____no_output_____"
]
],
[
[
"Para ejecutar una celda seleccionada, haga clic en el pequeño botón de reproducción o presione `[Shift + Enter]` en su teclado.\n\n\\\\\n<div class='alert alert-info'>\n <font size='3'><b>Seleccione la celda de abajo y lea su contenido. Luego, ejecute la celda.</b></font>\n <br> Si aparece una advertencia, simplemente haga clic en <em>\"Ejecutar de todos modos (/Run Anyway, REVISAR CON UN PC EN ESPAÑOL)\"</em>, este código es seguro 😉 \n <br> Además, si desea guardar tú progreso, haga clic en el botón <em>\"Copiar a Drive(/Copy to Drive, REVISAR CON UN PC EN ESPAÑOL)\"</em> en la parte superior.\n </div>",
"_____no_output_____"
]
],
[
[
"# El texto que NO esta antecedido por un \"#\" se considera código.\n# Las líneas de código son instrucciones dadas a su computadora.\n# La línea de código a continuación es una instrucción de \"impresión\", que literalmente imprime el texto entre comillas.\n\nprint(\"¡Felicitaciones! ¡Has ejecutado con éxito tu primera celda!\")",
"_____no_output_____"
]
],
[
[
"Ejecutar una celda crea una salida directamente debajo de ella. Una salida puede ser un texto, un gráfico, un control deslizante interactivo, ¡o incluso nada en absoluto! Cuando ha corrido una celda, aparece un número entre corchetes, por ejemplo [1] al lado izquierdo de la celda.\n\n<div class='alert alert-info'>\n <font size='3'><b>Abra todas las secciones de este bloc de notas seleccionando el menú \"Ver\" y \"Expandir secciones\" </b></font>\n <br> \n</div>\n\n\nPuede obtener más información sobre cómo funcionan los Jupyter Notebooks en https://try.jupyter.org/ (Página en Ingles)\n___",
"_____no_output_____"
],
[
"# Configuración Previa a la Actividad\n\nPara que cualquiera de las actividades funcione correctamente, debe importar las bibliotecas necesarias para el funcionamiento del código de esta guía. Estos ya deberían haberse cargado cuando ejecutó todas las celdas.",
"_____no_output_____"
]
],
[
[
"# Las siguientes pasos cargamos las bibliotecas necesarias para correr el código de esta guía.\nfrom httpimport import remote_repo\n\nrepoURL = 'https://raw.githubusercontent.com/astro-datalab/notebooks-latest/master/06_EPO/e-TeenAstronomyCafe/'\n\nwith remote_repo(['lightcurve_sliderES'], repoURL+'09_Exoplanet_Spectra') :\n import lightcurve_sliderES\n \nprint(\"Bibliotecas importadas con éxito.\")",
"_____no_output_____"
],
[
"lightcurve_sliderES.initial_imports()\n\nprint(\"Archivos importados con éxito.\")",
"_____no_output_____"
]
],
[
[
"<div class='alert alert-info'>\n <font size='3'><b>Ajuste el control deslizante a continuación a 5.0.</b></font>\n</div>",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.practice_slider()",
"_____no_output_____"
]
],
[
[
"<div class='alert alert-info'>\n <font size='3'><b>Pase el cursor sobre el texto verde a continuación.</b></font>\n</div>",
"_____no_output_____"
],
[
"**Finalmente**, habrán algunas <span title=\"Terminología es un lenguaje especial utilizado por personas en un campo de estudio específico, generalmente como un atajo\"><font color='green'>terminologías</font></span> utilizadas en esta guía. Puede pasar el cursor sobre el texto para obtener más información.",
"_____no_output_____"
],
[
"<div class='alert alert-info'>\n <font size='3'><b>En este punto, asegúrese de haber ejecutado todas las celdas y expandido todas las secciones siguiendo las instrucciones anteriores.</b></font>\n</div>\n\n___",
"_____no_output_____"
],
[
"# Actividad 1: Introducción - Curvas de Luz del Planeta",
"_____no_output_____"
],
[
"Comencemos con una <span title=\"Esta es una gráfica que muestra cómo cambia el brillo de un sistema estrella + planeta en función del tiempo a medida que el planeta pasa frente a la estrella.\"><font color='green'>curva de luz de un tránsito de un exoplaneta</font></span>. Esta es una gráfica que muestra cómo el brillo de un sistema estrella + planeta cambia con el tiempo a medida que el planeta pasa frente a la estrella. El eje **x** es el tiempo en horas, el eje **y** es el brillo en porcentaje. El tiempo se muestra en relación con <span title=\"Este es el momento en que un planeta y una estrella se alinean\"><font color='green'>centro del transito</font></span>, que es cuando el planeta y la estrella se alinean.\n\n\\\\\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre el control deslizante para cambiar la hora. <br>Mira lo que sucede con el brillo (Curva de Luz) y el planeta que cruza la estrella (animación de la estrella)</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.lightcurve_slider(free_radius=False)",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>Pregunta 1: ¿Cuándo cambia el brillo? ¿Por qué crees que es 100% al principio y al final?</b>\n</font>\n\n___",
"_____no_output_____"
],
[
"# Actividad 2: Radio del Planeta\nLa siguiente gráfico es otra <span title=\"Esta es una gráfica que muestra cómo cambia el brillo de un sistema estrella + planeta en función del tiempo a medida que el planeta pasa frente a la estrella.\"><font color='green'>curva de luz de un exoplaneta en transito</font></span>. Esto debería ser familiar a lo que viste arriba con los mismos ejes y forma. Ahora, hemos agregado una nueva variable, el radio del planeta. Aquí, damos el radio del planeta en <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radio terrestre</font></span>. Considerando que el radio de la Tierra es de 6.371 km.\n\n\n<div class='alert alert-info'>\n <font size='3'><b>\n\n* Arrastre el control deslizante del Radio para ver cómo afecta la curva de luz y la vista de la estrella y el planeta.\n* Arrastre el control deslizante Tiempo a una posición diferente para ver cómo afecta la geometría allí. En realidad, no podemos ver el círculo negro sino solo la curva de luz.\n</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.lightcurve_slider()",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 2: ¿Aumentar el radio del planeta hace que la caída de la curva de luz sea más profunda o menos profunda?<br>\n </b>\n</font>\n<br>\n<font size='4' color='#0076b6'>\n <b>\n Pregunta 3: ¿Cómo afecta el radio del planeta al rango de tiempo en el cual cae la curva de luz cae por debajo del 100%?\n </b>\n</font>\n\n\n___",
"_____no_output_____"
],
[
"# Actividad 3: Un espectro Planetario",
"_____no_output_____"
],
[
"#### Tamaño del planeta en diferentes colores",
"_____no_output_____"
],
[
"Ahora exploremos qué sucede si un planeta tiene una atmósfera. Algunos colores de luz (<span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite.\"><font color='green'>longitudes de onda</font></span>) atravesarán la atmósfera, mientras que otros serán absorbidos o dispersados. Puede notar esto en nuestro planeta, La Tierra, durante las puestas de sol, donde la atmósfera dispersa la luz azul y la luz roja atraviesa la atmósfera. Desde la perspectiva del espacio, la Tierra se ve un poco más grande en las <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite.\"><font color='green'>longitudes de onda</font></span> azules que en las rojas. \n\nVeamos qué sucede con el tamaño efectivo de un planeta en cada color cuando agregas una atmósfera a un planeta. \n\n\\\\\nEl control deslizante a continuación controla el espesor de la atmósfera en <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radio Terrestre</font></span>. Los ejes **x** e **y** son efectivamente reglas para medir el tamaño del planeta en <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radio Terrestre</font></span>.\n\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre el control deslizante para cambiar el espesor atmosférico.</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.scattering_slider(plots=['planet'])",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 4: ¿En qué color aparece el planeta más grande?\n </b>\n</font>\n<br>\n<br>\n<font size='4' color='#0076b6'>\n <b>\n Pregunta 5: ¿Cómo podrías saber si un planeta tiene atmósfera?\n </b>\n</font>\n",
"_____no_output_____"
],
[
"#### Una gráfica del espectro",
"_____no_output_____"
],
[
"La forma en que los astrónomos visualizan la imagen en color de arriba de un planeta es a través de un <span title=\"Un espectro es un gráfico del tamaño del planeta versus la longitud de onda.\"><font color='green'>espectro de transmisión</font></span>. Esta es una gráfica del tamaño del planeta en <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radio Terrestre</font></span> versus la <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite.\"><font color='green'>longitudes de onda</font></span>. La longitud de onda se mide en unidades de <span title=\"Un micrón es una unidad de longitud que es una millonésima parte de un metro. El cabello humano tiene unas 75 micras de diámetro.\"><font color='green'>micrón</font></span>. Un micrón es la millonésima parte de un metro. El ancho típico de un cabello humano es de 75 micrones (Smith 2002, *Metrología Industrial*).\n\n\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre el control deslizante para cambiar el espesor atmosférico.</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.scattering_slider(plots=['planet','spectrum'])",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 6: ¿Cómo describirías el espectro cuando la pendiente de esta línea es cero?\n </b>\n</font>\n<br>\n<br>\n<font size='4' color='#0076b6'>\n <b>\n Pregunta 7: ¿Cómo describirías la atmósfera cuando la pendiente de esta línea es cero?\n </b>\n</font>\n",
"_____no_output_____"
],
[
"#### Curva de luz multicolor",
"_____no_output_____"
],
[
"Ahora que hemos construido una cierta comprensión de las <span title=\"Esta es una gráfica que muestra cómo cambia el brillo de un sistema estrella + planeta en función del tiempo a medida que el planeta pasa frente a la estrella.\"><font color='green'>curvas de luz del tránsito de un exoplanetas</font></span> en [Sección 1](#1.-Introducción---Curvas-de-Luz-del-Planeta) y [Sección 2](##Actividad-2:-Radio-del-Planeta), los examinaremos en diferentes <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span>. La curva de luz y el radio del planeta pueden ser diferentes de una longitud de onda a la siguiente porque parte de la luz atraviesa la atmósfera mientras que otra luz es absorbida. Ahora examinará la curva de luz para diferentes colores con una variable para el espesor de una atmósfera en radios terrestres. <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radios terrestres</font></span>.\n\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre el control deslizante para cambiar el espesor atmosférico.</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.scattering_slider(plots=['planet','spectrum','lightcurve'])",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 8: ¿Qué tipo de observaciones podrías hacer para saber si un planeta tiene atmósfera?\n </b>\n</font>\n\n___",
"_____no_output_____"
],
[
"# Actividad 4: Ejemplos de atmósferas de planetas\n\nAhora que tenemos una idea de cómo <span title=\"A spectrum is a plot of a planet's size versus wavelength.\"><font color='green'>espectros de transmisión</font></span> funciona, consideremos diferentes tipos de modelos. Los tamaños atmosféricos se han hecho más grandes que la realidad para que sean más fáciles de ver.",
"_____no_output_____"
],
[
"#### Una atmósfera de vapor de agua\n\n\nEl siguiente modelo atmosférico contiene vapor de agua. Las moléculas de agua vibrarán y rotarán en algunas <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span> mejor que en otras, por lo que el planeta se verá más grande en esas longitudes de onda cercanas a los 2,6 <span title=\"Un micrón es una unidad de longitud que es una millonésima parte de un metro. El cabello humano tiene unas 75 micras de diámetro.\"><font color='green'>micrones</font></span>.\n\n<div class='alert alert-info'>\n <font size='3'><b>Inspeccione el espectro a continuación.</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra(atmospheres=['H2O'])",
"_____no_output_____"
]
],
[
[
"#### Una atmósfera de metano\n\nEl siguiente modelo atmosférico contiene metano. Como el agua, las moléculas de metano vibrarán y rotarán a una <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span> mejor que a otra. Sin embargo, el metano tiene una configuración diferente de átomos, por lo que el planeta parece más grande, cerca de 3,4 <span title=\"Un micrón es una unidad de longitud que es una millonésima parte de un metro. El cabello humano tiene unas 75 micras de diámetro.\"><font color='green'>micrones</font></span>.\n\n<div class='alert alert-info'>\n <font size='3'><b>Inspeccione el espectro a continuación.</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra(atmospheres=['CH4'])",
"_____no_output_____"
]
],
[
[
"#### Una atmósfera de dióxido de carbono\n\nCEl dióxido de carbono es otra configuración de moléculas con dos átomos de oxígeno en lados opuestos del carbono. La simetría de la molécula significa que solo hay unas pocas formas de hacer vibrar el dióxido de carbono. Este planeta se verá más grande en 2.8 <span title=\"Un micrón es una unidad de longitud que es una millonésima parte de un metro. El cabello humano tiene unas 75 micras de diámetro.\"><font color='green'>micrones</font></span> y 4.4 <span title=\"Un micrón es una unidad de longitud que es una millonésima parte de un metro. El cabello humano tiene unas 75 micras de diámetro.\"><font color='green'>micrones</font></span> pero más pequeño en la mayoría de las otras <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span>.\n\n<div class='alert alert-info'>\n <font size='3'><b>Inspeccione el espectro a continuación.</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra(atmospheres=['CO2'])",
"_____no_output_____"
]
],
[
[
"#### Sin atmósfera\n\nSi un planeta no tiene atmósfera, todas las <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span> estarán en el limite rocoso del planeta. Por lo tanto, un planeta sin aire se verá del mismo tamaño en todas las longitudes de onda.\n\n<div class='alert alert-info'>\n <font size='3'><b>Inspeccione el espectro a continuación.</b></font>\n</div>\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra(atmospheres=['No Atmosphere'])",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 9: Hay una superficie sólida visible aquí. ¿A qué nivel (en radios terrestres) se encuentra la superficie? ¿Dónde crees que estaba en las atmósferas anteriores?\n </b>\n</font>\n\n\n___",
"_____no_output_____"
],
[
"# Actividad 5: Atmósferas de planetas misteriosos\n\nAhora estás jugando el papel de un astrónomo. Mide la curva de luz de un planeta a diferentes <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span> y esto se muestra a continuación como una dispersión de puntos en cada color. Deberá averiguar cuál es el radio del planeta (en <span title=\"El radio de la Tierra es de poco más de 6.000 kilometros. Podrías colocar unas 11 Tierras a lo largo de Júpiter y unas 109 Tierras a lo largo del sol.\"><font color='green'>radios terrestres</font></span>) para esa <span title=\"En una onda periódica la longitud de onda es la distancia física entre dos puntos a partir de los cuales la onda se repite\"><font color='green'>longitudes de onda</font></span>.\n",
"_____no_output_____"
],
[
"#### Planeta Misterioso 1",
"_____no_output_____"
],
[
"\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre los controles deslizantes para hacer que las líneas coincidan con los puntos de cada color, formando las líneas que mejor se ajusten. Asegúrese de desplazarse lo suficiente para ver ambos gráficos.\n\n</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.transmission_spec_slider(mysteryNum=1)",
"_____no_output_____"
]
],
[
[
"Ahora ha encontrado un <span title=\"Un espectro es un gráfico del tamaño del planeta versus la longitud de onda.\"><font color='green'>espectro de transmisión</font></span> del planeta que mejor se ajusta a los datos.\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra()",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 10: Compare su espectro de transmisión con los modelos. ¿Qué tipo de ambiente encontraste?\n </b>\n</font>\n",
"_____no_output_____"
],
[
"#### Planeta Misterioso 2",
"_____no_output_____"
],
[
"\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre los controles deslizantes para hacer que las líneas coincidan con los puntos de cada color, formando las líneas que mejor se ajusten. Asegúrese de desplazarse lo suficiente para ver ambos gráficos.\n\n</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.transmission_spec_slider(mysteryNum=2)",
"_____no_output_____"
]
],
[
[
"Ahora ha encontrado un <span title=\"Un espectro es un gráfico del tamaño del planeta versus la longitud de onda.\"><font color='green'>espectro de transmisión</font></span> del planeta que mejor se ajusta a los datos.",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra()",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 11: Compare su espectro de transmisión con los modelos. ¿Qué tipo de ambiente encontraste?\n </b>\n</font>",
"_____no_output_____"
],
[
"#### Planeta Misterioso ",
"_____no_output_____"
],
[
"\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre los controles deslizantes para hacer que las líneas coincidan con los puntos de cada color, formando las líneas que mejor se ajusten. Asegúrese de desplazarse lo suficiente para ver ambos gráficos.\n\n</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.transmission_spec_slider(mysteryNum=3)",
"_____no_output_____"
]
],
[
[
"Ahora ha encontrado un <span title=\"Un espectro es un gráfico del tamaño del planeta versus la longitud de onda.\"><font color='green'>espectro de transmisión</font></span> del planeta que mejor se ajusta a los datos.\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra()",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 12: Compare su espectro de transmisión con los modelos. ¿Qué tipo de ambiente encontraste?\n </b>\n</font>\n",
"_____no_output_____"
],
[
"#### Mystery Planet 4",
"_____no_output_____"
],
[
"\n<div class='alert alert-info'>\n <font size='3'><b>Arrastre los controles deslizantes para hacer que las líneas coincidan con los puntos de cada color, formando las líneas que mejor se ajusten. Asegúrese de desplazarse lo suficiente para ver ambos gráficos.\n\n</b></font>\n</div>\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.transmission_spec_slider(mysteryNum=4)",
"_____no_output_____"
]
],
[
[
"Ahora ha encontrado un <span title=\"Un espectro es un gráfico del tamaño del planeta versus la longitud de onda.\"><font color='green'>espectro de transmisión</font></span> del planeta que mejor se ajusta a los datos.\n\n",
"_____no_output_____"
]
],
[
[
"lightcurve_sliderES.example_spectra()",
"_____no_output_____"
]
],
[
[
"<font size='4' color='#0076b6'>\n <b>\n Pregunta 13: Compare su espectro de transmisión con los modelos. ¿Qué tipo de ambiente encontraste?\n </b>\n</font>\n\n___",
"_____no_output_____"
],
[
"# Actividad 6: Conclusiones\n\n¡Felicidades! Ahora estás averiguando de qué están hechas las atmósferas de los planetas o si un planeta carece de atmósfera. En atmósferas reales, obtendremos una mezcla de moléculas que nos pueden informar sobre la química de los planetas y, algún día, incluso ayudarnos a encontrar vida en otras partes del Universo. \n\nLos astrónomos están explorando atmósferas de planetas reales con telescopios operativos actualmente y a la espera de que nuevos telescopios comiencen a operar, como el telescopio espacial James Webb. Puede leer sobre el telescopio Webb y ver imágenes del mismo en [jwst.nasa.gov/](https://jwst.nasa.gov/content/features/index.html#educationalFeatures).\n",
"_____no_output_____"
],
[
"<!-- <div class='alert alert-info'>\n <font size='3'><b>If you are doing this for a class, turn in your answers to the bold questions. If you are doing it for fun, you're done!</b></font><br> -->\n\n\n\n\n————\n\n##### Jupyter Notebook by [Everett Schlawin](http://mips.as.arizona.edu/~schlawin/) and the [NOIR Lab's Teen Astronomy Cafe Team](http://www.teenastronomycafe.org)\n\n#### Version 1.0\n\nThe source code for this notebook is available at <a href=\"https://github.com/eas342/interactive_lc\">https://github.com/eas342/interactive_lc</a>.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a9a1441187075c78a9cb668cc7716dc3669e795
| 20,309 |
ipynb
|
Jupyter Notebook
|
01_introduction_to_python_variables_simple_types.ipynb
|
glow-mdsol/phuse_eu_connect_python
|
9bab0ea24cd737eb62eacce21427a330edf9335a
|
[
"MIT"
] | null | null | null |
01_introduction_to_python_variables_simple_types.ipynb
|
glow-mdsol/phuse_eu_connect_python
|
9bab0ea24cd737eb62eacce21427a330edf9335a
|
[
"MIT"
] | null | null | null |
01_introduction_to_python_variables_simple_types.ipynb
|
glow-mdsol/phuse_eu_connect_python
|
9bab0ea24cd737eb62eacce21427a330edf9335a
|
[
"MIT"
] | 1 |
2019-11-14T10:23:13.000Z
|
2019-11-14T10:23:13.000Z
| 21.111227 | 353 | 0.49126 |
[
[
[
"## Let's start!\n\nManipulating values in any language is done through the use of variables and operations.\n\n### Variables\n\nA variable is a holder for data and allows the programmer to pass around references to the data. \nVariables are generally said to be:\n* **mutable** - a variable can be changed after creation\n* **immutable** - a variable is fixed and unchangeable (there's nothing to stop you overwriting it)\n\nVariables have data types; Python is a little unusual with respect to its type system. It has a loose typing system. \n\nThis means:\n* You don't have to declare what type a variable is - the interpreter will work it out as you *assign* the variable a value\n\nWhen you want to work out what type a variable is, you can use the `type` function, it returns the python type for a variable instance.\n\nFollowing this are a set of assigments for the python fundamental types.\n",
"_____no_output_____"
]
],
[
[
"# first off - an integer (int)\na = 1\ntype(a)",
"_____no_output_____"
],
[
"# next a float (float)\na = 1.1\ntype(a)",
"_____no_output_____"
],
[
"# next a string (str)\na = \"1\"\ntype(a)",
"_____no_output_____"
],
[
"# next a boolean (bool)\na = True\ntype(a)",
"_____no_output_____"
],
[
"# next a complex number\na = 1.0 - 1.0j\nprint(type(a))\nprint(a.real, a.imag)",
"<class 'complex'>\n1.0 -1.0\n"
]
],
[
[
"In each of these cases we have not declared up front what `type` of variable `a` is - we assign the value to the variable and python has worked it out. The type inference is very powerful, but you must be careful in how you use it. As the types are not defined in advance, there's nothing to stop or warn you about an incompatible assignment.\n\nYou can use the `isinstance` function to check:",
"_____no_output_____"
]
],
[
[
"a = \"1\"\nb = 1\nprint(\"a is a string: \", isinstance(a, (str,)))\nprint(\"a is an integer: \", isinstance(a, (int,)))\n\nprint(\"b is a string: \", isinstance(b, (str,)))\nprint(\"b is an integer: \", isinstance(b, (int,)))\n",
"a is a string: True\na is an integer: False\nb is a string: False\nb is an integer: True\n"
]
],
[
[
"The type of variable is important when you want to use it, in these examples we use the increment operator `+=` on different types of variable",
"_____no_output_____"
]
],
[
[
"# note that += 1 is a shorthand for a = a + 1 (+= is an operator)\na = 1\na += 1\na",
"_____no_output_____"
],
[
"# a bit strange\na = 1.1\na += 1\na",
"_____no_output_____"
],
[
"# very strange\na = \"1\"\na += 1\na",
"_____no_output_____"
],
[
"# are you out of your mind???\na = True\na += 1\na",
"_____no_output_____"
]
],
[
[
"Note that Python has just worked it out where it makes sense, and if it doesn't make sense then it throws an error (which you can anticipate and handle, but more on that later)",
"_____no_output_____"
],
[
"## Type casting\nPython will try to do the right thing when you attempt to use a variable as a different type. Changing the type of variable is called *casting*",
"_____no_output_____"
]
],
[
[
"x = 1.5 # a float \nprint(x, type(x))",
"_____no_output_____"
],
[
"x = int(x) # cast the float to an int - note the cast to int will floor (take the lower bound of) the value\nprint(x, type(x))",
"_____no_output_____"
],
[
"# Where it makes sense, you can cast from a string\nx = \"1\"\nc = int(x)\nprint(c, type(c))",
"1 <class 'int'>\n"
],
[
"x = \"1.1\"\nc = float(x)\nprint(c, type(c))\n",
"1.1 <class 'float'>\n"
],
[
"x = \"1\"\nc = bool(x)\nprint(c, type(c))",
"True <class 'bool'>\n"
]
],
[
[
"What do you think the cast to `bool` value will be for the following cases:\n* \"0\"\n* \"\"\n* \"false\"\n* \"true\"\n* \"banana\"\n\nMake your prediction in the following sections",
"_____no_output_____"
]
],
[
[
"prediction = None # replace None with True or False\nx = \"0\"\nc = bool(x)\n\nassert prediction == c, \"You guessed wrong, try again\"",
"_____no_output_____"
],
[
"prediction = None # replace None with True or False\nx = \"\"\nc = bool(x)\n\nassert prediction == c, \"You guessed wrong, try again\"",
"_____no_output_____"
],
[
"prediction = None # replace None with True or False\nx = \"false\"\nc = bool(x)\n\nassert prediction == c, \"You guessed wrong, try again\"",
"_____no_output_____"
],
[
"prediction = None # replace None with True or False\nx = \"true\"\nc = bool(x)\n\nassert prediction == c, \"You guessed wrong, try again\"",
"_____no_output_____"
],
[
"prediction = None # replace None with True or False\nx = \"banana\"\nc = bool(x)\n\nassert prediction == c, \"You guessed wrong, try again\"",
"_____no_output_____"
]
],
[
[
"### Strings\n\nJust a couple of comments on strings as these will form an important part of your usage of Python. By default in Python 3 strings are immutable sequences of Unicode code points.\n\nStrings can be created in a few ways:\n* Single quotes: 'allows embedded \"double\" quotes'\n* Double quotes: \"allows embedded 'single' quotes\".\n* Triple quoted: '''Three single quotes''', \"\"\"Three double quotes\"\"\"\n\nTriple quoted strings can include newlines.",
"_____no_output_____"
]
],
[
[
"some_string = \"Some strings\nare longer than one line\"",
"_____no_output_____"
],
[
"some_string = \"\"\"Some strings\nare longer than one line\"\"\"",
"_____no_output_____"
]
],
[
[
"To get the length of a string use the `len` function.",
"_____no_output_____"
]
],
[
[
"sample_str = \"Far far away, behind the word mountains, far from the countries Vokalia and Consonantia, there live the blind texts. Separated they live in Bookmarksgrove right at the coast of the Semantics\"\n\nlen(sample_str)\n",
"_____no_output_____"
]
],
[
[
"You can access specific characters (or ranges of characters) using indexes `[]`",
"_____no_output_____"
]
],
[
[
"# get the first character\nprint(sample_str[0])\n\n# get the first 5 characters\nprint(sample_str[0:5])\n\n# get the last 5 characters\nprint(sample_str[-5:])\n\n# get the last 5th through 10th characters\nprint(sample_str[5:10])",
"F\nFar f\nntics\nar aw\n"
]
],
[
[
"There are many really useful operators on strings, I wanted to highlight a couple here as really helpful",
"_____no_output_____"
]
],
[
[
"# upper, lower and capitalize modify strings\nsome_string = \"Here I am\"\nprint(some_string.lower())\nprint(some_string.upper())\nprint(some_string.capitalize())\n",
"here i am\nHERE I AM\nHere i am\n"
],
[
"# strip removes whitespace characters\nsome_string = \" Here I am \"\nprint(some_string.strip())\n",
"_____no_output_____"
],
[
"# split splits a string based on a delimiter (defaults to space) and returns a list (more on this later)\nsome_string = \" Here I am \"\nprint(some_string.split())\n\n# pass the delimiter as an argument\ndelim = \"10,120,30\"\nprint(delim.split(\",\"))",
"_____no_output_____"
],
[
"# replace replaces a substring with another\nsome_string = \" Here I am \"\nprint(some_string.replace(\"am\", \"was\"))\n\n# NOTE the replace operation returns a new string, it doesn't update the original string\nwas_string = some_string.replace(\"am\", \"was\")\nprint(some_string) \nprint(was_string)\n",
" Here I was \n Here I am \n Here I was \n"
],
[
"# format is used to substitute text, the {} is replaced by the corresponding argument from the format \nprint(\"The string '{}' after replace was '{}'\".format(some_string, was_string))\n",
"The string ' Here I am ' after replace was ' Here I was '\n"
]
],
[
[
"The output format of the values in the `format` method can be controlled using [format strings](https://docs.python.org/3/library/string.html#formatstrings).\n\nThis is just a highlight of some of the methods on strings; check out [String Methods](https://docs.python.org/3/library/stdtypes.html#string-methods) for more. \n\nNote, there are also similar methods for the other types; the [Standard Types](https://docs.python.org/3/library/stdtypes.html) page will serve you well.\n",
"_____no_output_____"
],
[
"## Operators",
"_____no_output_____"
],
[
"### Arithmetic Operators\n* `+` (addition)\n* `-` (subtraction)\n* `*` (multiplication)\n* `/` (division)\n* `//` (integer division)\n* `**` (power)\n* `%` (modulus)",
"_____no_output_____"
]
],
[
[
"print(1 + 2, 1.0 + 2.0)",
"_____no_output_____"
],
[
"print(2 - 1, 2.0 - 1.0)",
"_____no_output_____"
],
[
"print(3 * 4, 3.0 * 4.0)",
"_____no_output_____"
],
[
"print(3 / 4, 3.0 / 4.0)",
"_____no_output_____"
],
[
"print(3.0 // 4.0)",
"_____no_output_____"
],
[
"print(3**2, 3.0**2.0)",
"_____no_output_____"
],
[
"print( 5 % 2, 5.0 % 2)",
"_____no_output_____"
]
],
[
[
"### Logical Operators\n* `not` (`!`)\n* `and` (`&`)\n* `or` (`|`)\n* xor (`^`)",
"_____no_output_____"
]
],
[
[
"not True",
"_____no_output_____"
],
[
"(True and False, True & False)",
"_____no_output_____"
],
[
"(True or False, True | False)",
"_____no_output_____"
],
[
"# Exclusive or!\n(True ^ False, True ^ True, False ^ False)\n",
"_____no_output_____"
]
],
[
[
"### Comparison Operators\n* `==` equals\n* `<` less than\n* `>` greater than\n* `<=` less than or equal to\n* `>=` greater than or equal to",
"_____no_output_____"
]
],
[
[
"a = 1\nb = 2\n\nprint(\"Equal:\", a == b)\n\nprint(\"Less than:\", a < b)\n\nprint(\"Greater than:\", a > b)",
"_____no_output_____"
]
],
[
[
"Try using these operators with non-numeric variables",
"_____no_output_____"
]
],
[
[
"a = \"apple\"\nb = \"banana\"\n\nprint(\"Equal:\", a == b)\n\nprint(\"Less than:\", a < b)\n\nprint(\"Greater than:\", a > b)",
"Equal: False\nLess than: True\nGreater than: False\n"
]
],
[
[
"Based on the output, what attributes of the strings do you think are being used for evaluation?",
"_____no_output_____"
],
[
"## Next\n\nNow, we're going to move on to compound types. Click [here](./02_introduction_to_python_variables_compound_types.ipynb) to continue.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a9a20d20f304f7d3d76f704dbf36d8910c3713e
| 52,170 |
ipynb
|
Jupyter Notebook
|
BCNcode/0_vibratioon_signal/1250/CNN/CNN_1250-016-512-y.ipynb
|
Decaili98/BCN-code-2022
|
ab0ce085cb29fbf12b6d773861953cb2cef23e20
|
[
"MulanPSL-1.0"
] | null | null | null |
BCNcode/0_vibratioon_signal/1250/CNN/CNN_1250-016-512-y.ipynb
|
Decaili98/BCN-code-2022
|
ab0ce085cb29fbf12b6d773861953cb2cef23e20
|
[
"MulanPSL-1.0"
] | null | null | null |
BCNcode/0_vibratioon_signal/1250/CNN/CNN_1250-016-512-y.ipynb
|
Decaili98/BCN-code-2022
|
ab0ce085cb29fbf12b6d773861953cb2cef23e20
|
[
"MulanPSL-1.0"
] | null | null | null | 111.713062 | 17,912 | 0.790608 |
[
[
[
"from tensorflow import keras\nfrom tensorflow.keras import *\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.regularizers import l2#正则化L2\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"# 12-0.2\n# 13-2.4\n# 18-12.14\nimport pandas as pd\nimport numpy as np\nnormal = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.12-未发生缠绕前\\2013-9.12振动\\2013-9-12振动-1250rmin-mat\\1250rnormalviby.txt', delimiter=',')\nchanrao = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.17-发生缠绕后\\振动\\9-18上午振动1250rmin-mat\\1250r_chanraoviby.txt', delimiter=',')\nprint(normal.shape,chanrao.shape,\"***************************************************\")\ndata_normal=normal[8:10] #提取前两行\ndata_chanrao=chanrao[8:10] #提取前两行\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")\ndata_normal=data_normal.reshape(1,-1)\ndata_chanrao=data_chanrao.reshape(1,-1)\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")",
"(22, 32768) (20, 32768) ***************************************************\n(2, 32768) (2, 32768)\n[[ 0.52418 0.12881 0.10946 ... 0.27694 -0.37448 0.24678 ]\n [-1.3162 0.13418 0.35766 ... -0.96742 -0.002735 -0.28469 ]] \r\n [[ 0.019579 -1.93 0.054093 ... -1.009 0.34406 0.538 ]\n [ 1.0829 0.057349 2.1288 ... -0.67624 -0.28287 0.59637 ]] ***************************************************\n(1, 65536) (1, 65536)\n[[ 0.52418 0.12881 0.10946 ... -0.96742 -0.002735 -0.28469 ]] \r\n [[ 0.019579 -1.93 0.054093 ... -0.67624 -0.28287 0.59637 ]] ***************************************************\n"
],
[
"#水泵的两种故障类型信号normal正常,chanrao故障\ndata_normal=data_normal.reshape(-1, 512)#(65536,1)-(128, 515)\ndata_chanrao=data_chanrao.reshape(-1,512)\nprint(data_normal.shape,data_chanrao.shape)\n",
"(128, 512) (128, 512)\n"
],
[
"import numpy as np\ndef yuchuli(data,label):#(4:1)(51:13)\n #打乱数据顺序\n np.random.shuffle(data)\n train = data[0:102,:]\n test = data[102:128,:]\n label_train = np.array([label for i in range(0,102)])\n label_test =np.array([label for i in range(0,26)])\n return train,test ,label_train ,label_test\ndef stackkk(a,b,c,d,e,f,g,h):\n aa = np.vstack((a, e))\n bb = np.vstack((b, f))\n cc = np.hstack((c, g))\n dd = np.hstack((d, h))\n return aa,bb,cc,dd\nx_tra0,x_tes0,y_tra0,y_tes0 = yuchuli(data_normal,0)\nx_tra1,x_tes1,y_tra1,y_tes1 = yuchuli(data_chanrao,1)\ntr1,te1,yr1,ye1=stackkk(x_tra0,x_tes0,y_tra0,y_tes0 ,x_tra1,x_tes1,y_tra1,y_tes1)\n\nx_train=tr1\nx_test=te1\ny_train = yr1\ny_test = ye1\n\n#打乱数据\nstate = np.random.get_state()\nnp.random.shuffle(x_train)\nnp.random.set_state(state)\nnp.random.shuffle(y_train)\n\nstate = np.random.get_state()\nnp.random.shuffle(x_test)\nnp.random.set_state(state)\nnp.random.shuffle(y_test)\n\n\n#对训练集和测试集标准化\ndef ZscoreNormalization(x):\n \"\"\"Z-score normaliaztion\"\"\"\n x = (x - np.mean(x)) / np.std(x)\n return x\nx_train=ZscoreNormalization(x_train)\nx_test=ZscoreNormalization(x_test)\n# print(x_test[0])\n\n\n#转化为一维序列\nx_train = x_train.reshape(-1,512,1)\nx_test = x_test.reshape(-1,512,1)\nprint(x_train.shape,x_test.shape)\n\ndef to_one_hot(labels,dimension=2):\n results = np.zeros((len(labels),dimension))\n for i,label in enumerate(labels):\n results[i,label] = 1\n return results\none_hot_train_labels = to_one_hot(y_train)\none_hot_test_labels = to_one_hot(y_test)\n",
"(204, 512, 1) (52, 512, 1)\n"
],
[
"x = layers.Input(shape=[512,1,1])\n#普通卷积层\nconv1 = layers.Conv2D(filters=16, kernel_size=(2, 1), activation='relu',padding='valid',name='conv1')(x)\n#池化层\nPOOL1 = MaxPooling2D((2,1))(conv1)\n#普通卷积层\nconv2 = layers.Conv2D(filters=32, kernel_size=(2, 1), activation='relu',padding='valid',name='conv2')(POOL1)\n#池化层\nPOOL2 = MaxPooling2D((2,1))(conv2)\n#Dropout层\nDropout=layers.Dropout(0.1)(POOL2 )\nFlatten=layers.Flatten()(Dropout)\n#全连接层\nDense1=layers.Dense(50, activation='relu')(Flatten)\nDense2=layers.Dense(2, activation='softmax')(Dense1)\nmodel = keras.Model(x, Dense2) \nmodel.summary() ",
"Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 512, 1, 1)] 0 \n_________________________________________________________________\nconv1 (Conv2D) (None, 511, 1, 16) 48 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 255, 1, 16) 0 \n_________________________________________________________________\nconv2 (Conv2D) (None, 254, 1, 32) 1056 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 127, 1, 32) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 127, 1, 32) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 4064) 0 \n_________________________________________________________________\ndense (Dense) (None, 50) 203250 \n_________________________________________________________________\ndense_1 (Dense) (None, 2) 102 \n=================================================================\nTotal params: 204,456\nTrainable params: 204,456\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"\n#定义优化\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',metrics=['accuracy']) ",
"_____no_output_____"
],
[
"import time\ntime_begin = time.time()\nhistory = model.fit(x_train,one_hot_train_labels,\n validation_split=0.1,\n epochs=50,batch_size=10,\n shuffle=True)\ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)",
"Epoch 1/50\n19/19 [==============================] - 2s 77ms/step - loss: 0.7293 - accuracy: 0.4682 - val_loss: 0.5265 - val_accuracy: 0.5714\nEpoch 2/50\n19/19 [==============================] - 0s 9ms/step - loss: 0.6089 - accuracy: 0.5518 - val_loss: 0.4097 - val_accuracy: 0.9048\nEpoch 3/50\n19/19 [==============================] - 0s 8ms/step - loss: 0.3415 - accuracy: 0.8742 - val_loss: 0.1977 - val_accuracy: 1.0000\nEpoch 4/50\n19/19 [==============================] - 0s 10ms/step - loss: 0.1031 - accuracy: 1.0000 - val_loss: 0.0612 - val_accuracy: 1.0000\nEpoch 5/50\n19/19 [==============================] - 0s 8ms/step - loss: 0.0288 - accuracy: 1.0000 - val_loss: 0.0249 - val_accuracy: 1.0000\nEpoch 6/50\n19/19 [==============================] - 0s 9ms/step - loss: 0.0116 - accuracy: 1.0000 - val_loss: 0.0146 - val_accuracy: 1.0000\nEpoch 7/50\n19/19 [==============================] - 0s 8ms/step - loss: 0.0050 - accuracy: 1.0000 - val_loss: 0.0112 - val_accuracy: 1.0000\nEpoch 8/50\n19/19 [==============================] - 0s 7ms/step - loss: 0.0027 - accuracy: 1.0000 - val_loss: 0.0088 - val_accuracy: 1.0000\nEpoch 9/50\n19/19 [==============================] - 0s 11ms/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 0.0069 - val_accuracy: 1.0000\nEpoch 10/50\n19/19 [==============================] - 0s 11ms/step - loss: 0.0015 - accuracy: 1.0000 - val_loss: 0.0069 - val_accuracy: 1.0000\nEpoch 11/50\n19/19 [==============================] - 0s 8ms/step - loss: 0.0015 - accuracy: 1.0000 - val_loss: 0.0070 - val_accuracy: 1.0000\nEpoch 12/50\n19/19 [==============================] - 0s 8ms/step - loss: 9.6300e-04 - accuracy: 1.0000 - val_loss: 0.0048 - val_accuracy: 1.0000\nEpoch 13/50\n19/19 [==============================] - 0s 8ms/step - loss: 9.5801e-04 - accuracy: 1.0000 - val_loss: 0.0057 - val_accuracy: 1.0000\nEpoch 14/50\n19/19 [==============================] - 0s 8ms/step - loss: 6.5595e-04 - accuracy: 1.0000 - val_loss: 0.0043 - 
val_accuracy: 1.0000\nEpoch 15/50\n19/19 [==============================] - 0s 8ms/step - loss: 5.7011e-04 - accuracy: 1.0000 - val_loss: 0.0043 - val_accuracy: 1.0000\nEpoch 16/50\n19/19 [==============================] - 0s 7ms/step - loss: 5.7517e-04 - accuracy: 1.0000 - val_loss: 0.0040 - val_accuracy: 1.0000\nEpoch 17/50\n19/19 [==============================] - 0s 8ms/step - loss: 4.7672e-04 - accuracy: 1.0000 - val_loss: 0.0038 - val_accuracy: 1.0000\nEpoch 18/50\n19/19 [==============================] - 0s 8ms/step - loss: 4.0534e-04 - accuracy: 1.0000 - val_loss: 0.0033 - val_accuracy: 1.0000\nEpoch 19/50\n19/19 [==============================] - 0s 7ms/step - loss: 4.1599e-04 - accuracy: 1.0000 - val_loss: 0.0029 - val_accuracy: 1.0000\nEpoch 20/50\n19/19 [==============================] - 0s 8ms/step - loss: 3.3495e-04 - accuracy: 1.0000 - val_loss: 0.0036 - val_accuracy: 1.0000\nEpoch 21/50\n19/19 [==============================] - 0s 7ms/step - loss: 3.0851e-04 - accuracy: 1.0000 - val_loss: 0.0031 - val_accuracy: 1.0000\nEpoch 22/50\n19/19 [==============================] - 0s 8ms/step - loss: 2.8511e-04 - accuracy: 1.0000 - val_loss: 0.0030 - val_accuracy: 1.0000\nEpoch 23/50\n19/19 [==============================] - 0s 8ms/step - loss: 2.2022e-04 - accuracy: 1.0000 - val_loss: 0.0028 - val_accuracy: 1.0000\nEpoch 24/50\n19/19 [==============================] - 0s 7ms/step - loss: 2.3853e-04 - accuracy: 1.0000 - val_loss: 0.0028 - val_accuracy: 1.0000\nEpoch 25/50\n19/19 [==============================] - 0s 7ms/step - loss: 2.1611e-04 - accuracy: 1.0000 - val_loss: 0.0029 - val_accuracy: 1.0000\nEpoch 26/50\n19/19 [==============================] - 0s 8ms/step - loss: 2.3482e-04 - accuracy: 1.0000 - val_loss: 0.0028 - val_accuracy: 1.0000\nEpoch 27/50\n19/19 [==============================] - 0s 8ms/step - loss: 1.6115e-04 - accuracy: 1.0000 - val_loss: 0.0025 - val_accuracy: 1.0000\nEpoch 28/50\n19/19 [==============================] - 0s 7ms/step 
- loss: 1.6332e-04 - accuracy: 1.0000 - val_loss: 0.0026 - val_accuracy: 1.0000\nEpoch 29/50\n19/19 [==============================] - 0s 7ms/step - loss: 1.6600e-04 - accuracy: 1.0000 - val_loss: 0.0024 - val_accuracy: 1.0000\nEpoch 30/50\n19/19 [==============================] - 0s 8ms/step - loss: 1.2241e-04 - accuracy: 1.0000 - val_loss: 0.0022 - val_accuracy: 1.0000\nEpoch 31/50\n19/19 [==============================] - 0s 8ms/step - loss: 1.5362e-04 - accuracy: 1.0000 - val_loss: 0.0023 - val_accuracy: 1.0000\nEpoch 32/50\n19/19 [==============================] - 0s 7ms/step - loss: 1.0263e-04 - accuracy: 1.0000 - val_loss: 0.0024 - val_accuracy: 1.0000\nEpoch 33/50\n19/19 [==============================] - 0s 8ms/step - loss: 8.9152e-05 - accuracy: 1.0000 - val_loss: 0.0027 - val_accuracy: 1.0000\nEpoch 34/50\n19/19 [==============================] - 0s 7ms/step - loss: 1.1545e-04 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 35/50\n19/19 [==============================] - 0s 8ms/step - loss: 9.6943e-05 - accuracy: 1.0000 - val_loss: 0.0020 - val_accuracy: 1.0000\nEpoch 36/50\n19/19 [==============================] - 0s 8ms/step - loss: 7.7345e-05 - accuracy: 1.0000 - val_loss: 0.0021 - val_accuracy: 1.0000\nEpoch 37/50\n19/19 [==============================] - 0s 8ms/step - loss: 9.7932e-05 - accuracy: 1.0000 - val_loss: 0.0020 - val_accuracy: 1.0000\nEpoch 38/50\n19/19 [==============================] - 0s 8ms/step - loss: 9.1244e-05 - accuracy: 1.0000 - val_loss: 0.0019 - val_accuracy: 1.0000\nEpoch 39/50\n19/19 [==============================] - 0s 9ms/step - loss: 6.2889e-05 - accuracy: 1.0000 - val_loss: 0.0021 - val_accuracy: 1.0000\nEpoch 40/50\n19/19 [==============================] - 0s 8ms/step - loss: 7.1868e-05 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 41/50\n19/19 [==============================] - 0s 7ms/step - loss: 6.8233e-05 - accuracy: 1.0000 - val_loss: 0.0019 - val_accuracy: 1.0000\nEpoch 
42/50\n19/19 [==============================] - 0s 7ms/step - loss: 6.2157e-05 - accuracy: 1.0000 - val_loss: 0.0018 - val_accuracy: 1.0000\nEpoch 43/50\n19/19 [==============================] - 0s 9ms/step - loss: 5.8923e-05 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 44/50\n19/19 [==============================] - 0s 9ms/step - loss: 4.6790e-05 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 45/50\n19/19 [==============================] - 0s 8ms/step - loss: 5.3712e-05 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 46/50\n19/19 [==============================] - 0s 9ms/step - loss: 5.7056e-05 - accuracy: 1.0000 - val_loss: 0.0017 - val_accuracy: 1.0000\nEpoch 47/50\n19/19 [==============================] - 0s 8ms/step - loss: 4.4449e-05 - accuracy: 1.0000 - val_loss: 0.0016 - val_accuracy: 1.0000\nEpoch 48/50\n19/19 [==============================] - 0s 8ms/step - loss: 4.2601e-05 - accuracy: 1.0000 - val_loss: 0.0018 - val_accuracy: 1.0000\nEpoch 49/50\n19/19 [==============================] - 0s 9ms/step - loss: 4.2589e-05 - accuracy: 1.0000 - val_loss: 0.0018 - val_accuracy: 1.0000\nEpoch 50/50\n19/19 [==============================] - 0s 10ms/step - loss: 4.8914e-05 - accuracy: 1.0000 - val_loss: 0.0015 - val_accuracy: 1.0000\ntime: 10.053858280181885\n"
],
[
"import time\ntime_begin = time.time()\nscore = model.evaluate(x_test,one_hot_test_labels, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n \ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)",
"Test loss: 0.000328180321957916\nTest accuracy: 1.0\ntime: 0.06382942199707031\n"
],
[
"#绘制acc-loss曲线\nimport matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['val_loss'],color='g')\nplt.plot(history.history['accuracy'],color='b')\nplt.plot(history.history['val_accuracy'],color='k')\nplt.title('model loss and acc')\nplt.ylabel('Accuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'test_loss','train_acc', 'test_acc'], loc='center right')\n# plt.legend(['train_loss','train_acc'], loc='upper left')\n#plt.savefig('1.png')\nplt.show()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['accuracy'],color='b')\nplt.title('model loss and sccuracy ')\nplt.ylabel('loss/sccuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'train_sccuracy'], loc='center right')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9a24db828527317ef08972d56e7fe3ffff955c
| 86,530 |
ipynb
|
Jupyter Notebook
|
TimeSeries/MultivariateLSTMForecastModelMultipleLag.ipynb
|
vladiant/MachineLearningUtils
|
c6d1f4e928d4a258b8b6e441c93004e2337ca301
|
[
"MIT"
] | null | null | null |
TimeSeries/MultivariateLSTMForecastModelMultipleLag.ipynb
|
vladiant/MachineLearningUtils
|
c6d1f4e928d4a258b8b6e441c93004e2337ca301
|
[
"MIT"
] | null | null | null |
TimeSeries/MultivariateLSTMForecastModelMultipleLag.ipynb
|
vladiant/MachineLearningUtils
|
c6d1f4e928d4a258b8b6e441c93004e2337ca301
|
[
"MIT"
] | null | null | null | 101.204678 | 48,748 | 0.822963 |
[
[
[
"https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/\n# Multivariate Time Series Forecasting with LSTMs in Keras",
"_____no_output_____"
],
[
"## Dataset\nThis is a dataset that reports on the weather and the level of pollution each hour for five years at the US embassy in Beijing, China.\n\nBeijing PM2.5 Data Set (rename to raw.csv)\n\nhttps://raw.githubusercontent.com/jbrownlee/Datasets/master/pollution.csv",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\nfrom math import sqrt\n\nfrom numpy import concatenate\n\nfrom pandas import read_csv, DataFrame, concat\n\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\n\nfrom matplotlib import pyplot",
"_____no_output_____"
],
[
"# Load data\ndef parse(x):\n return datetime.strptime(x, '%Y %m %d %H')",
"_____no_output_____"
],
[
"dataset = read_csv('raw.csv', parse_dates=[['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)",
"_____no_output_____"
],
[
"dataset",
"_____no_output_____"
],
[
"dataset.drop('No', axis=1, inplace=True)",
"_____no_output_____"
],
[
"# Manually specify column names\ndataset.columns =['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']\ndataset.index.name ='date'",
"_____no_output_____"
],
[
"# Mark all N.A. values with 0\ndataset['pollution'].fillna(0, inplace=True)",
"_____no_output_____"
],
[
"# Drop the first 24 hours\ndataset = dataset[24:]",
"_____no_output_____"
],
[
"# Summarize first 5 rows\nprint(dataset.head(5))",
" pollution dew temp press wnd_dir wnd_spd snow rain\ndate \n2010-01-02 00:00:00 129.0 -16 -4.0 1020.0 SE 1.79 0 0\n2010-01-02 01:00:00 148.0 -15 -4.0 1020.0 SE 2.68 0 0\n2010-01-02 02:00:00 159.0 -11 -5.0 1021.0 SE 3.57 0 0\n2010-01-02 03:00:00 181.0 -7 -5.0 1022.0 SE 5.36 1 0\n2010-01-02 04:00:00 138.0 -7 -5.0 1022.0 SE 6.25 2 0\n"
],
[
"# Save to file\ndataset.to_csv('pollution.csv')",
"_____no_output_____"
]
],
[
[
"## Process the new dataset",
"_____no_output_____"
]
],
[
[
"# Load dataset\ndataset = read_csv('pollution.csv', header=0, index_col=0)\nvalues = dataset.values",
"_____no_output_____"
],
[
"# Specify columns to plot\ngroups = [0, 1, 2, 3, 5, 6, 7]\ni = 1",
"_____no_output_____"
],
[
"# Plot each column\npyplot.figure()\nfor group in groups:\n pyplot.subplot(len(groups), 1, i)\n pyplot.plot(values[:, group])\n pyplot.title(dataset.columns[group], y=0.5, loc='right')\n i += 1\npyplot.show()",
"_____no_output_____"
]
],
[
[
"## Prepare data for the LSTM",
"_____no_output_____"
]
],
[
[
"# Convert series to supervised learning\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = DataFrame(data)\n cols, names = list(), list()\n\n # Input sequence (t-n, ..., t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\n # Forecast sequence\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n \n # Put it all together\n agg = concat(cols, axis=1)\n agg.columns = names\n \n # Drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n \n return agg",
"_____no_output_____"
],
[
"# Load dataset\ndataset = read_csv('pollution.csv', header=0, index_col=0)\nvalues = dataset.values",
"_____no_output_____"
],
[
"# Integer encode direction\nencoder = LabelEncoder()\nvalues[:, 4] = encoder.fit_transform(values[:, 4])",
"_____no_output_____"
],
[
"# Ensure all data is float\nvalues = values.astype('float32')",
"_____no_output_____"
],
[
"# Normalize features\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled = scaler.fit_transform(values)",
"_____no_output_____"
],
[
"# Specify number of lag hours\nn_hours = 3\nn_features = 8",
"_____no_output_____"
],
[
"# Frame as supervised learning\nreframed = series_to_supervised(scaled, n_hours, 1)",
"_____no_output_____"
],
[
"reframed.shape",
"_____no_output_____"
]
],
[
[
"## Define and fit the model",
"_____no_output_____"
]
],
[
[
"# Split into train and test sets\nvalues = reframed.values\nn_train_hours = 365 * 24\ntrain = values[:n_train_hours, :]\ntest = values[n_train_hours:, :]",
"_____no_output_____"
],
[
"# Split into input and outputs\nn_obs = n_hours * n_features\ntrain_X, train_y = train[:, :n_obs], train[:, -n_features]\ntest_X, test_y = test[:, :n_obs], test[:, -n_features]",
"_____no_output_____"
],
[
"# Reshape input to be 3D [samples, timesteps, features]\ntrain_X = train_X.reshape((train_X.shape[0], n_hours, n_features))\ntest_X = test_X.reshape((test_X.shape[0], n_hours, n_features))",
"_____no_output_____"
],
[
"train_X.shape, train_y.shape, test_X.shape, test_y.shape",
"_____no_output_____"
],
[
"# Design network\nmodel = Sequential()\nmodel.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))\nmodel.add(Dense(1))\nmodel.compile(loss='mae', optimizer='adam')",
"_____no_output_____"
],
[
"# Fit network\nhistory = model.fit(train_X, train_y, \n epochs=50, batch_size=72, \n validation_data=(test_X, test_y), \n verbose=2, shuffle=False)",
"Epoch 1/50\n122/122 - 1s - loss: 0.0541 - val_loss: 0.0851\nEpoch 2/50\n122/122 - 1s - loss: 0.0325 - val_loss: 0.0629\nEpoch 3/50\n122/122 - 0s - loss: 0.0240 - val_loss: 0.0378\nEpoch 4/50\n122/122 - 0s - loss: 0.0211 - val_loss: 0.0243\nEpoch 5/50\n122/122 - 0s - loss: 0.0209 - val_loss: 0.0209\nEpoch 6/50\n122/122 - 0s - loss: 0.0203 - val_loss: 0.0200\nEpoch 7/50\n122/122 - 1s - loss: 0.0199 - val_loss: 0.0194\nEpoch 8/50\n122/122 - 0s - loss: 0.0194 - val_loss: 0.0190\nEpoch 9/50\n122/122 - 0s - loss: 0.0187 - val_loss: 0.0184\nEpoch 10/50\n122/122 - 0s - loss: 0.0181 - val_loss: 0.0182\nEpoch 11/50\n122/122 - 0s - loss: 0.0178 - val_loss: 0.0180\nEpoch 12/50\n122/122 - 0s - loss: 0.0175 - val_loss: 0.0172\nEpoch 13/50\n122/122 - 0s - loss: 0.0168 - val_loss: 0.0176\nEpoch 14/50\n122/122 - 0s - loss: 0.0166 - val_loss: 0.0176\nEpoch 15/50\n122/122 - 0s - loss: 0.0161 - val_loss: 0.0180\nEpoch 16/50\n122/122 - 1s - loss: 0.0159 - val_loss: 0.0181\nEpoch 17/50\n122/122 - 0s - loss: 0.0154 - val_loss: 0.0180\nEpoch 18/50\n122/122 - 1s - loss: 0.0152 - val_loss: 0.0180\nEpoch 19/50\n122/122 - 0s - loss: 0.0150 - val_loss: 0.0184\nEpoch 20/50\n122/122 - 0s - loss: 0.0150 - val_loss: 0.0179\nEpoch 21/50\n122/122 - 1s - loss: 0.0149 - val_loss: 0.0175\nEpoch 22/50\n122/122 - 0s - loss: 0.0147 - val_loss: 0.0172\nEpoch 23/50\n122/122 - 0s - loss: 0.0148 - val_loss: 0.0168\nEpoch 24/50\n122/122 - 0s - loss: 0.0146 - val_loss: 0.0171\nEpoch 25/50\n122/122 - 0s - loss: 0.0146 - val_loss: 0.0168\nEpoch 26/50\n122/122 - 0s - loss: 0.0147 - val_loss: 0.0162\nEpoch 27/50\n122/122 - 0s - loss: 0.0145 - val_loss: 0.0164\nEpoch 28/50\n122/122 - 0s - loss: 0.0146 - val_loss: 0.0155\nEpoch 29/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0159\nEpoch 30/50\n122/122 - 0s - loss: 0.0145 - val_loss: 0.0158\nEpoch 31/50\n122/122 - 0s - loss: 0.0145 - val_loss: 0.0155\nEpoch 32/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0154\nEpoch 33/50\n122/122 - 0s - loss: 0.0144 - val_loss: 
0.0153\nEpoch 34/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0149\nEpoch 35/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0150\nEpoch 36/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0147\nEpoch 37/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0153\nEpoch 38/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0149\nEpoch 39/50\n122/122 - 0s - loss: 0.0145 - val_loss: 0.0145\nEpoch 40/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0144\nEpoch 41/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0146\nEpoch 42/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0146\nEpoch 43/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0144\nEpoch 44/50\n122/122 - 0s - loss: 0.0144 - val_loss: 0.0145\nEpoch 45/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0143\nEpoch 46/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0143\nEpoch 47/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0142\nEpoch 48/50\n122/122 - 0s - loss: 0.0142 - val_loss: 0.0140\nEpoch 49/50\n122/122 - 0s - loss: 0.0143 - val_loss: 0.0139\nEpoch 50/50\n122/122 - 0s - loss: 0.0142 - val_loss: 0.0140\n"
],
[
"# Plot history\npyplot.plot(history.history['loss'], label='train')\npyplot.plot(history.history['val_loss'], label='test')\npyplot.legend()\npyplot.show()",
"_____no_output_____"
]
],
[
[
"## Evaluate model",
"_____no_output_____"
]
],
[
[
"# Make a prediction\nyhat = model.predict(test_X)\ntest_X = test_X.reshape((test_X.shape[0], n_hours*n_features))",
"_____no_output_____"
],
[
"# Invert scaling for forecast\ninv_yhat = concatenate((yhat, test_X[:, -7:]), axis=1)\ninv_yhat = scaler.inverse_transform(inv_yhat)\ninv_yhat = inv_yhat[:, 0]",
"_____no_output_____"
],
[
"# Invert scaling for actual\ntest_y = test_y.reshape(len(test_y), 1)\ninv_y = concatenate((test_y, test_X[:, -7:]), axis=1)\ninv_y = scaler.inverse_transform(inv_y)\ninv_y = inv_y[:, 0]",
"_____no_output_____"
],
[
"# Calculate RMSE\nrmse = sqrt(mean_squared_error(inv_y, inv_yhat))\nrmse",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a9a5104d304f1db9854891ddfb0d031ae9bd698
| 717,598 |
ipynb
|
Jupyter Notebook
|
Train_ppo2-CNN.ipynb
|
pleslabay/finalRL
|
bd03be194163ba81468127ee8ecd8f8c0ca1f8b6
|
[
"MIT"
] | null | null | null |
Train_ppo2-CNN.ipynb
|
pleslabay/finalRL
|
bd03be194163ba81468127ee8ecd8f8c0ca1f8b6
|
[
"MIT"
] | null | null | null |
Train_ppo2-CNN.ipynb
|
pleslabay/finalRL
|
bd03be194163ba81468127ee8ecd8f8c0ca1f8b6
|
[
"MIT"
] | null | null | null | 48.506016 | 2,416 | 0.354138 |
[
[
[
"import gym\n\nfrom stable_baselines.common.policies import MlpPolicy, CnnPolicy, CnnLstmPolicy\nfrom stable_baselines.common import make_vec_env\nfrom stable_baselines import PPO2\n",
"/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 
1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
"agent='CarRacing-v0'\n#agent='MountainCar-v0'\n#agent='Pendulum-v0'\n",
"_____no_output_____"
],
[
"env0 = gym.make(agent)\nprint(env0.action_space)\nprint(env0.observation_space)",
"Box(3,)\nBox(96, 96, 3)\n"
],
[
"try:\n print(env0.action_space.high)\n print(env0.action_space.low)\nexcept:\n print('discrete')\n ",
"[1. 1. 1.]\n[-1. 0. 0.]\n"
],
[
"print(env0.observation_space.high)\nprint(env0.observation_space.low)",
"[[[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]\n\n [[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]\n\n [[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]\n\n ...\n\n [[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]\n\n [[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]\n\n [[255 255 255]\n [255 255 255]\n [255 255 255]\n ...\n [255 255 255]\n [255 255 255]\n [255 255 255]]]\n[[[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]\n\n ...\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]\n\n [[0 0 0]\n [0 0 0]\n [0 0 0]\n ...\n [0 0 0]\n [0 0 0]\n [0 0 0]]]\n"
],
[
"env0.close()",
"_____no_output_____"
],
[
"# multiprocess environment\nparalelo=1\n\nif paralelo==1:\n env1 = gym.make(agent)\nelse:\n env1 = make_vec_env(agent, n_envs=paralelo)\n\nobs = env1.reset()\nprint(obs.shape)\n",
"/home/pablo/anaconda3/envs/rl/lib/python3.7/site-packages/gym/logger.py:30: UserWarning: \u001b[33mWARN: Box bound precision lowered by casting to float32\u001b[0m\n warnings.warn(colorize('%s: %s'%('WARN', msg % args), 'yellow'))\n"
],
[
"batch=512\nupdates=1024\ncont=True\n\nif not cont:\n model = PPO2(CnnPolicy, env1, verbose=1, n_steps=batch, n_cpu_tf_sess=2)#, cliprange_vf=-1, nminibatches=paralelo)\n\nmodel.learn(total_timesteps=updates*batch*paralelo)\n",
"Track generation: 1358..1702 -> 344-tiles track\n------------------------------------\n| approxkl | 30.452763 |\n| clipfrac | 0.6582031 |\n| explained_variance | -8.46 |\n| fps | 10 |\n| n_updates | 1 |\n| policy_entropy | 4.414331 |\n| policy_loss | 0.017552488 |\n| serial_timesteps | 512 |\n| time_elapsed | 1.57e-05 |\n| total_timesteps | 512 |\n| value_loss | 395.07672 |\n------------------------------------\n-----------------------------------\n| approxkl | 0.08196479 |\n| clipfrac | 0.50097656 |\n| explained_variance | -5.55 |\n| fps | 10 |\n| n_updates | 2 |\n| policy_entropy | 4.4140873 |\n| policy_loss | 0.05990212 |\n| serial_timesteps | 1024 |\n| time_elapsed | 50.8 |\n| total_timesteps | 1024 |\n| value_loss | 46.911892 |\n-----------------------------------\nTrack generation: 1112..1394 -> 282-tiles track\n------------------------------------\n| approxkl | 0.12324326 |\n| clipfrac | 0.58203125 |\n| explained_variance | -0.131 |\n| fps | 9 |\n| n_updates | 3 |\n| policy_entropy | 4.4145885 |\n| policy_loss | 0.051527593 |\n| serial_timesteps | 1536 |\n| time_elapsed | 101 |\n| total_timesteps | 1536 |\n| value_loss | 54.0653 |\n------------------------------------\n-----------------------------------\n| approxkl | 0.07969497 |\n| clipfrac | 0.53515625 |\n| explained_variance | -3.28 |\n| fps | 8 |\n| n_updates | 4 |\n| policy_entropy | 4.4154415 |\n| policy_loss | 0.06186733 |\n| serial_timesteps | 2048 |\n| time_elapsed | 155 |\n| total_timesteps | 2048 |\n| value_loss | 27.371786 |\n-----------------------------------\nTrack generation: 1160..1454 -> 294-tiles track\n------------------------------------\n| approxkl | 0.08322383 |\n| clipfrac | 0.5175781 |\n| explained_variance | -0.166 |\n| fps | 8 |\n| n_updates | 5 |\n| policy_entropy | 4.4171557 |\n| policy_loss | 0.036015417 |\n| serial_timesteps | 2560 |\n| time_elapsed | 215 |\n| total_timesteps | 2560 |\n| value_loss | 35.373856 
|\n------------------------------------\n------------------------------------\n| approxkl | 0.024412131 |\n| clipfrac | 0.25048828 |\n| explained_variance | -3.67 |\n| fps | 8 |\n| n_updates | 6 |\n| policy_entropy | 4.412349 |\n| policy_loss | 0.010634458 |\n| serial_timesteps | 3072 |\n| time_elapsed | 277 |\n| total_timesteps | 3072 |\n| value_loss | 19.557426 |\n------------------------------------\nTrack generation: 1175..1473 -> 298-tiles track\n------------------------------------\n| approxkl | 0.05419058 |\n| clipfrac | 0.41845703 |\n| explained_variance | -0.496 |\n| fps | 7 |\n| n_updates | 7 |\n| policy_entropy | 4.413053 |\n| policy_loss | 0.031729143 |\n| serial_timesteps | 3584 |\n| time_elapsed | 336 |\n| total_timesteps | 3584 |\n| value_loss | 25.909788 |\n------------------------------------\n------------------------------------\n| approxkl | 0.06438479 |\n| clipfrac | 0.44677734 |\n| explained_variance | -1.92 |\n| fps | 10 |\n| n_updates | 8 |\n| policy_entropy | 4.4194098 |\n| policy_loss | 0.030053284 |\n| serial_timesteps | 4096 |\n| time_elapsed | 406 |\n| total_timesteps | 4096 |\n| value_loss | 13.552184 |\n------------------------------------\nTrack generation: 1104..1388 -> 284-tiles track\nretry to generate track (normal if there are not many of this messages)\nTrack generation: 1189..1490 -> 301-tiles track\n------------------------------------\n| approxkl | 0.022072453 |\n| clipfrac | 0.28222656 |\n| explained_variance | -0.51 |\n| fps | 15 |\n| n_updates | 9 |\n| policy_entropy | 4.4243546 |\n| policy_loss | 0.014426061 |\n| serial_timesteps | 4608 |\n| time_elapsed | 457 |\n| total_timesteps | 4608 |\n| value_loss | 17.271883 |\n------------------------------------\n------------------------------------\n| approxkl | 0.03974934 |\n| clipfrac | 0.3696289 |\n| explained_variance | -5.5 |\n| fps | 16 |\n| n_updates | 10 |\n| policy_entropy | 4.423881 |\n| policy_loss | 0.030785115 |\n| serial_timesteps | 5120 |\n| time_elapsed | 489 
|\n| total_timesteps | 5120 |\n| value_loss | 12.561057 |\n------------------------------------\nTrack generation: 1191..1492 -> 301-tiles track\n-------------------------------------\n| approxkl | 0.030279512 |\n| clipfrac | 0.2836914 |\n| explained_variance | -0.175 |\n| fps | 16 |\n| n_updates | 11 |\n| policy_entropy | 4.4235 |\n| policy_loss | 0.0046181697 |\n| serial_timesteps | 5632 |\n| time_elapsed | 521 |\n| total_timesteps | 5632 |\n| value_loss | 13.519143 |\n-------------------------------------\n-------------------------------------\n| approxkl | 0.027909067 |\n| clipfrac | 0.3100586 |\n| explained_variance | -0.604 |\n| fps | 16 |\n| n_updates | 12 |\n| policy_entropy | 4.4210453 |\n| policy_loss | 0.0058654537 |\n| serial_timesteps | 6144 |\n| time_elapsed | 552 |\n| total_timesteps | 6144 |\n| value_loss | 9.549747 |\n-------------------------------------\nTrack generation: 1238..1552 -> 314-tiles track\nretry to generate track (normal if there are not many of this messages)\nTrack generation: 1103..1382 -> 279-tiles track\n------------------------------------\n| approxkl | 0.053726386 |\n| clipfrac | 0.43310547 |\n| explained_variance | -0.107 |\n| fps | 17 |\n| n_updates | 13 |\n| policy_entropy | 4.4174304 |\n| policy_loss | 0.03069439 |\n| serial_timesteps | 6656 |\n| time_elapsed | 584 |\n| total_timesteps | 6656 |\n| value_loss | 10.954506 |\n------------------------------------\n------------------------------------\n| approxkl | 0.062089637 |\n| clipfrac | 0.4169922 |\n| explained_variance | -0.296 |\n| fps | 16 |\n| n_updates | 14 |\n| policy_entropy | 4.4227204 |\n| policy_loss | 0.031849213 |\n| serial_timesteps | 7168 |\n| time_elapsed | 614 |\n| total_timesteps | 7168 |\n| value_loss | 8.155254 |\n------------------------------------\nTrack generation: 1151..1443 -> 292-tiles track\n------------------------------------\n| approxkl | 0.07581169 |\n| clipfrac | 0.46484375 |\n| explained_variance | -0.12 |\n| fps | 16 |\n| n_updates | 15 
|\n| policy_entropy | 4.42677 |\n| policy_loss | 0.036599256 |\n| serial_timesteps | 7680 |\n| time_elapsed | 644 |\n| total_timesteps | 7680 |\n| value_loss | 9.95517 |\n------------------------------------\n------------------------------------\n| approxkl | 0.026541503 |\n| clipfrac | 0.33203125 |\n| explained_variance | -0.242 |\n| fps | 16 |\n| n_updates | 16 |\n| policy_entropy | 4.4303145 |\n| policy_loss | 0.00642135 |\n| serial_timesteps | 8192 |\n| time_elapsed | 674 |\n| total_timesteps | 8192 |\n| value_loss | 7.982852 |\n------------------------------------\n"
],
[
"import pickle\n\nmodel.save(\"ppo2_cnn_agentII\", cloudpickle=True)\nparam_list=model.get_parameter_list()\nparam=model.get_parameters()\n\nf = open(\"ppo2_cnn_agentII_param.pkl\",\"wb\")\n#pickle.dump(param_list,f)\npickle.dump(param,f)\nf.close()\n\nenv1.close()",
"_____no_output_____"
],
[
"# Enjoy trained agent\nenv2 = gym.make(agent)\n\nif paralelo==1:\n obs=env2.reset()\nelse:\n import numpy as np\n obs=np.empty([paralelo,96,96,3])\n for i in range(paralelo):\n obs[i,:,:,:] = env2.reset()\n\n \ndone = False\npasos = 0\n_states=None\n\nwhile not done and pasos<1000:\n action, _states = model.predict(obs,_states)\n obs, reward, done, info = env2.step(action)\n env2.render()\n pasos+=1\n\n \nenv2.close()",
"Track generation: 1155..1448 -> 293-tiles track\n"
],
[
"print(reward, done, info, pasos)",
"-0.10000000000000142 False {} 691\n"
],
[
"print(action, _states)",
"[-1. 0. 0.] None\n"
],
[
"obs.shape",
"_____no_output_____"
],
[
"env2.close()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9a5167d44723ee72e6018cd2e57c553889d74b
| 5,140 |
ipynb
|
Jupyter Notebook
|
_downloads/3e284c0b2569d71a0b84598b36c8d3c6/plot_multiview_construction.ipynb
|
mvlearn/mvlearn.github.io
|
0dd5940ca2e08aee1bd31ce50a896db11b20eafd
|
[
"MIT"
] | null | null | null |
_downloads/3e284c0b2569d71a0b84598b36c8d3c6/plot_multiview_construction.ipynb
|
mvlearn/mvlearn.github.io
|
0dd5940ca2e08aee1bd31ce50a896db11b20eafd
|
[
"MIT"
] | null | null | null |
_downloads/3e284c0b2569d71a0b84598b36c8d3c6/plot_multiview_construction.ipynb
|
mvlearn/mvlearn.github.io
|
0dd5940ca2e08aee1bd31ce50a896db11b20eafd
|
[
"MIT"
] | null | null | null | 95.185185 | 2,481 | 0.718482 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Constructing multiple views to classify singleview data\n\nAs demonstrated in \"Asymmetric bagging and random subspace for support vector\nmachines-based relevance feedback in image retrieval\" (Dacheng 2006), in high\ndimensional data it can be useful to subsample the features and construct\nmultiple classifiers on each subsample whose individual predictions are\ncombined using majority vote. This is akin to bagging but concerns the\nfeatures rather than samples and is how random forests are ensembled\nfrom individual decision trees. Here, we apply Linear Discriminant Analysis\n(LDA) to a high dimensional image classification problem and demonstrate\nhow subsampling features can help when the sample size is relatively low.\n\nA variety of possible subsample dimensions are considered, and for each the\nnumber of classifiers (views) is chosen such that their product is equal to\nthe number of features in the singleview data.\n\nTwo subsampling methods are applied. The random subspace method simply selects\na random subset of the features. The random Gaussian projection method creates\nnew features by sampling random multivariate Gaussian vectors used to project\nthe original features. The latter method can potentially help in complicated\nsettings where combinations of features better capture informative relations.\n\nIt is clear that subsampling features in this setting leads to improved\nout-of-sample accuracy, most likely as it reduces overfitting to the large\nnumber of raw features. This is confirmed as the accuracy seems to peak\naround when the number of features is equal to the number of samples, at which\npoint overfitting becomes possible.\n",
"_____no_output_____"
]
],
[
[
"# Author: Ronan Perry\n# License: MIT\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import cross_val_score, ShuffleSplit\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom mvlearn.compose import RandomSubspaceMethod, RandomGaussianProjection, \\\n ViewClassifier\n\n# Load the singleview Olivevetti faces dataset from sklearn\nX, y = fetch_olivetti_faces(return_X_y=True)\n\n# The data has 4096 features. The following subspace dimensions are used\ndims = [16, 64, 256, 1024]\n\n# We are interested in when the low sample size, high dimensionality setting\ntrain_size = 0.2\nrsm_scores = []\nrgp_scores = []\n\n# Initialze cross validation\nsplitter = ShuffleSplit(n_splits=5, train_size=train_size, random_state=0)\n\n# Compute singleview score, using all dimensions\nsingleview_clf = make_pipeline(StandardScaler(), LinearDiscriminantAnalysis())\nsingleview_scores = cross_val_score(singleview_clf, X, y, cv=splitter)\n\n# For each dimension, we compute scores for a multiview classifier\nfor dim in dims:\n n_views = int(X.shape[1] / dim)\n\n rsm_clf = make_pipeline(\n StandardScaler(),\n RandomSubspaceMethod(n_views=n_views, subspace_dim=dim),\n ViewClassifier(LinearDiscriminantAnalysis())\n )\n rsm_scores.append(cross_val_score(rsm_clf, X, y, cv=splitter))\n\n rgp_clf = make_pipeline(\n StandardScaler(),\n RandomGaussianProjection(n_views=n_views, n_components=dim),\n ViewClassifier(LinearDiscriminantAnalysis())\n )\n rgp_scores.append(cross_val_score(rgp_clf, X, y, cv=splitter))\n\n# The results are plotted\nfig, ax = plt.subplots()\nax.axvline(X.shape[0] * train_size, ls=':', c='grey',\n label='Number of training samples')\nax.axhline(np.mean(singleview_scores), ls='--', c='grey',\n label='LDA singleview score')\nax.errorbar(\n dims, np.mean(rsm_scores, axis=1),\n 
yerr=np.std(rsm_scores, axis=1), label='LDA o Random Subspace')\nax.errorbar(\n dims, np.mean(rgp_scores, axis=1),\n yerr=np.std(rgp_scores, axis=1), label='LDA o Random Gaussian Projection')\nax.set_xlabel('Number of subsampled dimensions')\nax.set_ylabel('Score')\nplt.title('Classification accuracy using constructed multiview data')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9a55ab7a4da8e0e561b45c484496b62ea9fa5e
| 271,527 |
ipynb
|
Jupyter Notebook
|
SurfsUp_Challenge.ipynb
|
KathiaF/surfs_up
|
62f2bf03c775efe8308c71f8b04737bd11e0bc6a
|
[
"MIT"
] | null | null | null |
SurfsUp_Challenge.ipynb
|
KathiaF/surfs_up
|
62f2bf03c775efe8308c71f8b04737bd11e0bc6a
|
[
"MIT"
] | null | null | null |
SurfsUp_Challenge.ipynb
|
KathiaF/surfs_up
|
62f2bf03c775efe8308c71f8b04737bd11e0bc6a
|
[
"MIT"
] | null | null | null | 69.038139 | 37,410 | 0.529969 |
[
[
[
"# Dependencies\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\n%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt\n\n# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func",
"_____no_output_____"
],
[
"engine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station",
"_____no_output_____"
],
[
"# Create our session (link) from Python to the DB\nsession = Session(engine)",
"_____no_output_____"
]
],
[
[
"## D1: Determine the Summary Statistics for June",
"_____no_output_____"
]
],
[
[
"# 1. Import the sqlalchemy extract function.\nfrom sqlalchemy import extract\n\n# 2. Write a query that filters the Measurement table to retrieve the temperatures for the month of June. \nresults = session.query(Measurement.date, Measurement.tobs).filter(extract('month', Measurement.date) == 6)",
"_____no_output_____"
],
[
"# 3. Convert the June temperatures to a list.\njune = []\njune = session.query(Measurement.date, Measurement.tobs).filter(extract('month', Measurement.date) == 6).all()\nprint(june)",
"[('2010-06-01', 78.0), ('2010-06-02', 76.0), ('2010-06-03', 78.0), ('2010-06-04', 76.0), ('2010-06-05', 77.0), ('2010-06-06', 78.0), ('2010-06-07', 77.0), ('2010-06-08', 78.0), ('2010-06-09', 78.0), ('2010-06-10', 79.0), ('2010-06-11', 78.0), ('2010-06-12', 78.0), ('2010-06-13', 78.0), ('2010-06-14', 77.0), ('2010-06-15', 78.0), ('2010-06-16', 78.0), ('2010-06-17', 77.0), ('2010-06-18', 77.0), ('2010-06-19', 82.0), ('2010-06-20', 78.0), ('2010-06-21', 78.0), ('2010-06-22', 78.0), ('2010-06-23', 78.0), ('2010-06-24', 78.0), ('2010-06-25', 77.0), ('2010-06-26', 76.0), ('2010-06-27', 78.0), ('2010-06-28', 78.0), ('2010-06-29', 78.0), ('2010-06-30', 78.0), ('2011-06-01', 77.0), ('2011-06-02', 78.0), ('2011-06-03', 73.0), ('2011-06-04', 70.0), ('2011-06-05', 76.0), ('2011-06-06', 77.0), ('2011-06-07', 77.0), ('2011-06-08', 77.0), ('2011-06-09', 77.0), ('2011-06-10', 78.0), ('2011-06-11', 77.0), ('2011-06-12', 77.0), ('2011-06-13', 78.0), ('2011-06-14', 78.0), ('2011-06-15', 77.0), ('2011-06-17', 78.0), ('2011-06-18', 78.0), ('2011-06-19', 75.0), ('2011-06-20', 76.0), ('2011-06-21', 76.0), ('2011-06-24', 77.0), ('2011-06-25', 78.0), ('2011-06-26', 76.0), ('2011-06-27', 77.0), ('2011-06-28', 75.0), ('2011-06-29', 77.0), ('2012-06-01', 76.0), ('2012-06-02', 76.0), ('2012-06-03', 75.0), ('2012-06-04', 77.0), ('2012-06-05', 77.0), ('2012-06-06', 76.0), ('2012-06-07', 78.0), ('2012-06-08', 77.0), ('2012-06-09', 76.0), ('2012-06-10', 77.0), ('2012-06-11', 76.0), ('2012-06-12', 76.0), ('2012-06-13', 77.0), ('2012-06-14', 77.0), ('2012-06-15', 78.0), ('2012-06-16', 77.0), ('2012-06-17', 77.0), ('2012-06-18', 77.0), ('2012-06-19', 76.0), ('2012-06-20', 75.0), ('2012-06-21', 76.0), ('2012-06-22', 76.0), ('2012-06-23', 77.0), ('2012-06-24', 76.0), ('2012-06-25', 73.0), ('2012-06-26', 73.0), ('2012-06-27', 75.0), ('2012-06-28', 77.0), ('2012-06-29', 76.0), ('2012-06-30', 77.0), ('2013-06-01', 79.0), ('2013-06-02', 78.0), ('2013-06-03', 79.0), ('2013-06-04', 78.0), ('2013-06-05', 
78.0), ('2013-06-06', 79.0), ('2013-06-07', 77.0), ('2013-06-08', 77.0), ('2013-06-09', 77.0), ('2013-06-10', 77.0), ('2013-06-11', 77.0), ('2013-06-12', 78.0), ('2013-06-13', 77.0), ('2013-06-14', 76.0), ('2013-06-15', 78.0), ('2013-06-16', 78.0), ('2013-06-17', 77.0), ('2013-06-18', 78.0), ('2013-06-19', 78.0), ('2013-06-20', 73.0), ('2013-06-21', 76.0), ('2013-06-22', 77.0), ('2013-06-23', 75.0), ('2013-06-24', 76.0), ('2013-06-25', 74.0), ('2013-06-26', 75.0), ('2013-06-27', 77.0), ('2013-06-28', 76.0), ('2013-06-29', 77.0), ('2013-06-30', 78.0), ('2014-06-01', 79.0), ('2014-06-02', 77.0), ('2014-06-03', 78.0), ('2014-06-04', 79.0), ('2014-06-05', 78.0), ('2014-06-06', 78.0), ('2014-06-07', 78.0), ('2014-06-08', 78.0), ('2014-06-09', 79.0), ('2014-06-10', 79.0), ('2014-06-11', 82.0), ('2014-06-12', 80.0), ('2014-06-13', 79.0), ('2014-06-14', 79.0), ('2014-06-15', 78.0), ('2014-06-16', 78.0), ('2014-06-17', 77.0), ('2014-06-18', 78.0), ('2014-06-19', 78.0), ('2014-06-20', 73.0), ('2014-06-21', 75.0), ('2014-06-22', 75.0), ('2014-06-23', 76.0), ('2014-06-24', 84.0), ('2014-06-25', 79.0), ('2014-06-26', 76.0), ('2014-06-27', 79.0), ('2014-06-28', 79.0), ('2014-06-29', 77.0), ('2014-06-30', 79.0), ('2015-06-01', 78.0), ('2015-06-02', 78.0), ('2015-06-03', 79.0), ('2015-06-04', 77.0), ('2015-06-05', 79.0), ('2015-06-06', 78.0), ('2015-06-07', 78.0), ('2015-06-08', 78.0), ('2015-06-09', 79.0), ('2015-06-10', 77.0), ('2015-06-11', 78.0), ('2015-06-12', 78.0), ('2015-06-13', 78.0), ('2015-06-14', 78.0), ('2015-06-15', 79.0), ('2015-06-16', 79.0), ('2015-06-17', 77.0), ('2015-06-18', 78.0), ('2015-06-19', 78.0), ('2015-06-20', 78.0), ('2015-06-21', 79.0), ('2015-06-22', 79.0), ('2015-06-23', 77.0), ('2015-06-24', 79.0), ('2015-06-25', 79.0), ('2015-06-26', 79.0), ('2015-06-27', 81.0), ('2015-06-28', 76.0), ('2015-06-29', 80.0), ('2015-06-30', 78.0), ('2016-06-01', 76.0), ('2016-06-02', 71.0), ('2016-06-03', 71.0), ('2016-06-04', 74.0), ('2016-06-05', 76.0), 
('2016-06-06', 78.0), ('2016-06-07', 77.0), ('2016-06-08', 77.0), ('2016-06-09', 73.0), ('2016-06-10', 77.0), ('2016-06-11', 78.0), ('2016-06-12', 78.0), ('2016-06-13', 80.0), ('2016-06-14', 79.0), ('2016-06-15', 78.0), ('2016-06-16', 77.0), ('2016-06-17', 76.0), ('2016-06-18', 77.0), ('2016-06-19', 77.0), ('2016-06-20', 78.0), ('2016-06-21', 80.0), ('2016-06-22', 79.0), ('2016-06-23', 79.0), ('2016-06-24', 77.0), ('2016-06-25', 78.0), ('2016-06-26', 79.0), ('2016-06-27', 80.0), ('2016-06-28', 76.0), ('2016-06-29', 79.0), ('2016-06-30', 80.0), ('2017-06-01', 79.0), ('2017-06-02', 79.0), ('2017-06-03', 79.0), ('2017-06-04', 79.0), ('2017-06-05', 80.0), ('2017-06-06', 79.0), ('2017-06-07', 79.0), ('2017-06-08', 80.0), ('2017-06-09', 80.0), ('2017-06-10', 77.0), ('2017-06-11', 79.0), ('2017-06-12', 83.0), ('2017-06-13', 80.0), ('2017-06-14', 80.0), ('2017-06-15', 78.0), ('2017-06-16', 79.0), ('2017-06-17', 80.0), ('2017-06-18', 77.0), ('2017-06-19', 80.0), ('2017-06-20', 78.0), ('2017-06-21', 79.0), ('2017-06-22', 80.0), ('2017-06-23', 78.0), ('2017-06-24', 80.0), ('2017-06-25', 80.0), ('2017-06-26', 81.0), ('2017-06-27', 80.0), ('2017-06-28', 79.0), ('2017-06-29', 79.0), ('2017-06-30', 75.0), ('2010-06-01', 74.0), ('2010-06-02', 76.0), ('2010-06-03', 75.0), ('2010-06-04', 75.0), ('2010-06-05', 74.0), ('2010-06-06', 75.0), ('2010-06-07', 75.0), ('2010-06-08', 75.0), ('2010-06-09', 75.0), ('2010-06-10', 75.0), ('2010-06-11', 83.0), ('2010-06-12', 75.0), ('2010-06-13', 76.0), ('2010-06-14', 73.0), ('2010-06-15', 74.0), ('2010-06-16', 74.0), ('2010-06-17', 75.0), ('2010-06-18', 70.0), ('2010-06-19', 74.0), ('2010-06-20', 75.0), ('2010-06-21', 75.0), ('2010-06-22', 75.0), ('2010-06-23', 75.0), ('2010-06-24', 75.0), ('2010-06-25', 74.0), ('2010-06-26', 72.0), ('2010-06-27', 76.0), ('2010-06-28', 73.0), ('2010-06-29', 75.0), ('2010-06-30', 76.0), ('2011-06-01', 74.0), ('2011-06-02', 75.0), ('2011-06-03', 71.0), ('2011-06-04', 69.0), ('2011-06-05', 69.0), ('2011-06-06', 
74.0), ('2011-06-07', 71.0), ('2011-06-08', 74.0), ('2011-06-09', 74.0), ('2011-06-10', 75.0), ('2011-06-11', 73.0), ('2011-06-12', 73.0), ('2011-06-13', 73.0), ('2011-06-14', 75.0), ('2011-06-15', 75.0), ('2011-06-16', 74.0), ('2011-06-17', 74.0), ('2011-06-18', 75.0), ('2011-06-19', 74.0), ('2011-06-20', 73.0), ('2011-06-21', 72.0), ('2011-06-22', 74.0), ('2011-06-23', 72.0), ('2011-06-24', 75.0), ('2011-06-25', 76.0), ('2011-06-26', 74.0), ('2011-06-27', 74.0), ('2011-06-28', 74.0), ('2011-06-29', 74.0), ('2011-06-30', 74.0), ('2012-06-01', 71.0), ('2012-06-02', 71.0), ('2012-06-03', 75.0), ('2012-06-04', 72.0), ('2012-06-05', 72.0), ('2012-06-06', 73.0), ('2012-06-07', 73.0), ('2012-06-08', 73.0), ('2012-06-09', 74.0), ('2012-06-10', 73.0), ('2012-06-11', 72.0), ('2012-06-12', 72.0), ('2012-06-13', 70.0), ('2012-06-14', 72.0), ('2012-06-15', 72.0), ('2012-06-16', 72.0), ('2012-06-17', 74.0), ('2012-06-18', 73.0), ('2012-06-19', 73.0), ('2012-06-20', 70.0), ('2012-06-21', 73.0), ('2012-06-22', 72.0), ('2012-06-23', 73.0), ('2012-06-24', 69.0), ('2012-06-25', 72.0), ('2012-06-26', 71.0), ('2012-06-27', 71.0), ('2012-06-28', 73.0), ('2012-06-29', 73.0), ('2012-06-30', 72.0), ('2013-06-01', 75.0), ('2013-06-02', 75.0), ('2013-06-03', 75.0), ('2013-06-04', 75.0), ('2013-06-05', 74.0), ('2013-06-06', 74.0), ('2013-06-07', 74.0), ('2013-06-08', 75.0), ('2013-06-09', 75.0), ('2013-06-10', 74.0), ('2013-06-11', 75.0), ('2013-06-12', 75.0), ('2013-06-13', 74.0), ('2013-06-14', 72.0), ('2013-06-15', 74.0), ('2013-06-16', 74.0), ('2013-06-17', 74.0), ('2013-06-18', 74.0), ('2013-06-19', 74.0), ('2013-06-20', 72.0), ('2013-06-21', 70.0), ('2013-06-22', 75.0), ('2013-06-23', 74.0), ('2013-06-24', 71.0), ('2013-06-25', 71.0), ('2013-06-26', 70.0), ('2013-06-27', 73.0), ('2013-06-28', 71.0), ('2013-06-29', 71.0), ('2013-06-30', 77.0), ('2014-06-01', 77.0), ('2014-06-02', 74.0), ('2014-06-03', 73.0), ('2014-06-04', 75.0), ('2014-06-05', 75.0), ('2014-06-06', 74.0), 
('2014-06-07', 74.0), ('2014-06-08', 75.0), ('2014-06-09', 75.0), ('2014-06-10', 72.0), ('2014-06-11', 74.0), ('2014-06-12', 76.0), ('2014-06-13', 76.0), ('2014-06-14', 75.0), ('2014-06-15', 75.0), ('2014-06-16', 74.0), ('2014-06-17', 72.0), ('2014-06-18', 74.0), ('2014-06-19', 75.0), ('2014-06-20', 70.0), ('2014-06-21', 71.0), ('2014-06-22', 72.0), ('2014-06-23', 71.0), ('2014-06-24', 73.0), ('2014-06-25', 74.0), ('2014-06-26', 72.0), ('2014-06-27', 74.0), ('2014-06-28', 76.0), ('2014-06-29', 72.0), ('2014-06-30', 75.0), ('2015-06-03', 75.0), ('2015-06-04', 72.0), ('2015-06-05', 70.0), ('2015-06-06', 76.0), ('2015-06-07', 78.0), ('2015-06-08', 72.0), ('2015-06-09', 74.0), ('2015-06-10', 74.0), ('2015-06-11', 72.0), ('2015-06-12', 71.0), ('2015-06-13', 79.0), ('2015-06-14', 79.0), ('2015-06-15', 76.0), ('2015-06-16', 75.0), ('2015-06-17', 72.0), ('2015-06-18', 74.0), ('2015-06-19', 76.0), ('2015-06-20', 76.0), ('2015-06-21', 77.0), ('2015-06-22', 75.0), ('2015-06-23', 76.0), ('2015-06-24', 75.0), ('2015-06-25', 75.0), ('2015-06-26', 76.0), ('2015-06-27', 76.0), ('2015-06-29', 74.0), ('2016-06-01', 70.0), ('2016-06-02', 69.0), ('2016-06-03', 70.0), ('2016-06-04', 68.0), ('2016-06-05', 70.0), ('2016-06-06', 74.0), ('2016-06-07', 75.0), ('2016-06-08', 74.0), ('2016-06-09', 71.0), ('2016-06-10', 73.0), ('2016-06-11', 75.0), ('2016-06-12', 74.0), ('2016-06-13', 76.0), ('2016-06-14', 76.0), ('2016-06-15', 76.0), ('2016-06-16', 76.0), ('2016-06-17', 75.0), ('2016-06-18', 75.0), ('2016-06-19', 75.0), ('2016-06-20', 74.0), ('2016-06-21', 73.0), ('2016-06-22', 76.0), ('2016-06-23', 74.0), ('2016-06-24', 78.0), ('2016-06-25', 71.0), ('2016-06-26', 75.0), ('2016-06-27', 75.0), ('2016-06-28', 74.0), ('2016-06-29', 77.0), ('2016-06-30', 77.0), ('2017-06-01', 76.0), ('2017-06-02', 76.0), ('2017-06-03', 76.0), ('2017-06-04', 78.0), ('2017-06-05', 76.0), ('2017-06-06', 75.0), ('2017-06-07', 75.0), ('2017-06-08', 76.0), ('2017-06-09', 78.0), ('2017-06-10', 75.0), ('2017-06-11', 
73.0), ('2017-06-12', 76.0), ('2017-06-13', 76.0), ('2017-06-14', 76.0), ('2017-06-15', 77.0), ('2017-06-16', 76.0), ('2017-06-17', 77.0), ('2017-06-18', 72.0), ('2017-06-19', 77.0), ('2017-06-20', 75.0), ('2017-06-21', 82.0), ('2017-06-22', 77.0), ('2017-06-23', 76.0), ('2017-06-24', 74.0), ('2017-06-25', 73.0), ('2017-06-26', 77.0), ('2017-06-27', 77.0), ('2017-06-28', 77.0), ('2017-06-29', 76.0), ('2017-06-30', 74.0), ('2010-06-01', 73.0), ('2010-06-02', 72.0), ('2010-06-03', 74.0), ('2010-06-04', 73.0), ('2010-06-05', 72.0), ('2010-06-06', 75.0), ('2010-06-07', 74.0), ('2010-06-08', 75.0), ('2010-06-09', 73.0), ('2010-06-10', 75.0), ('2010-06-11', 76.0), ('2010-06-12', 74.0), ('2010-06-13', 74.0), ('2010-06-14', 73.0), ('2010-06-15', 73.0), ('2010-06-16', 76.0), ('2010-06-17', 75.0), ('2010-06-18', 70.0), ('2010-06-19', 75.0), ('2010-06-20', 73.0), ('2010-06-21', 74.0), ('2010-06-22', 74.0), ('2010-06-23', 74.0), ('2010-06-24', 74.0), ('2010-06-25', 70.0), ('2010-06-27', 75.0), ('2010-06-28', 74.0), ('2010-06-29', 73.0), ('2010-06-30', 74.0), ('2011-06-01', 72.0), ('2011-06-02', 73.0), ('2011-06-03', 67.0), ('2011-06-04', 69.0), ('2011-06-05', 76.0), ('2011-06-06', 73.0), ('2011-06-07', 70.0), ('2011-06-08', 72.0), ('2011-06-09', 74.0), ('2011-06-10', 72.0), ('2011-06-13', 72.0), ('2011-06-14', 75.0), ('2011-06-15', 73.0), ('2011-06-16', 73.0), ('2011-06-17', 72.0), ('2011-06-18', 74.0), ('2011-06-19', 76.0), ('2011-06-21', 72.0), ('2011-06-22', 74.0), ('2011-06-23', 70.0), ('2011-06-24', 74.0), ('2011-06-25', 74.0), ('2011-06-27', 74.0), ('2011-06-28', 71.0), ('2012-06-21', 75.0), ('2012-06-22', 75.0), ('2012-06-24', 76.0), ('2012-06-25', 77.0), ('2012-06-26', 75.0), ('2012-06-27', 74.0), ('2012-06-28', 75.0), ('2012-06-29', 76.0), ('2012-06-30', 78.0), ('2013-06-03', 78.0), ('2013-06-04', 76.0), ('2013-06-05', 78.0), ('2013-06-06', 76.0), ('2013-06-07', 76.0), ('2013-06-08', 78.0), ('2013-06-09', 79.0), ('2013-06-10', 77.0), ('2013-06-11', 76.0), 
('2013-06-12', 77.0), ('2013-06-13', 77.0), ('2013-06-14', 76.0), ('2013-06-16', 78.0), ('2013-06-17', 76.0), ('2013-06-18', 76.0), ('2013-06-19', 77.0), ('2013-06-20', 71.0), ('2013-06-25', 74.0), ('2013-06-26', 72.0), ('2013-06-27', 77.0), ('2014-06-01', 77.0), ('2014-06-02', 78.0), ('2014-06-03', 77.0), ('2014-06-04', 77.0), ('2014-06-05', 76.0), ('2014-06-06', 75.0), ('2014-06-07', 79.0), ('2014-06-09', 75.0), ('2014-06-10', 73.0), ('2014-06-11', 76.0), ('2014-06-12', 78.0), ('2014-06-13', 77.0), ('2014-06-14', 78.0), ('2014-06-16', 76.0), ('2014-06-17', 75.0), ('2014-06-18', 76.0), ('2014-06-19', 77.0), ('2014-06-20', 74.0), ('2014-06-21', 78.0), ('2014-06-23', 77.0), ('2014-06-24', 77.0), ('2014-06-25', 74.0), ('2014-06-26', 74.0), ('2014-06-27', 78.0), ('2014-06-29', 79.0), ('2014-06-30', 79.0), ('2015-06-01', 78.0), ('2015-06-03', 75.0), ('2015-06-04', 76.0), ('2015-06-05', 78.0), ('2015-06-06', 78.0), ('2015-06-08', 74.0), ('2015-06-09', 78.0), ('2015-06-10', 77.0), ('2015-06-11', 76.0), ('2015-06-12', 78.0), ('2015-06-14', 78.0), ('2015-06-15', 78.0), ('2015-06-16', 78.0), ('2015-06-17', 74.0), ('2015-06-18', 79.0), ('2015-06-19', 77.0), ('2015-06-20', 73.0), ('2015-06-21', 79.0), ('2015-06-22', 79.0), ('2015-06-23', 78.0), ('2015-06-24', 77.0), ('2015-06-25', 78.0), ('2015-06-26', 78.0), ('2015-06-27', 80.0), ('2015-06-28', 80.0), ('2015-06-29', 79.0), ('2015-06-30', 74.0), ('2016-06-01', 71.0), ('2016-06-02', 74.0), ('2016-06-03', 75.0), ('2016-06-07', 75.0), ('2016-06-08', 74.0), ('2016-06-09', 76.0), ('2016-06-10', 75.0), ('2016-06-11', 77.0), ('2016-06-12', 77.0), ('2016-06-13', 79.0), ('2016-06-14', 79.0), ('2016-06-15', 78.0), ('2016-06-16', 79.0), ('2016-06-17', 73.0), ('2016-06-18', 78.0), ('2016-06-19', 79.0), ('2016-06-20', 80.0), ('2016-06-21', 76.0), ('2016-06-22', 79.0), ('2016-06-23', 79.0), ('2016-06-24', 77.0), ('2016-06-25', 80.0), ('2016-06-26', 79.0), ('2016-06-27', 78.0), ('2016-06-28', 78.0), ('2016-06-29', 79.0), ('2016-06-30', 
78.0), ('2017-06-01', 81.0), ('2017-06-02', 78.0), ('2017-06-03', 80.0), ('2017-06-04', 79.0), ('2017-06-05', 81.0), ('2017-06-06', 80.0), ('2017-06-07', 81.0), ('2017-06-08', 79.0), ('2017-06-09', 81.0), ('2017-06-10', 78.0), ('2017-06-11', 79.0), ('2017-06-12', 79.0), ('2017-06-13', 75.0), ('2017-06-14', 79.0), ('2017-06-15', 78.0), ('2017-06-16', 80.0), ('2017-06-17', 78.0), ('2017-06-18', 77.0), ('2017-06-19', 78.0), ('2017-06-20', 79.0), ('2017-06-23', 76.0), ('2017-06-26', 80.0), ('2017-06-29', 76.0), ('2017-06-30', 81.0), ('2010-06-01', 77.0), ('2010-06-02', 76.0), ('2010-06-03', 77.0), ('2010-06-04', 75.0), ('2010-06-07', 76.0), ('2010-06-08', 79.0), ('2010-06-09', 79.0), ('2010-06-10', 77.0), ('2010-06-15', 76.0), ('2010-06-16', 77.0), ('2010-06-17', 76.0), ('2010-06-18', 73.0), ('2010-06-21', 76.0), ('2010-06-22', 77.0), ('2010-06-23', 77.0), ('2010-06-24', 78.0), ('2010-06-25', 77.0), ('2010-06-28', 76.0), ('2010-06-29', 78.0), ('2010-06-30', 78.0), ('2011-06-01', 75.0), ('2011-06-02', 77.0), ('2011-06-03', 65.0), ('2011-06-06', 76.0), ('2011-06-07', 74.0), ('2011-06-08', 76.0), ('2011-06-09', 76.0), ('2011-06-13', 77.0), ('2011-06-14', 77.0), ('2011-06-15', 76.0), ('2011-06-16', 77.0), ('2011-06-17', 76.0), ('2011-06-20', 77.0), ('2011-06-21', 74.0), ('2011-06-22', 76.0), ('2011-06-23', 73.0), ('2011-06-24', 75.0), ('2011-06-27', 76.0), ('2011-06-28', 75.0), ('2011-06-29', 76.0), ('2011-06-30', 75.0), ('2012-06-01', 74.0), ('2012-06-04', 74.0), ('2012-06-05', 74.0), ('2012-06-06', 74.0), ('2012-06-07', 76.0), ('2012-06-08', 76.0), ('2012-06-12', 77.0), ('2012-06-13', 77.0), ('2012-06-14', 78.0), ('2012-06-15', 73.0), ('2012-06-18', 75.0), ('2012-06-19', 72.0), ('2012-06-20', 76.0), ('2012-06-21', 76.0), ('2012-06-22', 73.0), ('2012-06-25', 74.0), ('2012-06-26', 72.0), ('2012-06-27', 76.0), ('2012-06-28', 76.0), ('2012-06-29', 76.0), ('2013-06-03', 78.0), ('2013-06-04', 78.0), ('2013-06-05', 78.0), ('2013-06-06', 78.0), ('2013-06-07', 75.0), 
('2013-06-10', 77.0), ('2013-06-12', 73.0), ('2013-06-13', 78.0), ('2013-06-14', 75.0), ('2013-06-17', 73.0), ('2013-06-18', 77.0), ('2013-06-19', 80.0), ('2013-06-20', 70.0), ('2013-06-21', 78.0), ('2013-06-25', 72.0), ('2013-06-26', 78.0), ('2013-06-27', 72.0), ('2013-06-28', 78.0), ('2014-06-02', 77.0), ('2014-06-03', 79.0), ('2014-06-04', 77.0), ('2014-06-05', 78.0), ('2014-06-06', 74.0), ('2014-06-09', 79.0), ('2014-06-10', 79.0), ('2014-06-12', 79.0), ('2014-06-13', 79.0), ('2014-06-16', 77.0), ('2014-06-17', 79.0), ('2014-06-18', 78.0), ('2014-06-19', 80.0), ('2014-06-20', 81.0), ('2014-06-27', 81.0), ('2015-06-10', 78.0), ('2015-06-12', 77.0), ('2015-06-15', 78.0), ('2015-06-16', 79.0), ('2015-06-17', 77.0), ('2015-06-18', 79.0), ('2015-06-19', 80.0), ('2015-06-22', 79.0), ('2015-06-23', 78.0), ('2015-06-24', 79.0), ('2015-06-25', 79.0), ('2015-06-29', 79.0), ('2015-06-30', 81.0), ('2016-06-01', 64.0), ('2016-06-02', 65.0), ('2016-06-03', 75.0), ('2016-06-06', 76.0), ('2016-06-07', 78.0), ('2016-06-08', 77.0), ('2016-06-09', 75.0), ('2016-06-13', 78.0), ('2016-06-14', 81.0), ('2016-06-15', 78.0), ('2016-06-16', 76.0), ('2016-06-17', 73.0), ('2016-06-20', 74.0), ('2016-06-21', 76.0), ('2016-06-22', 78.0), ('2016-06-23', 79.0), ('2016-06-24', 78.0), ('2016-06-27', 72.0), ('2016-06-28', 78.0), ('2016-06-29', 80.0), ('2016-06-30', 81.0), ('2017-06-02', 79.0), ('2017-06-05', 78.0), ('2017-06-06', 80.0), ('2017-06-07', 80.0), ('2017-06-08', 78.0), ('2017-06-09', 80.0), ('2017-06-13', 81.0), ('2017-06-14', 78.0), ('2017-06-15', 77.0), ('2017-06-16', 78.0), ('2017-06-19', 78.0), ('2017-06-20', 77.0), ('2017-06-21', 76.0), ('2017-06-22', 81.0), ('2017-06-23', 76.0), ('2017-06-26', 82.0), ('2017-06-27', 80.0), ('2017-06-28', 80.0), ('2017-06-29', 79.0), ('2017-06-30', 74.0), ('2010-06-01', 69.0), ('2010-06-02', 70.0), ('2010-06-03', 67.0), ('2010-06-04', 70.0), ('2010-06-05', 73.0), ('2010-06-06', 73.0), ('2010-06-09', 72.0), ('2010-06-10', 72.0), ('2010-06-11', 
77.0), ('2010-06-12', 70.0), ('2010-06-13', 78.0), ('2010-06-15', 73.0), ('2010-06-17', 75.0), ('2010-06-18', 70.0), ('2010-06-19', 74.0), ('2010-06-20', 77.0), ('2010-06-22', 69.0), ('2010-06-23', 73.0), ('2010-06-24', 73.0), ('2010-06-25', 74.0), ('2010-06-28', 73.0), ('2010-06-29', 72.0), ('2010-06-30', 75.0), ('2011-06-08', 73.0), ('2011-06-09', 76.0), ('2011-06-12', 74.0), ('2011-06-13', 76.0), ('2011-06-14', 73.0), ('2011-06-15', 76.0), ('2011-06-16', 76.0), ('2011-06-20', 78.0), ('2011-06-21', 75.0), ('2011-06-22', 77.0), ('2012-06-01', 75.0), ('2012-06-14', 73.0), ('2012-06-21', 76.0), ('2012-06-26', 71.0), ('2012-06-29', 71.0), ('2010-06-01', 76.0), ('2010-06-02', 76.0), ('2010-06-03', 76.0), ('2010-06-04', 73.0), ('2010-06-05', 78.0), ('2010-06-06', 77.0), ('2010-06-07', 77.0), ('2010-06-08', 77.0), ('2010-06-09', 78.0), ('2010-06-10', 78.0), ('2010-06-11', 76.0), ('2010-06-12', 76.0), ('2010-06-13', 79.0), ('2010-06-14', 74.0), ('2010-06-15', 75.0), ('2010-06-16', 76.0), ('2010-06-17', 75.0), ('2010-06-18', 73.0), ('2010-06-19', 76.0), ('2010-06-20', 78.0), ('2010-06-21', 76.0), ('2010-06-22', 79.0), ('2010-06-23', 77.0), ('2010-06-24', 76.0), ('2010-06-25', 74.0), ('2010-06-26', 75.0), ('2010-06-28', 76.0), ('2010-06-29', 76.0), ('2010-06-30', 76.0), ('2011-06-01', 79.0), ('2011-06-02', 76.0), ('2011-06-03', 72.0), ('2011-06-04', 71.0), ('2011-06-05', 72.0), ('2011-06-06', 76.0), ('2011-06-07', 74.0), ('2011-06-08', 75.0), ('2011-06-09', 76.0), ('2011-06-10', 77.0), ('2011-06-11', 76.0), ('2011-06-12', 75.0), ('2011-06-13', 76.0), ('2011-06-14', 76.0), ('2011-06-15', 76.0), ('2011-06-16', 75.0), ('2011-06-17', 76.0), ('2011-06-18', 76.0), ('2011-06-19', 79.0), ('2011-06-20', 74.0), ('2011-06-21', 73.0), ('2011-06-22', 75.0), ('2011-06-23', 74.0), ('2011-06-24', 75.0), ('2011-06-25', 76.0), ('2011-06-26', 72.0), ('2011-06-27', 74.0), ('2011-06-28', 74.0), ('2011-06-29', 75.0), ('2011-06-30', 75.0), ('2012-06-01', 74.0), ('2012-06-02', 75.0), 
('2012-06-03', 77.0), ('2012-06-04', 75.0), ('2012-06-05', 74.0), ('2012-06-06', 75.0), ('2012-06-07', 74.0), ('2012-06-08', 75.0), ('2012-06-09', 73.0), ('2012-06-10', 75.0), ('2012-06-11', 76.0), ('2012-06-12', 76.0), ('2012-06-13', 76.0), ('2012-06-14', 79.0), ('2012-06-15', 74.0), ('2012-06-16', 78.0), ('2012-06-17', 80.0), ('2012-06-18', 75.0), ('2012-06-19', 72.0), ('2012-06-20', 72.0), ('2012-06-21', 74.0), ('2012-06-22', 78.0), ('2012-06-23', 76.0), ('2012-06-24', 77.0), ('2012-06-25', 72.0), ('2012-06-26', 72.0), ('2012-06-27', 73.0), ('2012-06-28', 75.0), ('2012-06-29', 79.0), ('2012-06-30', 81.0), ('2013-06-01', 76.0), ('2013-06-02', 76.0), ('2013-06-03', 77.0), ('2013-06-04', 78.0), ('2013-06-05', 79.0), ('2013-06-06', 79.0), ('2013-06-07', 75.0), ('2013-06-08', 79.0), ('2013-06-09', 79.0), ('2013-06-10', 75.0), ('2013-06-11', 77.0), ('2013-06-12', 81.0), ('2013-06-13', 76.0), ('2013-06-14', 75.0), ('2013-06-15', 78.0), ('2013-06-16', 81.0), ('2013-06-17', 76.0), ('2013-06-18', 76.0), ('2013-06-19', 76.0), ('2013-06-20', 72.0), ('2013-06-21', 73.0), ('2013-06-22', 74.0), ('2013-06-23', 74.0), ('2013-06-24', 75.0), ('2013-06-25', 73.0), ('2013-06-26', 73.0), ('2013-06-27', 76.0), ('2013-06-29', 73.0), ('2013-06-30', 79.0), ('2014-06-01', 78.0), ('2014-06-02', 76.0), ('2014-06-04', 76.0), ('2014-06-05', 76.0), ('2014-06-06', 77.0), ('2014-06-07', 79.0), ('2014-06-08', 80.0), ('2014-06-09', 78.0), ('2014-06-10', 76.0), ('2014-06-11', 78.0), ('2014-06-12', 78.0), ('2014-06-13', 77.0), ('2014-06-14', 79.0), ('2014-06-15', 79.0), ('2014-06-16', 75.0), ('2014-06-18', 76.0), ('2014-06-19', 77.0), ('2014-06-20', 74.0), ('2014-06-21', 77.0), ('2014-06-22', 76.0), ('2014-06-23', 76.0), ('2014-06-24', 75.0), ('2014-06-25', 74.0), ('2014-06-26', 79.0), ('2014-06-28', 76.0), ('2014-06-29', 74.0), ('2014-06-30', 76.0), ('2015-06-01', 77.0), ('2015-06-02', 78.0), ('2015-06-03', 77.0), ('2015-06-04', 75.0), ('2015-06-05', 75.0), ('2015-06-06', 80.0), ('2015-06-07', 
77.0), ('2015-06-08', 77.0), ('2015-06-09', 77.0), ('2015-06-10', 80.0), ('2015-06-11', 76.0), ('2015-06-12', 78.0), ('2015-06-13', 77.0), ('2015-06-14', 79.0), ('2015-06-15', 79.0), ('2015-06-16', 77.0), ('2015-06-17', 75.0), ('2015-06-18', 76.0), ('2015-06-19', 78.0), ('2015-06-20', 78.0), ('2015-06-21', 80.0), ('2015-06-22', 78.0), ('2015-06-23', 79.0), ('2015-06-24', 79.0), ('2015-06-25', 77.0), ('2015-06-26', 77.0), ('2015-06-27', 81.0), ('2015-06-29', 75.0), ('2015-06-30', 72.0), ('2016-06-01', 75.0), ('2016-06-02', 75.0), ('2016-06-03', 75.0), ('2016-06-04', 75.0), ('2016-06-05', 73.0), ('2016-06-06', 78.0), ('2016-06-07', 77.0), ('2016-06-08', 80.0), ('2016-06-09', 72.0), ('2016-06-10', 78.0), ('2016-06-11', 78.0), ('2016-06-12', 77.0), ('2016-06-13', 78.0), ('2016-06-14', 79.0), ('2016-06-15', 80.0), ('2016-06-17', 76.0), ('2016-06-18', 77.0), ('2016-06-19', 79.0), ('2016-06-20', 79.0), ('2016-06-21', 77.0), ('2016-06-22', 79.0), ('2016-06-23', 80.0), ('2016-06-24', 80.0), ('2016-06-25', 77.0), ('2016-06-26', 79.0), ('2016-06-27', 81.0), ('2016-06-28', 78.0), ('2016-06-29', 80.0), ('2016-06-30', 79.0), ('2017-06-01', 79.0), ('2017-06-02', 81.0), ('2017-06-03', 79.0), ('2017-06-04', 81.0), ('2017-06-05', 78.0), ('2017-06-06', 78.0), ('2017-06-07', 78.0), ('2017-06-08', 77.0), ('2017-06-09', 78.0), ('2017-06-10', 75.0), ('2017-06-11', 81.0), ('2017-06-12', 80.0), ('2017-06-13', 77.0), ('2017-06-14', 80.0), ('2017-06-15', 78.0), ('2017-06-16', 80.0), ('2017-06-17', 77.0), ('2017-06-18', 78.0), ('2017-06-19', 78.0), ('2017-06-21', 80.0), ('2017-06-22', 80.0), ('2017-06-23', 79.0), ('2017-06-24', 82.0), ('2017-06-25', 80.0), ('2017-06-26', 81.0), ('2017-06-27', 81.0), ('2017-06-28', 79.0), ('2017-06-29', 78.0), ('2017-06-30', 75.0), ('2010-06-01', 71.0), ('2010-06-02', 71.0), ('2010-06-03', 72.0), ('2010-06-04', 72.0), ('2010-06-05', 76.0), ('2010-06-06', 77.0), ('2010-06-07', 71.0), ('2010-06-08', 73.0), ('2010-06-09', 73.0), ('2010-06-10', 75.0), 
('2010-06-11', 81.0), ('2010-06-12', 74.0), ('2010-06-13', 77.0), ('2010-06-14', 70.0), ('2010-06-15', 69.0), ('2010-06-16', 70.0), ('2010-06-17', 72.0), ('2010-06-18', 71.0), ('2010-06-19', 77.0), ('2010-06-20', 76.0), ('2010-06-21', 74.0), ('2010-06-22', 74.0), ('2010-06-23', 74.0), ('2010-06-24', 74.0), ('2010-06-25', 71.0), ('2010-06-26', 80.0), ('2010-06-27', 77.0), ('2010-06-28', 72.0), ('2010-06-29', 72.0), ('2010-06-30', 73.0), ('2011-06-01', 72.0), ('2011-06-02', 73.0), ('2011-06-03', 73.0), ('2011-06-08', 77.0), ('2011-06-09', 76.0), ('2011-06-10', 79.0), ('2011-06-11', 78.0), ('2011-06-12', 82.0), ('2011-06-13', 79.0), ('2011-06-14', 76.0), ('2011-06-15', 74.0), ('2011-06-16', 74.0), ('2011-06-17', 73.0), ('2011-06-18', 79.0), ('2011-06-19', 77.0), ('2011-06-20', 73.0), ('2011-06-21', 72.0), ('2011-06-22', 72.0), ('2011-06-23', 73.0), ('2011-06-24', 74.0), ('2011-06-25', 77.0), ('2011-06-26', 73.0), ('2011-06-27', 73.0), ('2011-06-28', 72.0), ('2011-06-29', 72.0), ('2011-06-30', 67.0), ('2012-06-01', 71.0), ('2012-06-02', 73.0), ('2012-06-03', 78.0), ('2012-06-04', 71.0), ('2012-06-05', 72.0), ('2012-06-06', 70.0), ('2012-06-07', 72.0), ('2012-06-08', 73.0), ('2012-06-09', 74.0), ('2012-06-10', 81.0), ('2012-06-11', 72.0), ('2012-06-12', 77.0), ('2012-06-13', 69.0), ('2012-06-14', 69.0), ('2012-06-15', 69.0), ('2012-06-16', 78.0), ('2012-06-17', 78.0), ('2012-06-18', 73.0), ('2012-06-19', 72.0), ('2012-06-20', 71.0), ('2012-06-21', 72.0), ('2012-06-22', 72.0), ('2012-06-23', 73.0), ('2012-06-24', 71.0), ('2012-06-25', 70.0), ('2012-06-26', 71.0), ('2012-06-27', 68.0), ('2012-06-28', 72.0), ('2012-06-29', 71.0), ('2012-06-30', 78.0), ('2013-06-01', 81.0), ('2013-06-02', 74.0), ('2013-06-03', 74.0), ('2013-06-04', 73.0), ('2013-06-05', 73.0), ('2013-06-06', 72.0), ('2013-06-07', 73.0), ('2013-06-08', 77.0), ('2013-06-09', 73.0), ('2013-06-10', 73.0), ('2013-06-11', 75.0), ('2013-06-12', 72.0), ('2013-06-13', 72.0), ('2013-06-14', 73.0), ('2013-06-15', 
73.0), ('2013-06-16', 75.0), ('2013-06-17', 72.0), ('2013-06-18', 73.0), ('2013-06-19', 70.0), ('2013-06-20', 69.0), ('2013-06-21', 68.0), ('2013-06-22', 75.0), ('2013-06-23', 74.0), ('2013-06-24', 73.0), ('2013-06-25', 70.0), ('2013-06-26', 70.0), ('2013-06-27', 71.0), ('2013-06-28', 70.0), ('2013-06-29', 70.0), ('2013-06-30', 75.0), ('2014-06-01', 74.0), ('2014-06-02', 72.0), ('2014-06-03', 74.0), ('2014-06-04', 73.0), ('2014-06-05', 73.0), ('2014-06-06', 70.0), ('2014-06-07', 73.0), ('2014-06-08', 75.0), ('2014-06-09', 73.0), ('2014-06-10', 69.0), ('2014-06-11', 80.0), ('2014-06-12', 73.0), ('2014-06-13', 72.0), ('2014-06-14', 80.0), ('2014-06-15', 80.0), ('2014-06-16', 69.0), ('2014-06-17', 70.0), ('2014-06-18', 69.0), ('2014-06-19', 72.0), ('2014-06-20', 68.0), ('2014-06-21', 74.0), ('2014-06-22', 75.0), ('2014-06-23', 70.0), ('2014-06-24', 70.0), ('2014-06-25', 72.0), ('2014-06-26', 71.0), ('2014-06-27', 70.0), ('2014-06-28', 77.0), ('2014-06-29', 73.0), ('2014-06-30', 71.0), ('2015-06-01', 76.0), ('2015-06-02', 68.0), ('2015-06-03', 71.0), ('2015-06-04', 70.0), ('2015-06-05', 68.0), ('2015-06-06', 79.0), ('2015-06-07', 75.0), ('2015-06-08', 71.0), ('2015-06-09', 69.0), ('2015-06-10', 71.0), ('2015-06-11', 72.0), ('2015-06-12', 69.0), ('2015-06-13', 72.0), ('2015-06-14', 75.0), ('2015-06-15', 71.0), ('2015-06-16', 72.0), ('2015-06-17', 70.0), ('2015-06-18', 71.0), ('2015-06-19', 72.0), ('2015-06-20', 75.0), ('2015-06-21', 75.0), ('2015-06-22', 71.0), ('2015-06-23', 73.0), ('2015-06-24', 71.0), ('2015-06-25', 71.0), ('2015-06-26', 74.0), ('2015-06-27', 74.0), ('2015-06-28', 78.0), ('2015-06-29', 72.0), ('2015-06-30', 69.0), ('2016-06-01', 69.0), ('2016-06-02', 67.0), ('2016-06-03', 68.0), ('2016-06-04', 65.0), ('2016-06-05', 70.0), ('2016-06-06', 72.0), ('2016-06-07', 74.0), ('2016-06-08', 73.0), ('2016-06-09', 71.0), ('2016-06-10', 75.0), ('2016-06-11', 72.0), ('2016-06-12', 75.0), ('2016-06-13', 76.0), ('2016-06-14', 75.0), ('2016-06-15', 76.0), 
('2016-06-16', 75.0), ('2016-06-17', 71.0), ('2016-06-18', 75.0), ('2016-06-19', 75.0), ('2016-06-20', 72.0), ('2016-06-21', 72.0), ('2016-06-22', 75.0), ('2016-06-23', 73.0), ('2016-06-24', 76.0), ('2016-06-25', 74.0), ('2016-06-26', 77.0), ('2016-06-27', 72.0), ('2016-06-28', 71.0), ('2016-06-29', 76.0), ('2016-06-30', 77.0), ('2017-06-01', 80.0), ('2017-06-02', 76.0), ('2017-06-03', 76.0), ('2017-06-04', 77.0), ('2017-06-05', 75.0), ('2017-06-06', 75.0), ('2017-06-07', 75.0), ('2017-06-08', 75.0), ('2017-06-09', 72.0), ('2017-06-10', 74.0), ('2017-06-11', 74.0), ('2017-06-12', 74.0), ('2017-06-13', 76.0), ('2017-06-14', 74.0), ('2017-06-15', 75.0), ('2017-06-16', 73.0), ('2017-06-17', 79.0), ('2017-06-18', 75.0), ('2017-06-19', 72.0), ('2017-06-20', 72.0), ('2017-06-21', 74.0), ('2017-06-22', 72.0), ('2017-06-23', 72.0), ('2017-06-24', 77.0), ('2017-06-25', 71.0), ('2017-06-26', 73.0), ('2017-06-27', 76.0), ('2017-06-28', 77.0), ('2017-06-29', 76.0), ('2017-06-30', 76.0), ('2010-06-01', 74.0), ('2010-06-02', 76.0), ('2010-06-03', 78.0), ('2010-06-04', 74.0), ('2010-06-05', 79.0), ('2010-06-06', 79.0), ('2010-06-07', 69.0), ('2010-06-08', 77.0), ('2010-06-09', 80.0), ('2010-06-10', 75.0), ('2010-06-11', 75.0), ('2010-06-12', 78.0), ('2010-06-14', 71.0), ('2010-06-15', 76.0), ('2010-06-16', 74.0), ('2010-06-17', 78.0), ('2010-06-18', 72.0), ('2010-06-19', 78.0), ('2010-06-20', 85.0), ('2010-06-21', 77.0), ('2010-06-22', 76.0), ('2010-06-23', 76.0), ('2010-06-24', 75.0), ('2010-06-25', 71.0), ('2010-06-26', 70.0), ('2010-06-27', 80.0), ('2010-06-28', 77.0), ('2010-06-29', 70.0), ('2010-06-30', 73.0), ('2011-06-01', 73.0), ('2011-06-02', 76.0), ('2011-06-03', 70.0), ('2011-06-04', 71.0), ('2011-06-05', 80.0), ('2011-06-06', 70.0), ('2011-06-07', 68.0), ('2011-06-08', 74.0), ('2011-06-09', 75.0), ('2011-06-10', 75.0), ('2011-06-11', 78.0), ('2011-06-12', 76.0), ('2011-06-13', 69.0), ('2011-06-14', 73.0), ('2011-06-15', 76.0), ('2011-06-16', 72.0), ('2011-06-17', 
72.0), ('2011-06-18', 80.0), ('2011-06-19', 76.0), ('2011-06-20', 71.0), ('2011-06-21', 68.0), ('2011-06-22', 71.0), ('2011-06-23', 72.0), ('2011-06-24', 75.0), ('2011-06-25', 80.0), ('2011-06-26', 75.0), ('2011-06-27', 74.0), ('2011-06-28', 74.0), ('2011-06-29', 69.0), ('2011-06-30', 70.0), ('2012-06-01', 73.0), ('2012-06-02', 76.0), ('2012-06-03', 76.0), ('2012-06-04', 74.0), ('2012-06-05', 71.0), ('2012-06-06', 72.0), ('2012-06-07', 73.0), ('2012-06-08', 77.0), ('2012-06-09', 77.0), ('2012-06-10', 77.0), ('2012-06-11', 79.0), ('2012-06-12', 77.0), ('2012-06-13', 77.0), ('2012-06-14', 78.0), ('2012-06-16', 78.0), ('2012-06-17', 81.0), ('2012-06-18', 76.0), ('2012-06-19', 74.0), ('2012-06-20', 71.0), ('2012-06-21', 78.0), ('2012-06-22', 71.0), ('2012-06-23', 76.0), ('2012-06-24', 81.0), ('2012-06-25', 73.0), ('2012-06-26', 72.0), ('2012-06-27', 72.0), ('2012-06-28', 74.0), ('2012-06-29', 72.0), ('2012-06-30', 79.0), ('2013-06-01', 80.0), ('2013-06-02', 81.0), ('2013-06-03', 75.0), ('2013-06-04', 80.0), ('2013-06-05', 73.0), ('2013-06-06', 74.0), ('2013-06-07', 73.0), ('2013-06-08', 81.0), ('2013-06-09', 79.0), ('2013-06-10', 78.0), ('2013-06-11', 74.0), ('2013-06-12', 74.0), ('2013-06-13', 73.0), ('2013-06-14', 73.0), ('2013-06-15', 77.0), ('2013-06-16', 77.0), ('2013-06-17', 74.0), ('2013-06-18', 73.0), ('2013-06-19', 70.0), ('2013-06-20', 69.0), ('2013-06-21', 72.0), ('2013-06-22', 79.0), ('2013-06-23', 77.0), ('2013-06-24', 70.0), ('2013-06-25', 71.0), ('2013-06-26', 67.0), ('2013-06-27', 66.0), ('2013-06-28', 78.0), ('2013-06-29', 80.0), ('2013-06-30', 80.0), ('2014-06-01', 79.0), ('2014-06-02', 68.0), ('2014-06-03', 67.0), ('2014-06-04', 71.0), ('2014-06-06', 72.0), ('2014-06-07', 79.0), ('2014-06-08', 80.0), ('2014-06-09', 72.0), ('2014-06-10', 68.0), ('2014-06-11', 84.0), ('2014-06-12', 82.0), ('2014-06-13', 79.0), ('2014-06-14', 78.0), ('2014-06-15', 81.0), ('2014-06-16', 77.0), ('2014-06-17', 71.0), ('2014-06-18', 69.0), ('2014-06-19', 73.0), 
('2014-06-20', 68.0), ('2014-06-21', 75.0), ('2014-06-22', 79.0), ('2014-06-23', 70.0), ('2014-06-24', 70.0), ('2014-06-25', 73.0), ('2014-06-26', 71.0), ('2014-06-27', 68.0), ('2014-06-29', 78.0), ('2014-06-30', 69.0), ('2015-06-01', 70.0), ('2015-06-02', 67.0), ('2015-06-03', 68.0), ('2015-06-04', 68.0), ('2015-06-05', 71.0), ('2015-06-08', 74.0), ('2015-06-09', 69.0), ('2015-06-11', 69.0), ('2015-06-12', 66.0), ('2015-06-15', 67.0), ('2015-06-16', 69.0), ('2015-06-17', 72.0), ('2015-06-18', 72.0), ('2015-06-19', 72.0), ('2015-06-22', 70.0), ('2015-06-23', 75.0), ('2015-06-24', 73.0), ('2015-06-25', 70.0), ('2015-06-26', 71.0), ('2010-06-01', 70.0), ('2010-06-02', 78.0), ('2010-06-03', 73.0), ('2010-06-04', 68.0), ('2010-06-05', 79.0), ('2010-06-06', 74.0), ('2010-06-07', 75.0), ('2010-06-08', 71.0), ('2010-06-09', 75.0), ('2010-06-14', 76.0), ('2010-06-16', 73.0), ('2010-06-17', 73.0), ('2010-06-19', 74.0), ('2010-06-22', 72.0), ('2010-06-23', 72.0), ('2010-06-24', 72.0), ('2010-06-25', 70.0), ('2010-06-26', 69.0), ('2010-06-27', 73.0), ('2010-06-28', 71.0), ('2010-06-29', 73.0), ('2010-06-30', 72.0), ('2011-06-01', 69.0), ('2011-06-02', 72.0), ('2011-06-03', 66.0), ('2011-06-04', 67.0), ('2011-06-05', 71.0), ('2011-06-06', 71.0), ('2011-06-07', 72.0), ('2011-06-08', 72.0), ('2011-06-09', 71.0), ('2011-06-10', 71.0), ('2011-06-11', 71.0), ('2011-06-12', 69.0), ('2011-06-13', 68.0), ('2011-06-14', 71.0), ('2011-06-15', 73.0), ('2011-06-16', 72.0), ('2011-06-17', 74.0), ('2011-06-18', 70.0), ('2011-06-19', 72.0), ('2011-06-20', 71.0), ('2011-06-21', 71.0), ('2011-06-22', 70.0), ('2011-06-23', 70.0), ('2011-06-24', 69.0), ('2011-06-25', 74.0), ('2011-06-26', 69.0), ('2011-06-27', 72.0), ('2011-06-28', 70.0), ('2011-06-29', 72.0), ('2011-06-30', 72.0), ('2012-06-01', 69.0), ('2012-06-02', 73.0), ('2012-06-03', 71.0), ('2012-06-04', 72.0), ('2012-06-05', 71.0), ('2012-06-06', 72.0), ('2012-06-07', 71.0), ('2012-06-08', 74.0), ('2012-06-09', 74.0), ('2012-06-10', 
75.0), ('2012-06-11', 69.0), ('2012-06-12', 75.0), ('2012-06-13', 70.0), ('2012-06-14', 73.0), ('2012-06-15', 72.0), ('2012-06-16', 70.0), ('2012-06-17', 73.0), ('2012-06-18', 71.0), ('2012-06-19', 70.0), ('2012-06-20', 69.0), ('2012-06-21', 71.0), ('2012-06-23', 68.0), ('2012-06-24', 68.0), ('2012-06-25', 68.0), ('2012-06-26', 69.0), ('2012-06-27', 69.0), ('2012-06-28', 69.0), ('2012-06-29', 71.0), ('2013-06-01', 78.0), ('2013-06-02', 72.0), ('2013-06-03', 73.0), ('2013-06-04', 71.0), ('2013-06-05', 72.0), ('2013-06-06', 72.0), ('2013-06-07', 71.0), ('2013-06-08', 74.0), ('2013-06-09', 71.0), ('2013-06-10', 72.0), ('2013-06-11', 72.0), ('2013-06-12', 74.0), ('2013-06-13', 72.0), ('2013-06-14', 69.0), ('2013-06-15', 70.0), ('2013-06-16', 76.0), ('2013-06-17', 70.0), ('2013-06-18', 71.0), ('2013-06-19', 73.0), ('2013-06-20', 68.0), ('2013-06-21', 68.0), ('2013-06-22', 69.0), ('2013-06-23', 70.0), ('2013-06-24', 69.0), ('2013-06-25', 69.0), ('2013-06-26', 70.0), ('2013-06-27', 71.0), ('2013-06-28', 70.0), ('2013-06-29', 67.0), ('2013-06-30', 72.0), ('2014-06-01', 72.0), ('2014-06-02', 75.0), ('2014-06-03', 73.0), ('2014-06-04', 73.0), ('2014-06-05', 72.0), ('2014-06-06', 71.0), ('2014-06-07', 77.0), ('2014-06-08', 73.0), ('2014-06-09', 70.0), ('2014-06-10', 70.0), ('2014-06-11', 76.0), ('2014-06-12', 72.0), ('2014-06-13', 73.0), ('2014-06-14', 77.0), ('2014-06-15', 73.0), ('2014-06-16', 70.0), ('2014-06-17', 68.0), ('2014-06-18', 69.0), ('2014-06-19', 70.0), ('2014-06-20', 70.0), ('2014-06-21', 73.0), ('2014-06-22', 70.0), ('2014-06-23', 69.0), ('2014-06-24', 71.0), ('2014-06-25', 70.0), ('2014-06-26', 70.0), ('2014-06-27', 72.0), ('2014-06-28', 71.0), ('2014-06-29', 70.0), ('2015-06-01', 69.0), ('2015-06-02', 72.0), ('2015-06-03', 72.0), ('2015-06-04', 71.0), ('2015-06-05', 76.0), ('2015-06-06', 71.0), ('2015-06-07', 72.0), ('2015-06-08', 70.0), ('2015-06-09', 72.0), ('2015-06-10', 72.0), ('2015-06-12', 71.0), ('2015-06-13', 72.0), ('2015-06-14', 69.0), 
('2015-06-15', 70.0), ('2015-06-16', 73.0), ('2015-06-17', 70.0), ('2015-06-18', 72.0), ('2015-06-19', 72.0), ('2015-06-20', 71.0), ('2015-06-21', 73.0), ('2015-06-22', 72.0), ('2015-06-23', 73.0), ('2015-06-24', 72.0), ('2015-06-25', 71.0), ('2015-06-26', 70.0), ('2015-06-27', 75.0), ('2015-06-28', 72.0), ('2015-06-29', 70.0), ('2015-06-30', 72.0), ('2016-06-01', 70.0), ('2016-06-02', 71.0), ('2016-06-03', 70.0), ('2016-06-05', 73.0), ('2016-06-06', 72.0), ('2016-06-07', 71.0), ('2016-06-08', 71.0), ('2016-06-09', 69.0), ('2016-06-10', 70.0), ('2016-06-11', 73.0), ('2016-06-12', 75.0), ('2016-06-13', 73.0), ('2016-06-14', 72.0), ('2016-06-15', 70.0), ('2016-06-16', 70.0), ('2016-06-17', 69.0), ('2016-06-18', 73.0), ('2016-06-19', 74.0), ('2016-06-20', 72.0), ('2016-06-21', 71.0), ('2016-06-22', 73.0), ('2016-06-23', 72.0), ('2016-06-25', 73.0), ('2016-06-26', 74.0), ('2016-06-27', 72.0), ('2016-06-28', 74.0), ('2016-06-29', 72.0), ('2017-06-01', 74.0), ('2017-06-03', 74.0), ('2017-06-04', 77.0), ('2017-06-05', 74.0), ('2017-06-06', 74.0), ('2017-06-07', 79.0), ('2017-06-08', 75.0), ('2017-06-10', 72.0), ('2017-06-11', 76.0), ('2017-06-12', 74.0), ('2017-06-13', 73.0), ('2017-06-14', 74.0), ('2017-06-15', 77.0), ('2017-06-16', 74.0), ('2017-06-17', 72.0), ('2017-06-18', 77.0), ('2017-06-19', 76.0), ('2017-06-20', 74.0), ('2017-06-21', 75.0), ('2017-06-22', 76.0), ('2017-06-23', 71.0), ('2017-06-24', 73.0), ('2017-06-25', 79.0), ('2017-06-26', 79.0), ('2017-06-27', 74.0), ('2017-06-28', 74.0), ('2017-06-29', 76.0), ('2017-06-30', 75.0)]\n"
],
[
"# 4. Create a DataFrame from the list of temperatures for the month of June. \njune_df = pd.DataFrame(june, columns = ['date','June Temperature'])\njune_df.set_index(june_df['date'], inplace=True)\njune_df = june_df.sort_index()\nprint(june_df.to_string(index=False))",
" date June Temperature\n2010-06-01 78.0\n2010-06-01 73.0\n2010-06-01 77.0\n2010-06-01 69.0\n2010-06-01 76.0\n2010-06-01 71.0\n2010-06-01 74.0\n2010-06-01 70.0\n2010-06-01 74.0\n2010-06-02 78.0\n2010-06-02 70.0\n2010-06-02 76.0\n2010-06-02 76.0\n2010-06-02 71.0\n2010-06-02 76.0\n2010-06-02 76.0\n2010-06-02 72.0\n2010-06-02 76.0\n2010-06-03 74.0\n2010-06-03 78.0\n2010-06-03 73.0\n2010-06-03 72.0\n2010-06-03 77.0\n2010-06-03 76.0\n2010-06-03 75.0\n2010-06-03 78.0\n2010-06-03 67.0\n2010-06-04 73.0\n2010-06-04 75.0\n2010-06-04 75.0\n2010-06-04 76.0\n2010-06-04 73.0\n2010-06-04 68.0\n2010-06-04 72.0\n2010-06-04 70.0\n2010-06-04 74.0\n2010-06-05 73.0\n2010-06-05 77.0\n2010-06-05 78.0\n2010-06-05 76.0\n2010-06-05 72.0\n2010-06-05 79.0\n2010-06-05 79.0\n2010-06-05 74.0\n2010-06-06 79.0\n2010-06-06 73.0\n2010-06-06 75.0\n2010-06-06 74.0\n2010-06-06 75.0\n2010-06-06 77.0\n2010-06-06 77.0\n2010-06-06 78.0\n2010-06-07 75.0\n2010-06-07 77.0\n2010-06-07 76.0\n2010-06-07 74.0\n2010-06-07 71.0\n2010-06-07 77.0\n2010-06-07 75.0\n2010-06-07 69.0\n2010-06-08 75.0\n2010-06-08 78.0\n2010-06-08 77.0\n2010-06-08 77.0\n2010-06-08 71.0\n2010-06-08 73.0\n2010-06-08 75.0\n2010-06-08 79.0\n2010-06-09 78.0\n2010-06-09 78.0\n2010-06-09 79.0\n2010-06-09 75.0\n2010-06-09 80.0\n2010-06-09 75.0\n2010-06-09 73.0\n2010-06-09 72.0\n2010-06-09 73.0\n2010-06-10 72.0\n2010-06-10 75.0\n2010-06-10 77.0\n2010-06-10 79.0\n2010-06-10 75.0\n2010-06-10 78.0\n2010-06-10 75.0\n2010-06-10 75.0\n2010-06-11 78.0\n2010-06-11 83.0\n2010-06-11 81.0\n2010-06-11 76.0\n2010-06-11 76.0\n2010-06-11 77.0\n2010-06-11 75.0\n2010-06-12 70.0\n2010-06-12 74.0\n2010-06-12 74.0\n2010-06-12 78.0\n2010-06-12 76.0\n2010-06-12 75.0\n2010-06-12 78.0\n2010-06-13 79.0\n2010-06-13 74.0\n2010-06-13 76.0\n2010-06-13 77.0\n2010-06-13 78.0\n2010-06-13 78.0\n2010-06-14 73.0\n2010-06-14 71.0\n2010-06-14 77.0\n2010-06-14 74.0\n2010-06-14 70.0\n2010-06-14 76.0\n2010-06-14 73.0\n2010-06-15 74.0\n2010-06-15 78.0\n2010-06-15 76.0\n2010-06-15 
76.0\n2010-06-15 69.0\n2010-06-15 73.0\n2010-06-15 73.0\n2010-06-15 75.0\n2010-06-16 74.0\n2010-06-16 73.0\n2010-06-16 76.0\n2010-06-16 70.0\n2010-06-16 76.0\n2010-06-16 78.0\n2010-06-16 74.0\n2010-06-16 77.0\n2010-06-17 76.0\n2010-06-17 77.0\n2010-06-17 72.0\n2010-06-17 75.0\n2010-06-17 78.0\n2010-06-17 75.0\n2010-06-17 73.0\n2010-06-17 75.0\n2010-06-17 75.0\n2010-06-18 72.0\n2010-06-18 73.0\n2010-06-18 70.0\n2010-06-18 70.0\n2010-06-18 73.0\n2010-06-18 77.0\n2010-06-18 70.0\n2010-06-18 71.0\n2010-06-19 74.0\n2010-06-19 82.0\n2010-06-19 75.0\n2010-06-19 74.0\n2010-06-19 76.0\n2010-06-19 74.0\n2010-06-19 78.0\n2010-06-19 77.0\n2010-06-20 73.0\n2010-06-20 78.0\n2010-06-20 85.0\n2010-06-20 77.0\n2010-06-20 78.0\n2010-06-20 75.0\n2010-06-20 76.0\n2010-06-21 77.0\n2010-06-21 78.0\n2010-06-21 75.0\n2010-06-21 76.0\n2010-06-21 74.0\n2010-06-21 74.0\n2010-06-21 76.0\n2010-06-22 75.0\n2010-06-22 72.0\n2010-06-22 79.0\n2010-06-22 74.0\n2010-06-22 78.0\n2010-06-22 69.0\n2010-06-22 74.0\n2010-06-22 76.0\n2010-06-22 77.0\n2010-06-23 72.0\n2010-06-23 76.0\n2010-06-23 74.0\n2010-06-23 75.0\n2010-06-23 78.0\n2010-06-23 74.0\n2010-06-23 73.0\n2010-06-23 77.0\n2010-06-23 77.0\n2010-06-24 73.0\n2010-06-24 78.0\n2010-06-24 75.0\n2010-06-24 74.0\n2010-06-24 75.0\n2010-06-24 76.0\n2010-06-24 78.0\n2010-06-24 74.0\n2010-06-24 72.0\n2010-06-25 77.0\n2010-06-25 74.0\n2010-06-25 71.0\n2010-06-25 77.0\n2010-06-25 74.0\n2010-06-25 70.0\n2010-06-25 74.0\n2010-06-25 70.0\n2010-06-25 71.0\n2010-06-26 70.0\n2010-06-26 76.0\n2010-06-26 69.0\n2010-06-26 75.0\n2010-06-26 72.0\n2010-06-26 80.0\n2010-06-27 78.0\n2010-06-27 80.0\n2010-06-27 73.0\n2010-06-27 75.0\n2010-06-27 77.0\n2010-06-27 76.0\n2010-06-28 73.0\n2010-06-28 76.0\n2010-06-28 71.0\n2010-06-28 73.0\n2010-06-28 78.0\n2010-06-28 76.0\n2010-06-28 74.0\n2010-06-28 77.0\n2010-06-28 72.0\n2010-06-29 70.0\n2010-06-29 78.0\n2010-06-29 78.0\n2010-06-29 73.0\n2010-06-29 73.0\n2010-06-29 72.0\n2010-06-29 75.0\n2010-06-29 76.0\n2010-06-29 
72.0\n2010-06-30 76.0\n2010-06-30 74.0\n2010-06-30 76.0\n2010-06-30 72.0\n2010-06-30 75.0\n2010-06-30 78.0\n2010-06-30 73.0\n2010-06-30 73.0\n2010-06-30 78.0\n2011-06-01 74.0\n2011-06-01 75.0\n2011-06-01 77.0\n2011-06-01 79.0\n2011-06-01 72.0\n2011-06-01 69.0\n2011-06-01 73.0\n2011-06-01 72.0\n2011-06-02 78.0\n2011-06-02 76.0\n2011-06-02 73.0\n2011-06-02 76.0\n2011-06-02 73.0\n2011-06-02 77.0\n2011-06-02 75.0\n2011-06-02 72.0\n2011-06-03 66.0\n2011-06-03 73.0\n2011-06-03 70.0\n2011-06-03 67.0\n2011-06-03 65.0\n2011-06-03 71.0\n2011-06-03 73.0\n2011-06-03 72.0\n2011-06-04 69.0\n2011-06-04 69.0\n2011-06-04 67.0\n2011-06-04 71.0\n2011-06-04 71.0\n2011-06-04 70.0\n2011-06-05 80.0\n2011-06-05 76.0\n2011-06-05 69.0\n2011-06-05 71.0\n2011-06-05 72.0\n2011-06-05 76.0\n2011-06-06 73.0\n2011-06-06 70.0\n2011-06-06 76.0\n2011-06-06 77.0\n2011-06-06 74.0\n2011-06-06 71.0\n2011-06-06 76.0\n2011-06-07 74.0\n2011-06-07 72.0\n2011-06-07 68.0\n2011-06-07 70.0\n2011-06-07 74.0\n2011-06-07 71.0\n2011-06-07 77.0\n2011-06-08 76.0\n2011-06-08 72.0\n2011-06-08 75.0\n2011-06-08 74.0\n2011-06-08 77.0\n2011-06-08 74.0\n2011-06-08 72.0\n2011-06-08 77.0\n2011-06-08 73.0\n2011-06-09 74.0\n2011-06-09 77.0\n2011-06-09 76.0\n2011-06-09 75.0\n2011-06-09 76.0\n2011-06-09 71.0\n2011-06-09 76.0\n2011-06-09 74.0\n2011-06-09 76.0\n2011-06-10 72.0\n2011-06-10 75.0\n2011-06-10 79.0\n2011-06-10 71.0\n2011-06-10 77.0\n2011-06-10 75.0\n2011-06-10 78.0\n2011-06-11 77.0\n2011-06-11 73.0\n2011-06-11 71.0\n2011-06-11 78.0\n2011-06-11 76.0\n2011-06-11 78.0\n2011-06-12 69.0\n2011-06-12 73.0\n2011-06-12 75.0\n2011-06-12 82.0\n2011-06-12 77.0\n2011-06-12 76.0\n2011-06-12 74.0\n2011-06-13 68.0\n2011-06-13 73.0\n2011-06-13 69.0\n2011-06-13 76.0\n2011-06-13 77.0\n2011-06-13 76.0\n2011-06-13 79.0\n2011-06-13 78.0\n2011-06-13 72.0\n2011-06-14 77.0\n2011-06-14 75.0\n2011-06-14 78.0\n2011-06-14 71.0\n2011-06-14 73.0\n2011-06-14 76.0\n2011-06-14 73.0\n2011-06-14 76.0\n2011-06-14 75.0\n2011-06-15 76.0\n2011-06-15 
73.0\n2011-06-15 76.0\n2011-06-15 74.0\n2011-06-15 73.0\n2011-06-15 76.0\n2011-06-15 75.0\n2011-06-15 77.0\n2011-06-15 76.0\n2011-06-16 74.0\n2011-06-16 73.0\n2011-06-16 77.0\n2011-06-16 74.0\n2011-06-16 75.0\n2011-06-16 72.0\n2011-06-16 72.0\n2011-06-16 76.0\n2011-06-17 74.0\n2011-06-17 76.0\n2011-06-17 72.0\n2011-06-17 73.0\n2011-06-17 76.0\n2011-06-17 78.0\n2011-06-17 72.0\n2011-06-17 74.0\n2011-06-18 79.0\n2011-06-18 78.0\n2011-06-18 70.0\n2011-06-18 76.0\n2011-06-18 80.0\n2011-06-18 74.0\n2011-06-18 75.0\n2011-06-19 76.0\n2011-06-19 72.0\n2011-06-19 74.0\n2011-06-19 75.0\n2011-06-19 77.0\n2011-06-19 79.0\n2011-06-19 76.0\n2011-06-20 71.0\n2011-06-20 76.0\n2011-06-20 73.0\n2011-06-20 73.0\n2011-06-20 74.0\n2011-06-20 77.0\n2011-06-20 78.0\n2011-06-20 71.0\n2011-06-21 72.0\n2011-06-21 68.0\n2011-06-21 72.0\n2011-06-21 71.0\n2011-06-21 72.0\n2011-06-21 73.0\n2011-06-21 75.0\n2011-06-21 76.0\n2011-06-21 74.0\n2011-06-22 74.0\n2011-06-22 77.0\n2011-06-22 71.0\n2011-06-22 74.0\n2011-06-22 72.0\n2011-06-22 75.0\n2011-06-22 70.0\n2011-06-22 76.0\n2011-06-23 70.0\n2011-06-23 72.0\n2011-06-23 72.0\n2011-06-23 74.0\n2011-06-23 73.0\n2011-06-23 73.0\n2011-06-23 70.0\n2011-06-24 75.0\n2011-06-24 75.0\n2011-06-24 74.0\n2011-06-24 75.0\n2011-06-24 75.0\n2011-06-24 74.0\n2011-06-24 69.0\n2011-06-24 77.0\n2011-06-25 74.0\n2011-06-25 77.0\n2011-06-25 80.0\n2011-06-25 74.0\n2011-06-25 76.0\n2011-06-25 78.0\n2011-06-25 76.0\n2011-06-26 72.0\n2011-06-26 75.0\n2011-06-26 74.0\n2011-06-26 73.0\n2011-06-26 69.0\n2011-06-26 76.0\n2011-06-27 76.0\n2011-06-27 74.0\n2011-06-27 74.0\n2011-06-27 72.0\n2011-06-27 77.0\n2011-06-27 73.0\n2011-06-27 74.0\n2011-06-27 74.0\n2011-06-28 75.0\n2011-06-28 74.0\n2011-06-28 75.0\n2011-06-28 72.0\n2011-06-28 74.0\n2011-06-28 70.0\n2011-06-28 74.0\n2011-06-28 71.0\n2011-06-29 72.0\n2011-06-29 69.0\n2011-06-29 76.0\n2011-06-29 74.0\n2011-06-29 75.0\n2011-06-29 77.0\n2011-06-29 72.0\n2011-06-30 70.0\n2011-06-30 74.0\n2011-06-30 75.0\n2011-06-30 
75.0\n2011-06-30 72.0\n2011-06-30 67.0\n2012-06-01 71.0\n2012-06-01 71.0\n2012-06-01 74.0\n2012-06-01 69.0\n2012-06-01 76.0\n2012-06-01 74.0\n2012-06-01 73.0\n2012-06-01 75.0\n2012-06-02 75.0\n2012-06-02 71.0\n2012-06-02 73.0\n2012-06-02 76.0\n2012-06-02 73.0\n2012-06-02 76.0\n2012-06-03 71.0\n2012-06-03 78.0\n2012-06-03 75.0\n2012-06-03 76.0\n2012-06-03 75.0\n2012-06-03 77.0\n2012-06-04 74.0\n2012-06-04 71.0\n2012-06-04 72.0\n2012-06-04 77.0\n2012-06-04 75.0\n2012-06-04 72.0\n2012-06-04 74.0\n2012-06-05 72.0\n2012-06-05 71.0\n2012-06-05 74.0\n2012-06-05 74.0\n2012-06-05 71.0\n2012-06-05 77.0\n2012-06-05 72.0\n2012-06-06 72.0\n2012-06-06 76.0\n2012-06-06 73.0\n2012-06-06 72.0\n2012-06-06 75.0\n2012-06-06 70.0\n2012-06-06 74.0\n2012-06-07 72.0\n2012-06-07 78.0\n2012-06-07 71.0\n2012-06-07 74.0\n2012-06-07 76.0\n2012-06-07 73.0\n2012-06-07 73.0\n2012-06-08 77.0\n2012-06-08 73.0\n2012-06-08 74.0\n2012-06-08 76.0\n2012-06-08 73.0\n2012-06-08 75.0\n2012-06-08 77.0\n2012-06-09 73.0\n2012-06-09 76.0\n2012-06-09 74.0\n2012-06-09 74.0\n2012-06-09 74.0\n2012-06-09 77.0\n2012-06-10 75.0\n2012-06-10 77.0\n2012-06-10 75.0\n2012-06-10 77.0\n2012-06-10 81.0\n2012-06-10 73.0\n2012-06-11 72.0\n2012-06-11 76.0\n2012-06-11 69.0\n2012-06-11 72.0\n2012-06-11 76.0\n2012-06-11 79.0\n2012-06-12 77.0\n2012-06-12 76.0\n2012-06-12 77.0\n2012-06-12 76.0\n2012-06-12 75.0\n2012-06-12 77.0\n2012-06-12 72.0\n2012-06-13 77.0\n2012-06-13 76.0\n2012-06-13 77.0\n2012-06-13 70.0\n2012-06-13 69.0\n2012-06-13 77.0\n2012-06-13 70.0\n2012-06-14 79.0\n2012-06-14 77.0\n2012-06-14 73.0\n2012-06-14 72.0\n2012-06-14 78.0\n2012-06-14 73.0\n2012-06-14 69.0\n2012-06-14 78.0\n2012-06-15 72.0\n2012-06-15 72.0\n2012-06-15 74.0\n2012-06-15 73.0\n2012-06-15 69.0\n2012-06-15 78.0\n2012-06-16 70.0\n2012-06-16 78.0\n2012-06-16 78.0\n2012-06-16 77.0\n2012-06-16 72.0\n2012-06-16 78.0\n2012-06-17 73.0\n2012-06-17 80.0\n2012-06-17 77.0\n2012-06-17 74.0\n2012-06-17 78.0\n2012-06-17 81.0\n2012-06-18 71.0\n2012-06-18 
76.0\n2012-06-18 77.0\n2012-06-18 73.0\n2012-06-18 73.0\n2012-06-18 75.0\n2012-06-18 75.0\n2012-06-19 73.0\n2012-06-19 72.0\n2012-06-19 72.0\n2012-06-19 76.0\n2012-06-19 72.0\n2012-06-19 70.0\n2012-06-19 74.0\n2012-06-20 69.0\n2012-06-20 70.0\n2012-06-20 76.0\n2012-06-20 75.0\n2012-06-20 71.0\n2012-06-20 72.0\n2012-06-20 71.0\n2012-06-21 76.0\n2012-06-21 76.0\n2012-06-21 71.0\n2012-06-21 75.0\n2012-06-21 73.0\n2012-06-21 74.0\n2012-06-21 76.0\n2012-06-21 78.0\n2012-06-21 72.0\n2012-06-22 76.0\n2012-06-22 78.0\n2012-06-22 73.0\n2012-06-22 72.0\n2012-06-22 71.0\n2012-06-22 75.0\n2012-06-22 72.0\n2012-06-23 68.0\n2012-06-23 76.0\n2012-06-23 76.0\n2012-06-23 77.0\n2012-06-23 73.0\n2012-06-23 73.0\n2012-06-24 77.0\n2012-06-24 71.0\n2012-06-24 76.0\n2012-06-24 68.0\n2012-06-24 69.0\n2012-06-24 81.0\n2012-06-24 76.0\n2012-06-25 70.0\n2012-06-25 74.0\n2012-06-25 72.0\n2012-06-25 72.0\n2012-06-25 77.0\n2012-06-25 68.0\n2012-06-25 73.0\n2012-06-25 73.0\n2012-06-26 69.0\n2012-06-26 75.0\n2012-06-26 72.0\n2012-06-26 71.0\n2012-06-26 71.0\n2012-06-26 71.0\n2012-06-26 72.0\n2012-06-26 73.0\n2012-06-26 72.0\n2012-06-27 71.0\n2012-06-27 72.0\n2012-06-27 75.0\n2012-06-27 68.0\n2012-06-27 73.0\n2012-06-27 74.0\n2012-06-27 69.0\n2012-06-27 76.0\n2012-06-28 76.0\n2012-06-28 72.0\n2012-06-28 74.0\n2012-06-28 75.0\n2012-06-28 73.0\n2012-06-28 69.0\n2012-06-28 77.0\n2012-06-28 75.0\n2012-06-29 71.0\n2012-06-29 76.0\n2012-06-29 71.0\n2012-06-29 73.0\n2012-06-29 79.0\n2012-06-29 76.0\n2012-06-29 72.0\n2012-06-29 76.0\n2012-06-29 71.0\n2012-06-30 78.0\n2012-06-30 79.0\n2012-06-30 72.0\n2012-06-30 77.0\n2012-06-30 78.0\n2012-06-30 81.0\n2013-06-01 80.0\n2013-06-01 76.0\n2013-06-01 79.0\n2013-06-01 78.0\n2013-06-01 75.0\n2013-06-01 81.0\n2013-06-02 76.0\n2013-06-02 78.0\n2013-06-02 81.0\n2013-06-02 75.0\n2013-06-02 72.0\n2013-06-02 74.0\n2013-06-03 74.0\n2013-06-03 77.0\n2013-06-03 73.0\n2013-06-03 78.0\n2013-06-03 78.0\n2013-06-03 79.0\n2013-06-03 75.0\n2013-06-03 75.0\n2013-06-04 
78.0\n2013-06-04 73.0\n2013-06-04 76.0\n2013-06-04 78.0\n2013-06-04 78.0\n2013-06-04 80.0\n2013-06-04 71.0\n2013-06-04 75.0\n2013-06-05 73.0\n2013-06-05 73.0\n2013-06-05 78.0\n2013-06-05 78.0\n2013-06-05 74.0\n2013-06-05 78.0\n2013-06-05 79.0\n2013-06-05 72.0\n2013-06-06 79.0\n2013-06-06 72.0\n2013-06-06 72.0\n2013-06-06 78.0\n2013-06-06 79.0\n2013-06-06 76.0\n2013-06-06 74.0\n2013-06-06 74.0\n2013-06-07 77.0\n2013-06-07 75.0\n2013-06-07 73.0\n2013-06-07 71.0\n2013-06-07 74.0\n2013-06-07 75.0\n2013-06-07 76.0\n2013-06-07 73.0\n2013-06-08 77.0\n2013-06-08 75.0\n2013-06-08 78.0\n2013-06-08 77.0\n2013-06-08 74.0\n2013-06-08 79.0\n2013-06-08 81.0\n2013-06-09 73.0\n2013-06-09 79.0\n2013-06-09 71.0\n2013-06-09 79.0\n2013-06-09 79.0\n2013-06-09 75.0\n2013-06-09 77.0\n2013-06-10 74.0\n2013-06-10 77.0\n2013-06-10 77.0\n2013-06-10 73.0\n2013-06-10 78.0\n2013-06-10 77.0\n2013-06-10 72.0\n2013-06-10 75.0\n2013-06-11 77.0\n2013-06-11 74.0\n2013-06-11 72.0\n2013-06-11 77.0\n2013-06-11 75.0\n2013-06-11 75.0\n2013-06-11 76.0\n2013-06-12 74.0\n2013-06-12 73.0\n2013-06-12 72.0\n2013-06-12 77.0\n2013-06-12 75.0\n2013-06-12 78.0\n2013-06-12 81.0\n2013-06-12 74.0\n2013-06-13 72.0\n2013-06-13 77.0\n2013-06-13 76.0\n2013-06-13 77.0\n2013-06-13 73.0\n2013-06-13 72.0\n2013-06-13 74.0\n2013-06-13 78.0\n2013-06-14 75.0\n2013-06-14 76.0\n2013-06-14 75.0\n2013-06-14 69.0\n2013-06-14 76.0\n2013-06-14 72.0\n2013-06-14 73.0\n2013-06-14 73.0\n2013-06-15 78.0\n2013-06-15 78.0\n2013-06-15 73.0\n2013-06-15 77.0\n2013-06-15 70.0\n2013-06-15 74.0\n2013-06-16 75.0\n2013-06-16 76.0\n2013-06-16 78.0\n2013-06-16 78.0\n2013-06-16 74.0\n2013-06-16 81.0\n2013-06-16 77.0\n2013-06-17 74.0\n2013-06-17 76.0\n2013-06-17 74.0\n2013-06-17 76.0\n2013-06-17 72.0\n2013-06-17 70.0\n2013-06-17 73.0\n2013-06-17 77.0\n2013-06-18 76.0\n2013-06-18 73.0\n2013-06-18 74.0\n2013-06-18 73.0\n2013-06-18 71.0\n2013-06-18 77.0\n2013-06-18 76.0\n2013-06-18 78.0\n2013-06-19 70.0\n2013-06-19 78.0\n2013-06-19 76.0\n2013-06-19 
74.0\n2013-06-19 73.0\n2013-06-19 70.0\n2013-06-19 80.0\n2013-06-19 77.0\n2013-06-20 71.0\n2013-06-20 69.0\n2013-06-20 72.0\n2013-06-20 68.0\n2013-06-20 70.0\n2013-06-20 73.0\n2013-06-20 69.0\n2013-06-20 72.0\n2013-06-21 73.0\n2013-06-21 76.0\n2013-06-21 68.0\n2013-06-21 68.0\n2013-06-21 78.0\n2013-06-21 72.0\n2013-06-21 70.0\n2013-06-22 77.0\n2013-06-22 75.0\n2013-06-22 74.0\n2013-06-22 79.0\n2013-06-22 75.0\n2013-06-22 69.0\n2013-06-23 77.0\n2013-06-23 74.0\n2013-06-23 74.0\n2013-06-23 74.0\n2013-06-23 75.0\n2013-06-23 70.0\n2013-06-24 73.0\n2013-06-24 75.0\n2013-06-24 71.0\n2013-06-24 76.0\n2013-06-24 69.0\n2013-06-24 70.0\n2013-06-25 72.0\n2013-06-25 69.0\n2013-06-25 71.0\n2013-06-25 74.0\n2013-06-25 74.0\n2013-06-25 70.0\n2013-06-25 71.0\n2013-06-25 73.0\n2013-06-26 70.0\n2013-06-26 67.0\n2013-06-26 72.0\n2013-06-26 73.0\n2013-06-26 70.0\n2013-06-26 78.0\n2013-06-26 75.0\n2013-06-26 70.0\n2013-06-27 73.0\n2013-06-27 66.0\n2013-06-27 77.0\n2013-06-27 72.0\n2013-06-27 71.0\n2013-06-27 77.0\n2013-06-27 71.0\n2013-06-27 76.0\n2013-06-28 78.0\n2013-06-28 78.0\n2013-06-28 70.0\n2013-06-28 70.0\n2013-06-28 71.0\n2013-06-28 76.0\n2013-06-29 70.0\n2013-06-29 77.0\n2013-06-29 80.0\n2013-06-29 67.0\n2013-06-29 71.0\n2013-06-29 73.0\n2013-06-30 75.0\n2013-06-30 78.0\n2013-06-30 79.0\n2013-06-30 80.0\n2013-06-30 77.0\n2013-06-30 72.0\n2014-06-01 77.0\n2014-06-01 79.0\n2014-06-01 74.0\n2014-06-01 77.0\n2014-06-01 78.0\n2014-06-01 72.0\n2014-06-01 79.0\n2014-06-02 74.0\n2014-06-02 76.0\n2014-06-02 78.0\n2014-06-02 68.0\n2014-06-02 77.0\n2014-06-02 77.0\n2014-06-02 75.0\n2014-06-02 72.0\n2014-06-03 73.0\n2014-06-03 73.0\n2014-06-03 74.0\n2014-06-03 77.0\n2014-06-03 78.0\n2014-06-03 79.0\n2014-06-03 67.0\n2014-06-04 77.0\n2014-06-04 76.0\n2014-06-04 79.0\n2014-06-04 73.0\n2014-06-04 73.0\n2014-06-04 75.0\n2014-06-04 77.0\n2014-06-04 71.0\n2014-06-05 78.0\n2014-06-05 72.0\n2014-06-05 78.0\n2014-06-05 76.0\n2014-06-05 76.0\n2014-06-05 75.0\n2014-06-05 73.0\n2014-06-06 
74.0\n2014-06-06 71.0\n2014-06-06 74.0\n2014-06-06 77.0\n2014-06-06 75.0\n2014-06-06 78.0\n2014-06-06 70.0\n2014-06-06 72.0\n2014-06-07 77.0\n2014-06-07 78.0\n2014-06-07 79.0\n2014-06-07 74.0\n2014-06-07 73.0\n2014-06-07 79.0\n2014-06-07 79.0\n2014-06-08 73.0\n2014-06-08 75.0\n2014-06-08 75.0\n2014-06-08 80.0\n2014-06-08 78.0\n2014-06-08 80.0\n2014-06-09 78.0\n2014-06-09 73.0\n2014-06-09 75.0\n2014-06-09 72.0\n2014-06-09 79.0\n2014-06-09 79.0\n2014-06-09 70.0\n2014-06-09 75.0\n2014-06-10 79.0\n2014-06-10 69.0\n2014-06-10 73.0\n2014-06-10 68.0\n2014-06-10 79.0\n2014-06-10 76.0\n2014-06-10 70.0\n2014-06-10 72.0\n2014-06-11 74.0\n2014-06-11 78.0\n2014-06-11 82.0\n2014-06-11 80.0\n2014-06-11 84.0\n2014-06-11 76.0\n2014-06-11 76.0\n2014-06-12 80.0\n2014-06-12 72.0\n2014-06-12 78.0\n2014-06-12 79.0\n2014-06-12 76.0\n2014-06-12 78.0\n2014-06-12 73.0\n2014-06-12 82.0\n2014-06-13 72.0\n2014-06-13 79.0\n2014-06-13 77.0\n2014-06-13 79.0\n2014-06-13 77.0\n2014-06-13 73.0\n2014-06-13 79.0\n2014-06-13 76.0\n2014-06-14 78.0\n2014-06-14 77.0\n2014-06-14 78.0\n2014-06-14 80.0\n2014-06-14 79.0\n2014-06-14 75.0\n2014-06-14 79.0\n2014-06-15 75.0\n2014-06-15 80.0\n2014-06-15 78.0\n2014-06-15 79.0\n2014-06-15 81.0\n2014-06-15 73.0\n2014-06-16 70.0\n2014-06-16 77.0\n2014-06-16 69.0\n2014-06-16 75.0\n2014-06-16 77.0\n2014-06-16 74.0\n2014-06-16 76.0\n2014-06-16 78.0\n2014-06-17 68.0\n2014-06-17 75.0\n2014-06-17 70.0\n2014-06-17 79.0\n2014-06-17 72.0\n2014-06-17 71.0\n2014-06-17 77.0\n2014-06-18 78.0\n2014-06-18 74.0\n2014-06-18 69.0\n2014-06-18 78.0\n2014-06-18 76.0\n2014-06-18 76.0\n2014-06-18 69.0\n2014-06-18 69.0\n2014-06-19 75.0\n2014-06-19 77.0\n2014-06-19 77.0\n2014-06-19 73.0\n2014-06-19 72.0\n2014-06-19 78.0\n2014-06-19 70.0\n2014-06-19 80.0\n2014-06-20 74.0\n2014-06-20 70.0\n2014-06-20 68.0\n2014-06-20 73.0\n2014-06-20 74.0\n2014-06-20 68.0\n2014-06-20 70.0\n2014-06-20 81.0\n2014-06-21 71.0\n2014-06-21 74.0\n2014-06-21 75.0\n2014-06-21 78.0\n2014-06-21 75.0\n2014-06-21 
77.0\n2014-06-21 73.0\n2014-06-22 70.0\n2014-06-22 79.0\n2014-06-22 76.0\n2014-06-22 75.0\n2014-06-22 75.0\n2014-06-22 72.0\n2014-06-23 76.0\n2014-06-23 77.0\n2014-06-23 76.0\n2014-06-23 71.0\n2014-06-23 70.0\n2014-06-23 69.0\n2014-06-23 70.0\n2014-06-24 75.0\n2014-06-24 84.0\n2014-06-24 70.0\n2014-06-24 73.0\n2014-06-24 71.0\n2014-06-24 70.0\n2014-06-24 77.0\n2014-06-25 79.0\n2014-06-25 72.0\n2014-06-25 73.0\n2014-06-25 74.0\n2014-06-25 70.0\n2014-06-25 74.0\n2014-06-25 74.0\n2014-06-26 72.0\n2014-06-26 74.0\n2014-06-26 76.0\n2014-06-26 71.0\n2014-06-26 71.0\n2014-06-26 70.0\n2014-06-26 79.0\n2014-06-27 68.0\n2014-06-27 79.0\n2014-06-27 72.0\n2014-06-27 78.0\n2014-06-27 81.0\n2014-06-27 74.0\n2014-06-27 70.0\n2014-06-28 79.0\n2014-06-28 76.0\n2014-06-28 71.0\n2014-06-28 76.0\n2014-06-28 77.0\n2014-06-29 79.0\n2014-06-29 78.0\n2014-06-29 70.0\n2014-06-29 77.0\n2014-06-29 73.0\n2014-06-29 72.0\n2014-06-29 74.0\n2014-06-30 79.0\n2014-06-30 69.0\n2014-06-30 79.0\n2014-06-30 76.0\n2014-06-30 75.0\n2014-06-30 71.0\n2015-06-01 70.0\n2015-06-01 77.0\n2015-06-01 78.0\n2015-06-01 76.0\n2015-06-01 69.0\n2015-06-01 78.0\n2015-06-02 67.0\n2015-06-02 72.0\n2015-06-02 68.0\n2015-06-02 78.0\n2015-06-02 78.0\n2015-06-03 75.0\n2015-06-03 72.0\n2015-06-03 71.0\n2015-06-03 75.0\n2015-06-03 79.0\n2015-06-03 77.0\n2015-06-03 68.0\n2015-06-04 70.0\n2015-06-04 76.0\n2015-06-04 72.0\n2015-06-04 68.0\n2015-06-04 77.0\n2015-06-04 71.0\n2015-06-04 75.0\n2015-06-05 79.0\n2015-06-05 70.0\n2015-06-05 71.0\n2015-06-05 76.0\n2015-06-05 75.0\n2015-06-05 68.0\n2015-06-05 78.0\n2015-06-06 78.0\n2015-06-06 76.0\n2015-06-06 80.0\n2015-06-06 78.0\n2015-06-06 79.0\n2015-06-06 71.0\n2015-06-07 75.0\n2015-06-07 72.0\n2015-06-07 78.0\n2015-06-07 78.0\n2015-06-07 77.0\n2015-06-08 77.0\n2015-06-08 74.0\n2015-06-08 70.0\n2015-06-08 72.0\n2015-06-08 74.0\n2015-06-08 71.0\n2015-06-08 78.0\n2015-06-09 79.0\n2015-06-09 72.0\n2015-06-09 69.0\n2015-06-09 78.0\n2015-06-09 77.0\n2015-06-09 74.0\n2015-06-09 
69.0\n2015-06-10 77.0\n2015-06-10 78.0\n2015-06-10 77.0\n2015-06-10 72.0\n2015-06-10 74.0\n2015-06-10 80.0\n2015-06-10 71.0\n2015-06-11 76.0\n2015-06-11 72.0\n2015-06-11 72.0\n2015-06-11 69.0\n2015-06-11 78.0\n2015-06-11 76.0\n2015-06-12 66.0\n2015-06-12 71.0\n2015-06-12 71.0\n2015-06-12 77.0\n2015-06-12 69.0\n2015-06-12 78.0\n2015-06-12 78.0\n2015-06-12 78.0\n2015-06-13 72.0\n2015-06-13 77.0\n2015-06-13 78.0\n2015-06-13 79.0\n2015-06-13 72.0\n2015-06-14 78.0\n2015-06-14 78.0\n2015-06-14 79.0\n2015-06-14 75.0\n2015-06-14 79.0\n2015-06-14 69.0\n2015-06-15 78.0\n2015-06-15 71.0\n2015-06-15 78.0\n2015-06-15 79.0\n2015-06-15 79.0\n2015-06-15 67.0\n2015-06-15 76.0\n2015-06-15 70.0\n2015-06-16 77.0\n2015-06-16 78.0\n2015-06-16 79.0\n2015-06-16 69.0\n2015-06-16 72.0\n2015-06-16 75.0\n2015-06-16 79.0\n2015-06-16 73.0\n2015-06-17 77.0\n2015-06-17 77.0\n2015-06-17 70.0\n2015-06-17 70.0\n2015-06-17 74.0\n2015-06-17 72.0\n2015-06-17 75.0\n2015-06-17 72.0\n2015-06-18 72.0\n2015-06-18 71.0\n2015-06-18 79.0\n2015-06-18 72.0\n2015-06-18 79.0\n2015-06-18 76.0\n2015-06-18 74.0\n2015-06-18 78.0\n2015-06-19 78.0\n2015-06-19 72.0\n2015-06-19 77.0\n2015-06-19 72.0\n2015-06-19 80.0\n2015-06-19 72.0\n2015-06-19 78.0\n2015-06-19 76.0\n2015-06-20 75.0\n2015-06-20 78.0\n2015-06-20 71.0\n2015-06-20 76.0\n2015-06-20 78.0\n2015-06-20 73.0\n2015-06-21 73.0\n2015-06-21 75.0\n2015-06-21 80.0\n2015-06-21 77.0\n2015-06-21 79.0\n2015-06-21 79.0\n2015-06-22 78.0\n2015-06-22 79.0\n2015-06-22 79.0\n2015-06-22 70.0\n2015-06-22 75.0\n2015-06-22 71.0\n2015-06-22 79.0\n2015-06-22 72.0\n2015-06-23 75.0\n2015-06-23 78.0\n2015-06-23 73.0\n2015-06-23 79.0\n2015-06-23 73.0\n2015-06-23 76.0\n2015-06-23 77.0\n2015-06-23 78.0\n2015-06-24 75.0\n2015-06-24 72.0\n2015-06-24 79.0\n2015-06-24 73.0\n2015-06-24 79.0\n2015-06-24 79.0\n2015-06-24 77.0\n2015-06-24 71.0\n2015-06-25 70.0\n2015-06-25 77.0\n2015-06-25 75.0\n2015-06-25 79.0\n2015-06-25 71.0\n2015-06-25 78.0\n2015-06-25 71.0\n2015-06-25 79.0\n2015-06-26 
70.0\n2015-06-26 78.0\n2015-06-26 79.0\n2015-06-26 77.0\n2015-06-26 74.0\n2015-06-26 71.0\n2015-06-26 76.0\n2015-06-27 80.0\n2015-06-27 75.0\n2015-06-27 81.0\n2015-06-27 76.0\n2015-06-27 81.0\n2015-06-27 74.0\n2015-06-28 78.0\n2015-06-28 80.0\n2015-06-28 72.0\n2015-06-28 76.0\n2015-06-29 74.0\n2015-06-29 80.0\n2015-06-29 70.0\n2015-06-29 72.0\n2015-06-29 75.0\n2015-06-29 79.0\n2015-06-29 79.0\n2015-06-30 72.0\n2015-06-30 72.0\n2015-06-30 74.0\n2015-06-30 78.0\n2015-06-30 81.0\n2015-06-30 69.0\n2016-06-01 64.0\n2016-06-01 70.0\n2016-06-01 76.0\n2016-06-01 69.0\n2016-06-01 75.0\n2016-06-01 71.0\n2016-06-01 70.0\n2016-06-02 75.0\n2016-06-02 67.0\n2016-06-02 74.0\n2016-06-02 71.0\n2016-06-02 69.0\n2016-06-02 65.0\n2016-06-02 71.0\n2016-06-03 70.0\n2016-06-03 75.0\n2016-06-03 68.0\n2016-06-03 75.0\n2016-06-03 70.0\n2016-06-03 75.0\n2016-06-03 71.0\n2016-06-04 68.0\n2016-06-04 65.0\n2016-06-04 75.0\n2016-06-04 74.0\n2016-06-05 73.0\n2016-06-05 76.0\n2016-06-05 70.0\n2016-06-05 70.0\n2016-06-05 73.0\n2016-06-06 78.0\n2016-06-06 72.0\n2016-06-06 74.0\n2016-06-06 78.0\n2016-06-06 72.0\n2016-06-06 76.0\n2016-06-07 77.0\n2016-06-07 75.0\n2016-06-07 78.0\n2016-06-07 71.0\n2016-06-07 77.0\n2016-06-07 74.0\n2016-06-07 75.0\n2016-06-08 80.0\n2016-06-08 77.0\n2016-06-08 74.0\n2016-06-08 73.0\n2016-06-08 77.0\n2016-06-08 74.0\n2016-06-08 71.0\n2016-06-09 73.0\n2016-06-09 71.0\n2016-06-09 72.0\n2016-06-09 69.0\n2016-06-09 76.0\n2016-06-09 71.0\n2016-06-09 75.0\n2016-06-10 70.0\n2016-06-10 78.0\n2016-06-10 77.0\n2016-06-10 75.0\n2016-06-10 73.0\n2016-06-10 75.0\n2016-06-11 72.0\n2016-06-11 75.0\n2016-06-11 78.0\n2016-06-11 73.0\n2016-06-11 77.0\n2016-06-11 78.0\n2016-06-12 75.0\n2016-06-12 74.0\n2016-06-12 77.0\n2016-06-12 75.0\n2016-06-12 77.0\n2016-06-12 78.0\n2016-06-13 78.0\n2016-06-13 80.0\n2016-06-13 76.0\n2016-06-13 76.0\n2016-06-13 78.0\n2016-06-13 73.0\n2016-06-13 79.0\n2016-06-14 76.0\n2016-06-14 79.0\n2016-06-14 81.0\n2016-06-14 75.0\n2016-06-14 79.0\n2016-06-14 
79.0\n2016-06-14 72.0\n2016-06-15 80.0\n2016-06-15 78.0\n2016-06-15 76.0\n2016-06-15 78.0\n2016-06-15 78.0\n2016-06-15 70.0\n2016-06-15 76.0\n2016-06-16 76.0\n2016-06-16 70.0\n2016-06-16 76.0\n2016-06-16 79.0\n2016-06-16 77.0\n2016-06-16 75.0\n2016-06-17 71.0\n2016-06-17 75.0\n2016-06-17 69.0\n2016-06-17 73.0\n2016-06-17 73.0\n2016-06-17 76.0\n2016-06-17 76.0\n2016-06-18 73.0\n2016-06-18 75.0\n2016-06-18 78.0\n2016-06-18 77.0\n2016-06-18 75.0\n2016-06-18 77.0\n2016-06-19 79.0\n2016-06-19 75.0\n2016-06-19 77.0\n2016-06-19 79.0\n2016-06-19 74.0\n2016-06-19 75.0\n2016-06-20 74.0\n2016-06-20 74.0\n2016-06-20 80.0\n2016-06-20 72.0\n2016-06-20 79.0\n2016-06-20 72.0\n2016-06-20 78.0\n2016-06-21 80.0\n2016-06-21 72.0\n2016-06-21 76.0\n2016-06-21 77.0\n2016-06-21 76.0\n2016-06-21 73.0\n2016-06-21 71.0\n2016-06-22 76.0\n2016-06-22 73.0\n2016-06-22 79.0\n2016-06-22 78.0\n2016-06-22 75.0\n2016-06-22 79.0\n2016-06-22 79.0\n2016-06-23 80.0\n2016-06-23 73.0\n2016-06-23 74.0\n2016-06-23 72.0\n2016-06-23 79.0\n2016-06-23 79.0\n2016-06-23 79.0\n2016-06-24 78.0\n2016-06-24 77.0\n2016-06-24 77.0\n2016-06-24 76.0\n2016-06-24 78.0\n2016-06-24 80.0\n2016-06-25 77.0\n2016-06-25 71.0\n2016-06-25 73.0\n2016-06-25 74.0\n2016-06-25 78.0\n2016-06-25 80.0\n2016-06-26 74.0\n2016-06-26 79.0\n2016-06-26 79.0\n2016-06-26 79.0\n2016-06-26 77.0\n2016-06-26 75.0\n2016-06-27 72.0\n2016-06-27 81.0\n2016-06-27 72.0\n2016-06-27 72.0\n2016-06-27 80.0\n2016-06-27 78.0\n2016-06-27 75.0\n2016-06-28 78.0\n2016-06-28 78.0\n2016-06-28 71.0\n2016-06-28 76.0\n2016-06-28 74.0\n2016-06-28 78.0\n2016-06-28 74.0\n2016-06-29 77.0\n2016-06-29 80.0\n2016-06-29 79.0\n2016-06-29 79.0\n2016-06-29 72.0\n2016-06-29 76.0\n2016-06-29 80.0\n2016-06-30 77.0\n2016-06-30 81.0\n2016-06-30 78.0\n2016-06-30 77.0\n2016-06-30 79.0\n2016-06-30 80.0\n2017-06-01 74.0\n2017-06-01 81.0\n2017-06-01 79.0\n2017-06-01 76.0\n2017-06-01 79.0\n2017-06-01 80.0\n2017-06-02 76.0\n2017-06-02 79.0\n2017-06-02 78.0\n2017-06-02 81.0\n2017-06-02 
79.0\n2017-06-02 76.0\n2017-06-03 74.0\n2017-06-03 79.0\n2017-06-03 76.0\n2017-06-03 79.0\n2017-06-03 80.0\n2017-06-03 76.0\n2017-06-04 77.0\n2017-06-04 78.0\n2017-06-04 77.0\n2017-06-04 79.0\n2017-06-04 79.0\n2017-06-04 81.0\n2017-06-05 78.0\n2017-06-05 76.0\n2017-06-05 74.0\n2017-06-05 80.0\n2017-06-05 81.0\n2017-06-05 75.0\n2017-06-05 78.0\n2017-06-06 78.0\n2017-06-06 75.0\n2017-06-06 79.0\n2017-06-06 80.0\n2017-06-06 80.0\n2017-06-06 75.0\n2017-06-06 74.0\n2017-06-07 79.0\n2017-06-07 79.0\n2017-06-07 75.0\n2017-06-07 81.0\n2017-06-07 78.0\n2017-06-07 80.0\n2017-06-07 75.0\n2017-06-08 75.0\n2017-06-08 79.0\n2017-06-08 76.0\n2017-06-08 78.0\n2017-06-08 75.0\n2017-06-08 80.0\n2017-06-08 77.0\n2017-06-09 80.0\n2017-06-09 81.0\n2017-06-09 72.0\n2017-06-09 78.0\n2017-06-09 80.0\n2017-06-09 78.0\n2017-06-10 75.0\n2017-06-10 72.0\n2017-06-10 74.0\n2017-06-10 75.0\n2017-06-10 77.0\n2017-06-10 78.0\n2017-06-11 81.0\n2017-06-11 79.0\n2017-06-11 76.0\n2017-06-11 74.0\n2017-06-11 73.0\n2017-06-11 79.0\n2017-06-12 74.0\n2017-06-12 76.0\n2017-06-12 83.0\n2017-06-12 79.0\n2017-06-12 80.0\n2017-06-12 74.0\n2017-06-13 73.0\n2017-06-13 76.0\n2017-06-13 77.0\n2017-06-13 80.0\n2017-06-13 81.0\n2017-06-13 75.0\n2017-06-13 76.0\n2017-06-14 79.0\n2017-06-14 76.0\n2017-06-14 80.0\n2017-06-14 74.0\n2017-06-14 74.0\n2017-06-14 78.0\n2017-06-14 80.0\n2017-06-15 75.0\n2017-06-15 78.0\n2017-06-15 77.0\n2017-06-15 77.0\n2017-06-15 78.0\n2017-06-15 78.0\n2017-06-15 77.0\n2017-06-16 73.0\n2017-06-16 76.0\n2017-06-16 80.0\n2017-06-16 74.0\n2017-06-16 80.0\n2017-06-16 79.0\n2017-06-16 78.0\n2017-06-17 79.0\n2017-06-17 72.0\n2017-06-17 78.0\n2017-06-17 80.0\n2017-06-17 77.0\n2017-06-17 77.0\n2017-06-18 72.0\n2017-06-18 77.0\n2017-06-18 75.0\n2017-06-18 77.0\n2017-06-18 77.0\n2017-06-18 78.0\n2017-06-19 80.0\n2017-06-19 78.0\n2017-06-19 72.0\n2017-06-19 78.0\n2017-06-19 78.0\n2017-06-19 76.0\n2017-06-19 77.0\n2017-06-20 77.0\n2017-06-20 72.0\n2017-06-20 78.0\n2017-06-20 79.0\n2017-06-20 
74.0\n2017-06-20 75.0\n2017-06-21 75.0\n2017-06-21 80.0\n2017-06-21 76.0\n2017-06-21 79.0\n2017-06-21 82.0\n2017-06-21 74.0\n2017-06-22 80.0\n2017-06-22 80.0\n2017-06-22 81.0\n2017-06-22 77.0\n2017-06-22 76.0\n2017-06-22 72.0\n2017-06-23 72.0\n2017-06-23 76.0\n2017-06-23 76.0\n2017-06-23 79.0\n2017-06-23 76.0\n2017-06-23 78.0\n2017-06-23 71.0\n2017-06-24 77.0\n2017-06-24 74.0\n2017-06-24 82.0\n2017-06-24 73.0\n2017-06-24 80.0\n2017-06-25 79.0\n2017-06-25 73.0\n2017-06-25 80.0\n2017-06-25 80.0\n2017-06-25 71.0\n2017-06-26 77.0\n2017-06-26 81.0\n2017-06-26 73.0\n2017-06-26 79.0\n2017-06-26 80.0\n2017-06-26 82.0\n2017-06-26 81.0\n2017-06-27 76.0\n2017-06-27 81.0\n2017-06-27 80.0\n2017-06-27 77.0\n2017-06-27 74.0\n2017-06-27 80.0\n2017-06-28 79.0\n2017-06-28 74.0\n2017-06-28 80.0\n2017-06-28 77.0\n2017-06-28 77.0\n2017-06-28 79.0\n2017-06-29 76.0\n2017-06-29 76.0\n2017-06-29 79.0\n2017-06-29 76.0\n2017-06-29 78.0\n2017-06-29 76.0\n2017-06-29 79.0\n2017-06-30 74.0\n2017-06-30 81.0\n2017-06-30 76.0\n2017-06-30 75.0\n2017-06-30 75.0\n2017-06-30 74.0\n2017-06-30 75.0\n"
],
[
"# 5. Calculate and print out the summary statistics for the June temperature DataFrame.\njune_df.describe()",
"_____no_output_____"
]
],
[
[
"## D2: Determine the Summary Statistics for December",
"_____no_output_____"
]
],
[
[
"# 6. Write a query that filters the Measurement table to retrieve the temperatures for the month of December.\ndec_results = session.query(Measurement.date, Measurement.tobs).filter(extract('month', Measurement.date) == 12)",
"_____no_output_____"
],
[
"# 7. Convert the December temperatures to a list.\n# Assign the query result directly; a separate empty-list initialization is unnecessary\n# because .all() already returns a list.\ndecember = session.query(Measurement.date, Measurement.tobs).filter(extract('month', Measurement.date) == 12).all()",
"_____no_output_____"
],
[
"# 8. Create a DataFrame from the list of temperatures for the month of December. \ndec_df = pd.DataFrame(december, columns = ['date','December Temperature'])\ndec_df.set_index(dec_df['date'], inplace=True)\ndec_df = dec_df.sort_index()\nprint(dec_df.to_string(index=False))",
" date December Temperature\n2010-12-01 76.0\n2010-12-01 73.0\n2010-12-01 72.0\n2010-12-01 78.0\n2010-12-01 72.0\n2010-12-01 70.0\n2010-12-01 71.0\n2010-12-01 73.0\n2010-12-02 71.0\n2010-12-02 71.0\n2010-12-02 73.0\n2010-12-02 75.0\n2010-12-02 74.0\n2010-12-02 72.0\n2010-12-02 74.0\n2010-12-02 70.0\n2010-12-03 67.0\n2010-12-03 74.0\n2010-12-03 70.0\n2010-12-03 74.0\n2010-12-03 71.0\n2010-12-03 74.0\n2010-12-03 72.0\n2010-12-03 73.0\n2010-12-04 75.0\n2010-12-04 72.0\n2010-12-04 77.0\n2010-12-04 74.0\n2010-12-04 74.0\n2010-12-04 78.0\n2010-12-04 74.0\n2010-12-05 66.0\n2010-12-05 69.0\n2010-12-05 71.0\n2010-12-05 78.0\n2010-12-05 73.0\n2010-12-05 73.0\n2010-12-05 69.0\n2010-12-06 65.0\n2010-12-06 61.0\n2010-12-06 61.0\n2010-12-06 64.0\n2010-12-06 66.0\n2010-12-06 78.0\n2010-12-06 66.0\n2010-12-06 61.0\n2010-12-06 64.0\n2010-12-07 58.0\n2010-12-07 71.0\n2010-12-07 69.0\n2010-12-07 64.0\n2010-12-07 76.0\n2010-12-07 66.0\n2010-12-07 62.0\n2010-12-07 64.0\n2010-12-07 66.0\n2010-12-08 60.0\n2010-12-08 65.0\n2010-12-08 63.0\n2010-12-08 67.0\n2010-12-08 67.0\n2010-12-08 73.0\n2010-12-08 62.0\n2010-12-08 66.0\n2010-12-08 68.0\n2010-12-09 77.0\n2010-12-09 71.0\n2010-12-09 75.0\n2010-12-09 76.0\n2010-12-09 73.0\n2010-12-09 70.0\n2010-12-09 74.0\n2010-12-09 77.0\n2010-12-09 77.0\n2010-12-10 64.0\n2010-12-10 63.0\n2010-12-10 65.0\n2010-12-10 68.0\n2010-12-10 64.0\n2010-12-10 65.0\n2010-12-10 66.0\n2010-12-10 67.0\n2010-12-10 65.0\n2010-12-11 64.0\n2010-12-11 72.0\n2010-12-11 72.0\n2010-12-11 69.0\n2010-12-11 69.0\n2010-12-11 69.0\n2010-12-11 67.0\n2010-12-11 72.0\n2010-12-12 70.0\n2010-12-12 68.0\n2010-12-12 65.0\n2010-12-12 75.0\n2010-12-12 69.0\n2010-12-12 71.0\n2010-12-12 70.0\n2010-12-13 70.0\n2010-12-13 69.0\n2010-12-13 69.0\n2010-12-13 66.0\n2010-12-13 80.0\n2010-12-13 68.0\n2010-12-13 68.0\n2010-12-13 75.0\n2010-12-13 67.0\n2010-12-14 71.0\n2010-12-14 71.0\n2010-12-14 71.0\n2010-12-14 67.0\n2010-12-14 74.0\n2010-12-14 74.0\n2010-12-14 69.0\n2010-12-14 78.0\n2010-12-14 
71.0\n2010-12-15 74.0\n2010-12-15 70.0\n2010-12-15 74.0\n2010-12-15 72.0\n2010-12-15 74.0\n2010-12-15 73.0\n2010-12-15 71.0\n2010-12-15 72.0\n2010-12-15 66.0\n2010-12-16 68.0\n2010-12-16 68.0\n2010-12-16 65.0\n2010-12-16 68.0\n2010-12-16 67.0\n2010-12-16 65.0\n2010-12-16 68.0\n2010-12-16 66.0\n2010-12-16 63.0\n2010-12-17 66.0\n2010-12-17 68.0\n2010-12-17 68.0\n2010-12-17 63.0\n2010-12-17 66.0\n2010-12-17 66.0\n2010-12-17 64.0\n2010-12-17 68.0\n2010-12-18 68.0\n2010-12-18 65.0\n2010-12-18 69.0\n2010-12-18 77.0\n2010-12-18 70.0\n2010-12-18 65.0\n2010-12-18 69.0\n2010-12-19 67.0\n2010-12-19 70.0\n2010-12-19 71.0\n2010-12-19 70.0\n2010-12-19 69.0\n2010-12-19 69.0\n2010-12-19 69.0\n2010-12-20 72.0\n2010-12-20 71.0\n2010-12-20 73.0\n2010-12-20 73.0\n2010-12-20 68.0\n2010-12-20 70.0\n2010-12-21 71.0\n2010-12-21 71.0\n2010-12-21 69.0\n2010-12-21 70.0\n2010-12-21 72.0\n2010-12-21 70.0\n2010-12-22 68.0\n2010-12-22 69.0\n2010-12-22 67.0\n2010-12-22 71.0\n2010-12-22 68.0\n2010-12-22 71.0\n2010-12-23 70.0\n2010-12-23 73.0\n2010-12-23 69.0\n2010-12-23 67.0\n2010-12-23 71.0\n2010-12-23 67.0\n2010-12-23 71.0\n2010-12-24 71.0\n2010-12-24 69.0\n2010-12-24 67.0\n2010-12-24 70.0\n2010-12-24 70.0\n2010-12-24 69.0\n2010-12-24 69.0\n2010-12-25 73.0\n2010-12-25 74.0\n2010-12-25 71.0\n2010-12-25 69.0\n2010-12-25 70.0\n2010-12-26 72.0\n2010-12-26 72.0\n2010-12-26 74.0\n2010-12-26 71.0\n2010-12-26 74.0\n2010-12-26 68.0\n2010-12-26 71.0\n2010-12-27 76.0\n2010-12-27 76.0\n2010-12-27 71.0\n2010-12-27 72.0\n2010-12-27 76.0\n2010-12-27 76.0\n2010-12-27 74.0\n2010-12-28 70.0\n2010-12-28 72.0\n2010-12-28 72.0\n2010-12-28 70.0\n2010-12-28 71.0\n2010-12-28 71.0\n2010-12-28 69.0\n2010-12-29 73.0\n2010-12-29 71.0\n2010-12-29 73.0\n2010-12-29 72.0\n2010-12-29 75.0\n2010-12-29 74.0\n2010-12-29 71.0\n2010-12-30 70.0\n2010-12-30 70.0\n2010-12-30 70.0\n2010-12-30 73.0\n2010-12-30 73.0\n2010-12-30 75.0\n2010-12-30 75.0\n2010-12-31 69.0\n2010-12-31 74.0\n2010-12-31 75.0\n2010-12-31 76.0\n2010-12-31 
77.0\n2010-12-31 75.0\n2010-12-31 72.0\n2011-12-01 69.0\n2011-12-01 69.0\n2011-12-01 71.0\n2011-12-01 66.0\n2011-12-01 70.0\n2011-12-01 67.0\n2011-12-01 69.0\n2011-12-01 69.0\n2011-12-02 69.0\n2011-12-02 68.0\n2011-12-02 68.0\n2011-12-02 73.0\n2011-12-02 69.0\n2011-12-02 69.0\n2011-12-02 69.0\n2011-12-02 68.0\n2011-12-03 73.0\n2011-12-03 71.0\n2011-12-03 75.0\n2011-12-03 73.0\n2011-12-03 69.0\n2011-12-03 72.0\n2011-12-03 74.0\n2011-12-04 73.0\n2011-12-04 70.0\n2011-12-04 74.0\n2011-12-04 74.0\n2011-12-04 71.0\n2011-12-04 73.0\n2011-12-04 71.0\n2011-12-05 71.0\n2011-12-05 71.0\n2011-12-05 73.0\n2011-12-05 73.0\n2011-12-05 71.0\n2011-12-05 68.0\n2011-12-05 71.0\n2011-12-05 73.0\n2011-12-06 73.0\n2011-12-06 70.0\n2011-12-06 67.0\n2011-12-06 72.0\n2011-12-06 72.0\n2011-12-06 73.0\n2011-12-06 70.0\n2011-12-06 72.0\n2011-12-07 72.0\n2011-12-07 71.0\n2011-12-07 70.0\n2011-12-07 70.0\n2011-12-07 69.0\n2011-12-07 68.0\n2011-12-07 65.0\n2011-12-07 73.0\n2011-12-08 72.0\n2011-12-08 70.0\n2011-12-08 71.0\n2011-12-08 73.0\n2011-12-08 71.0\n2011-12-08 70.0\n2011-12-08 72.0\n2011-12-08 67.0\n2011-12-09 65.0\n2011-12-09 70.0\n2011-12-09 70.0\n2011-12-09 68.0\n2011-12-09 71.0\n2011-12-09 74.0\n2011-12-09 66.0\n2011-12-10 72.0\n2011-12-10 75.0\n2011-12-10 72.0\n2011-12-10 72.0\n2011-12-10 72.0\n2011-12-10 73.0\n2011-12-10 69.0\n2011-12-11 71.0\n2011-12-11 73.0\n2011-12-11 71.0\n2011-12-11 70.0\n2011-12-11 73.0\n2011-12-11 72.0\n2011-12-12 70.0\n2011-12-12 74.0\n2011-12-12 76.0\n2011-12-12 72.0\n2011-12-12 74.0\n2011-12-12 73.0\n2011-12-12 69.0\n2011-12-12 77.0\n2011-12-13 67.0\n2011-12-13 72.0\n2011-12-13 69.0\n2011-12-13 71.0\n2011-12-13 71.0\n2011-12-13 69.0\n2011-12-13 70.0\n2011-12-13 73.0\n2011-12-14 71.0\n2011-12-14 71.0\n2011-12-14 68.0\n2011-12-14 73.0\n2011-12-14 71.0\n2011-12-14 65.0\n2011-12-14 70.0\n2011-12-14 70.0\n2011-12-15 69.0\n2011-12-15 70.0\n2011-12-15 73.0\n2011-12-15 68.0\n2011-12-15 74.0\n2011-12-15 72.0\n2011-12-15 73.0\n2011-12-15 72.0\n2011-12-16 
66.0\n2011-12-16 69.0\n2011-12-16 71.0\n2011-12-16 71.0\n2011-12-16 73.0\n2011-12-16 70.0\n2011-12-16 69.0\n2011-12-16 71.0\n2011-12-17 71.0\n2011-12-17 71.0\n2011-12-17 69.0\n2011-12-17 73.0\n2011-12-17 67.0\n2011-12-17 73.0\n2011-12-18 74.0\n2011-12-18 71.0\n2011-12-18 69.0\n2011-12-18 69.0\n2011-12-18 72.0\n2011-12-18 73.0\n2011-12-18 72.0\n2011-12-19 72.0\n2011-12-19 68.0\n2011-12-19 74.0\n2011-12-19 72.0\n2011-12-19 76.0\n2011-12-19 73.0\n2011-12-19 70.0\n2011-12-20 74.0\n2011-12-20 70.0\n2011-12-20 72.0\n2011-12-20 72.0\n2011-12-20 71.0\n2011-12-20 72.0\n2011-12-20 70.0\n2011-12-21 71.0\n2011-12-21 72.0\n2011-12-21 69.0\n2011-12-21 69.0\n2011-12-21 69.0\n2011-12-21 68.0\n2011-12-21 72.0\n2011-12-22 69.0\n2011-12-22 71.0\n2011-12-22 70.0\n2011-12-22 72.0\n2011-12-22 72.0\n2011-12-22 71.0\n2011-12-22 73.0\n2011-12-23 69.0\n2011-12-23 72.0\n2011-12-23 71.0\n2011-12-23 71.0\n2011-12-23 73.0\n2011-12-23 70.0\n2011-12-24 72.0\n2011-12-24 70.0\n2011-12-24 69.0\n2011-12-24 70.0\n2011-12-24 71.0\n2011-12-24 65.0\n2011-12-24 70.0\n2011-12-25 73.0\n2011-12-25 73.0\n2011-12-25 73.0\n2011-12-25 69.0\n2011-12-25 74.0\n2011-12-25 75.0\n2011-12-26 71.0\n2011-12-26 73.0\n2011-12-26 69.0\n2011-12-26 72.0\n2011-12-26 75.0\n2011-12-26 72.0\n2011-12-26 71.0\n2011-12-27 72.0\n2011-12-27 71.0\n2011-12-27 70.0\n2011-12-27 69.0\n2011-12-27 72.0\n2011-12-27 76.0\n2011-12-27 71.0\n2011-12-28 69.0\n2011-12-28 72.0\n2011-12-28 71.0\n2011-12-28 72.0\n2011-12-28 73.0\n2011-12-28 69.0\n2011-12-29 72.0\n2011-12-29 73.0\n2011-12-29 70.0\n2011-12-29 71.0\n2011-12-29 77.0\n2011-12-29 73.0\n2011-12-29 70.0\n2011-12-30 73.0\n2011-12-30 70.0\n2011-12-30 66.0\n2011-12-30 72.0\n2011-12-30 71.0\n2011-12-30 65.0\n2011-12-30 71.0\n2011-12-31 67.0\n2011-12-31 67.0\n2011-12-31 67.0\n2011-12-31 63.0\n2011-12-31 65.0\n2011-12-31 65.0\n2011-12-31 71.0\n2012-12-01 65.0\n2012-12-01 72.0\n2012-12-01 74.0\n2012-12-01 68.0\n2012-12-01 77.0\n2012-12-01 65.0\n2012-12-02 70.0\n2012-12-02 72.0\n2012-12-02 
72.0\n2012-12-02 71.0\n2012-12-02 77.0\n2012-12-02 67.0\n2012-12-03 78.0\n2012-12-03 78.0\n2012-12-03 76.0\n2012-12-03 75.0\n2012-12-03 79.0\n2012-12-03 74.0\n2012-12-03 76.0\n2012-12-03 74.0\n2012-12-03 76.0\n2012-12-04 73.0\n2012-12-04 73.0\n2012-12-04 68.0\n2012-12-04 67.0\n2012-12-04 68.0\n2012-12-04 71.0\n2012-12-04 76.0\n2012-12-04 70.0\n2012-12-05 72.0\n2012-12-05 73.0\n2012-12-05 72.0\n2012-12-05 74.0\n2012-12-05 71.0\n2012-12-05 72.0\n2012-12-05 71.0\n2012-12-05 76.0\n2012-12-06 71.0\n2012-12-06 73.0\n2012-12-06 73.0\n2012-12-06 69.0\n2012-12-06 68.0\n2012-12-06 64.0\n2012-12-06 75.0\n2012-12-06 71.0\n2012-12-06 69.0\n2012-12-07 68.0\n2012-12-07 68.0\n2012-12-07 72.0\n2012-12-07 71.0\n2012-12-07 75.0\n2012-12-07 66.0\n2012-12-07 69.0\n2012-12-07 71.0\n2012-12-08 76.0\n2012-12-08 69.0\n2012-12-08 75.0\n2012-12-08 69.0\n2012-12-08 70.0\n2012-12-08 69.0\n2012-12-09 73.0\n2012-12-09 80.0\n2012-12-09 74.0\n2012-12-09 75.0\n2012-12-09 71.0\n2012-12-09 74.0\n2012-12-10 72.0\n2012-12-10 72.0\n2012-12-10 76.0\n2012-12-10 76.0\n2012-12-10 73.0\n2012-12-10 73.0\n2012-12-10 76.0\n2012-12-10 75.0\n2012-12-11 74.0\n2012-12-11 73.0\n2012-12-11 72.0\n2012-12-11 72.0\n2012-12-11 72.0\n2012-12-11 74.0\n2012-12-11 75.0\n2012-12-11 76.0\n2012-12-12 73.0\n2012-12-12 73.0\n2012-12-12 67.0\n2012-12-12 72.0\n2012-12-12 73.0\n2012-12-12 71.0\n2012-12-12 74.0\n2012-12-12 73.0\n2012-12-13 70.0\n2012-12-13 73.0\n2012-12-13 73.0\n2012-12-13 72.0\n2012-12-13 73.0\n2012-12-13 67.0\n2012-12-13 72.0\n2012-12-13 68.0\n2012-12-14 74.0\n2012-12-14 73.0\n2012-12-14 72.0\n2012-12-14 73.0\n2012-12-14 69.0\n2012-12-14 74.0\n2012-12-14 74.0\n2012-12-14 74.0\n2012-12-15 74.0\n2012-12-15 73.0\n2012-12-15 66.0\n2012-12-15 73.0\n2012-12-15 71.0\n2012-12-15 75.0\n2012-12-16 73.0\n2012-12-16 73.0\n2012-12-16 76.0\n2012-12-16 74.0\n2012-12-16 66.0\n2012-12-16 72.0\n2012-12-17 73.0\n2012-12-17 69.0\n2012-12-17 72.0\n2012-12-17 73.0\n2012-12-17 73.0\n2012-12-17 74.0\n2012-12-17 72.0\n2012-12-18 
71.0\n2012-12-18 72.0\n2012-12-18 77.0\n2012-12-18 66.0\n2012-12-18 72.0\n2012-12-18 70.0\n2012-12-18 74.0\n2012-12-19 73.0\n2012-12-19 72.0\n2012-12-19 70.0\n2012-12-19 68.0\n2012-12-19 70.0\n2012-12-19 76.0\n2012-12-19 74.0\n2012-12-20 70.0\n2012-12-20 70.0\n2012-12-20 71.0\n2012-12-20 65.0\n2012-12-20 71.0\n2012-12-20 71.0\n2012-12-21 70.0\n2012-12-21 72.0\n2012-12-21 75.0\n2012-12-21 68.0\n2012-12-21 70.0\n2012-12-21 73.0\n2012-12-21 71.0\n2012-12-22 71.0\n2012-12-22 72.0\n2012-12-22 77.0\n2012-12-22 67.0\n2012-12-22 71.0\n2012-12-22 72.0\n2012-12-22 76.0\n2012-12-23 64.0\n2012-12-23 65.0\n2012-12-23 61.0\n2012-12-23 65.0\n2012-12-23 71.0\n2012-12-23 66.0\n2012-12-24 74.0\n2012-12-24 65.0\n2012-12-24 62.0\n2012-12-24 74.0\n2012-12-24 65.0\n2012-12-24 65.0\n2012-12-24 66.0\n2012-12-25 73.0\n2012-12-25 73.0\n2012-12-25 73.0\n2012-12-25 74.0\n2012-12-25 71.0\n2012-12-26 69.0\n2012-12-26 76.0\n2012-12-26 72.0\n2012-12-26 77.0\n2012-12-26 71.0\n2012-12-26 71.0\n2012-12-26 73.0\n2012-12-27 72.0\n2012-12-27 72.0\n2012-12-27 69.0\n2012-12-27 74.0\n2012-12-27 76.0\n2012-12-27 70.0\n2012-12-27 63.0\n2012-12-28 65.0\n2012-12-28 75.0\n2012-12-28 62.0\n2012-12-28 64.0\n2012-12-28 62.0\n2012-12-28 69.0\n2012-12-28 65.0\n2012-12-28 69.0\n2012-12-29 65.0\n2012-12-29 66.0\n2012-12-29 62.0\n2012-12-29 78.0\n2012-12-29 64.0\n2012-12-29 69.0\n2012-12-30 67.0\n2012-12-30 68.0\n2012-12-30 69.0\n2012-12-30 76.0\n2012-12-30 69.0\n2012-12-30 68.0\n2012-12-31 65.0\n2012-12-31 66.0\n2012-12-31 66.0\n2012-12-31 68.0\n2012-12-31 67.0\n2012-12-31 68.0\n2012-12-31 76.0\n2013-12-01 76.0\n2013-12-01 77.0\n2013-12-01 76.0\n2013-12-01 72.0\n2013-12-01 70.0\n2013-12-01 68.0\n2013-12-02 71.0\n2013-12-02 73.0\n2013-12-02 68.0\n2013-12-02 74.0\n2013-12-02 68.0\n2013-12-02 70.0\n2013-12-02 73.0\n2013-12-02 67.0\n2013-12-03 70.0\n2013-12-03 71.0\n2013-12-03 79.0\n2013-12-03 71.0\n2013-12-03 75.0\n2013-12-03 72.0\n2013-12-03 75.0\n2013-12-03 68.0\n2013-12-04 69.0\n2013-12-04 68.0\n2013-12-04 
69.0\n2013-12-04 69.0\n2013-12-04 73.0\n2013-12-04 66.0\n2013-12-04 70.0\n2013-12-04 66.0\n2013-12-05 64.0\n2013-12-05 64.0\n2013-12-05 68.0\n2013-12-05 68.0\n2013-12-05 75.0\n2013-12-05 67.0\n2013-12-05 67.0\n2013-12-05 73.0\n2013-12-06 74.0\n2013-12-06 67.0\n2013-12-06 68.0\n2013-12-06 69.0\n2013-12-06 69.0\n2013-12-06 70.0\n2013-12-06 65.0\n2013-12-06 70.0\n2013-12-07 71.0\n2013-12-07 69.0\n2013-12-07 73.0\n2013-12-07 69.0\n2013-12-07 78.0\n2013-12-07 70.0\n2013-12-08 73.0\n2013-12-08 83.0\n2013-12-08 69.0\n2013-12-08 72.0\n2013-12-08 78.0\n2013-12-08 70.0\n2013-12-08 74.0\n2013-12-09 69.0\n2013-12-09 78.0\n2013-12-09 72.0\n2013-12-09 68.0\n2013-12-09 70.0\n2013-12-09 67.0\n2013-12-09 72.0\n2013-12-09 73.0\n2013-12-10 75.0\n2013-12-10 76.0\n2013-12-10 72.0\n2013-12-10 70.0\n2013-12-10 75.0\n2013-12-10 73.0\n2013-12-10 76.0\n2013-12-10 74.0\n2013-12-11 75.0\n2013-12-11 74.0\n2013-12-11 69.0\n2013-12-11 70.0\n2013-12-11 76.0\n2013-12-11 75.0\n2013-12-11 75.0\n2013-12-11 70.0\n2013-12-12 68.0\n2013-12-12 67.0\n2013-12-12 75.0\n2013-12-12 69.0\n2013-12-12 72.0\n2013-12-12 76.0\n2013-12-12 72.0\n2013-12-12 65.0\n2013-12-13 71.0\n2013-12-13 69.0\n2013-12-13 76.0\n2013-12-13 74.0\n2013-12-13 71.0\n2013-12-13 75.0\n2013-12-13 71.0\n2013-12-13 71.0\n2013-12-14 73.0\n2013-12-14 73.0\n2013-12-14 77.0\n2013-12-14 71.0\n2013-12-14 78.0\n2013-12-14 71.0\n2013-12-14 72.0\n2013-12-15 69.0\n2013-12-15 69.0\n2013-12-15 71.0\n2013-12-15 73.0\n2013-12-15 68.0\n2013-12-15 69.0\n2013-12-16 69.0\n2013-12-16 72.0\n2013-12-16 65.0\n2013-12-16 64.0\n2013-12-16 63.0\n2013-12-16 68.0\n2013-12-16 68.0\n2013-12-16 67.0\n2013-12-17 74.0\n2013-12-17 72.0\n2013-12-17 70.0\n2013-12-17 64.0\n2013-12-17 63.0\n2013-12-17 72.0\n2013-12-17 73.0\n2013-12-17 68.0\n2013-12-17 65.0\n2013-12-18 73.0\n2013-12-18 72.0\n2013-12-18 70.0\n2013-12-18 66.0\n2013-12-18 72.0\n2013-12-18 70.0\n2013-12-18 72.0\n2013-12-18 63.0\n2013-12-19 69.0\n2013-12-19 68.0\n2013-12-19 69.0\n2013-12-19 72.0\n2013-12-19 
71.0\n2013-12-19 71.0\n2013-12-19 70.0\n2013-12-19 70.0\n2013-12-19 72.0\n2013-12-20 69.0\n2013-12-20 79.0\n2013-12-20 73.0\n2013-12-20 70.0\n2013-12-20 75.0\n2013-12-20 70.0\n2013-12-20 72.0\n2013-12-20 75.0\n2013-12-20 69.0\n2013-12-21 69.0\n2013-12-21 76.0\n2013-12-21 75.0\n2013-12-21 67.0\n2013-12-21 74.0\n2013-12-21 71.0\n2013-12-21 71.0\n2013-12-22 72.0\n2013-12-22 69.0\n2013-12-22 67.0\n2013-12-22 67.0\n2013-12-22 69.0\n2013-12-23 73.0\n2013-12-23 64.0\n2013-12-23 72.0\n2013-12-23 68.0\n2013-12-23 69.0\n2013-12-23 74.0\n2013-12-23 72.0\n2013-12-23 73.0\n2013-12-23 71.0\n2013-12-24 70.0\n2013-12-24 74.0\n2013-12-24 70.0\n2013-12-24 73.0\n2013-12-24 63.0\n2013-12-24 75.0\n2013-12-24 70.0\n2013-12-24 75.0\n2013-12-25 73.0\n2013-12-25 67.0\n2013-12-25 73.0\n2013-12-25 68.0\n2013-12-25 73.0\n2013-12-25 76.0\n2013-12-26 74.0\n2013-12-26 71.0\n2013-12-26 70.0\n2013-12-26 72.0\n2013-12-26 67.0\n2013-12-26 74.0\n2013-12-26 77.0\n2013-12-26 68.0\n2013-12-27 70.0\n2013-12-27 75.0\n2013-12-27 71.0\n2013-12-27 73.0\n2013-12-27 69.0\n2013-12-27 79.0\n2013-12-27 73.0\n2013-12-27 74.0\n2013-12-28 75.0\n2013-12-28 73.0\n2013-12-28 71.0\n2013-12-28 68.0\n2013-12-28 73.0\n2013-12-28 74.0\n2013-12-28 74.0\n2013-12-28 74.0\n2013-12-29 75.0\n2013-12-29 71.0\n2013-12-29 68.0\n2013-12-29 70.0\n2013-12-29 72.0\n2013-12-29 73.0\n2013-12-29 73.0\n2013-12-30 64.0\n2013-12-30 72.0\n2013-12-30 72.0\n2013-12-30 70.0\n2013-12-30 69.0\n2013-12-30 72.0\n2013-12-30 74.0\n2013-12-31 65.0\n2013-12-31 67.0\n2013-12-31 75.0\n2013-12-31 71.0\n2013-12-31 68.0\n2013-12-31 73.0\n2014-12-01 73.0\n2014-12-01 68.0\n2014-12-01 74.0\n2014-12-01 72.0\n2014-12-01 70.0\n2014-12-01 72.0\n2014-12-01 70.0\n2014-12-01 74.0\n2014-12-02 73.0\n2014-12-02 70.0\n2014-12-02 75.0\n2014-12-02 72.0\n2014-12-02 62.0\n2014-12-02 74.0\n2014-12-02 71.0\n2014-12-02 68.0\n2014-12-03 68.0\n2014-12-03 68.0\n2014-12-03 71.0\n2014-12-03 74.0\n2014-12-03 73.0\n2014-12-03 67.0\n2014-12-03 72.0\n2014-12-03 69.0\n2014-12-04 
67.0\n2014-12-04 72.0\n2014-12-04 70.0\n2014-12-04 67.0\n2014-12-04 73.0\n2014-12-04 77.0\n2014-12-04 66.0\n2014-12-04 72.0\n2014-12-05 70.0\n2014-12-05 73.0\n2014-12-05 71.0\n2014-12-05 70.0\n2014-12-05 76.0\n2014-12-05 68.0\n2014-12-05 75.0\n2014-12-05 70.0\n2014-12-06 70.0\n2014-12-06 66.0\n2014-12-06 65.0\n2014-12-06 76.0\n2014-12-06 72.0\n2014-12-06 68.0\n2014-12-06 76.0\n2014-12-07 66.0\n2014-12-07 64.0\n2014-12-07 62.0\n2014-12-07 67.0\n2014-12-07 65.0\n2014-12-07 67.0\n2014-12-08 62.0\n2014-12-08 69.0\n2014-12-08 77.0\n2014-12-08 69.0\n2014-12-08 62.0\n2014-12-08 78.0\n2014-12-08 67.0\n2014-12-09 70.0\n2014-12-09 70.0\n2014-12-09 70.0\n2014-12-09 69.0\n2014-12-09 65.0\n2014-12-09 64.0\n2014-12-09 75.0\n2014-12-09 76.0\n2014-12-10 76.0\n2014-12-10 75.0\n2014-12-10 63.0\n2014-12-10 67.0\n2014-12-10 78.0\n2014-12-10 70.0\n2014-12-10 61.0\n2014-12-10 72.0\n2014-12-11 69.0\n2014-12-11 74.0\n2014-12-11 70.0\n2014-12-11 74.0\n2014-12-11 75.0\n2014-12-11 71.0\n2014-12-11 74.0\n2014-12-11 74.0\n2014-12-12 70.0\n2014-12-12 71.0\n2014-12-12 71.0\n2014-12-12 68.0\n2014-12-12 64.0\n2014-12-12 68.0\n2014-12-12 71.0\n2014-12-13 74.0\n2014-12-13 67.0\n2014-12-13 71.0\n2014-12-13 74.0\n2014-12-13 73.0\n2014-12-13 78.0\n2014-12-14 73.0\n2014-12-14 68.0\n2014-12-14 70.0\n2014-12-14 67.0\n2014-12-14 71.0\n2014-12-14 71.0\n2014-12-15 72.0\n2014-12-15 72.0\n2014-12-15 70.0\n2014-12-15 65.0\n2014-12-15 71.0\n2014-12-15 77.0\n2014-12-15 72.0\n2014-12-15 71.0\n2014-12-16 71.0\n2014-12-16 69.0\n2014-12-16 74.0\n2014-12-16 74.0\n2014-12-16 72.0\n2014-12-16 74.0\n2014-12-16 75.0\n2014-12-16 66.0\n2014-12-17 72.0\n2014-12-17 75.0\n2014-12-17 71.0\n2014-12-17 74.0\n2014-12-17 71.0\n2014-12-17 70.0\n2014-12-17 77.0\n2014-12-17 64.0\n2014-12-18 74.0\n2014-12-18 71.0\n2014-12-18 74.0\n2014-12-18 75.0\n2014-12-18 71.0\n2014-12-18 73.0\n2014-12-18 69.0\n2014-12-18 72.0\n2014-12-19 68.0\n2014-12-19 70.0\n2014-12-19 78.0\n2014-12-19 75.0\n2014-12-19 75.0\n2014-12-19 76.0\n2014-12-19 
71.0\n2014-12-19 76.0\n2014-12-20 78.0\n2014-12-20 78.0\n2014-12-20 81.0\n2014-12-20 72.0\n2014-12-20 76.0\n2014-12-20 74.0\n2014-12-21 72.0\n2014-12-21 70.0\n2014-12-21 76.0\n2014-12-21 72.0\n2014-12-21 78.0\n2014-12-22 77.0\n2014-12-22 67.0\n2014-12-22 73.0\n2014-12-22 69.0\n2014-12-22 72.0\n2014-12-22 74.0\n2014-12-22 68.0\n2014-12-22 68.0\n2014-12-23 70.0\n2014-12-23 69.0\n2014-12-23 73.0\n2014-12-23 68.0\n2014-12-23 72.0\n2014-12-23 71.0\n2014-12-23 70.0\n2014-12-23 76.0\n2014-12-24 66.0\n2014-12-24 68.0\n2014-12-24 70.0\n2014-12-24 69.0\n2014-12-24 68.0\n2014-12-24 64.0\n2014-12-24 66.0\n2014-12-25 61.0\n2014-12-25 62.0\n2014-12-25 76.0\n2014-12-25 66.0\n2014-12-25 60.0\n2014-12-25 66.0\n2014-12-25 77.0\n2014-12-26 76.0\n2014-12-26 66.0\n2014-12-26 63.0\n2014-12-26 63.0\n2014-12-26 70.0\n2014-12-26 65.0\n2014-12-26 63.0\n2014-12-27 68.0\n2014-12-27 62.0\n2014-12-27 72.0\n2014-12-27 63.0\n2014-12-27 62.0\n2014-12-27 65.0\n2014-12-28 67.0\n2014-12-28 63.0\n2014-12-28 76.0\n2014-12-28 68.0\n2014-12-28 69.0\n2014-12-28 64.0\n2014-12-29 57.0\n2014-12-29 63.0\n2014-12-29 67.0\n2014-12-29 64.0\n2014-12-29 72.0\n2014-12-29 68.0\n2014-12-29 63.0\n2014-12-29 69.0\n2014-12-30 63.0\n2014-12-30 66.0\n2014-12-30 75.0\n2014-12-30 64.0\n2014-12-30 63.0\n2014-12-30 67.0\n2014-12-31 72.0\n2014-12-31 56.0\n2014-12-31 59.0\n2014-12-31 57.0\n2014-12-31 56.0\n2014-12-31 64.0\n2015-12-01 69.0\n2015-12-01 75.0\n2015-12-01 73.0\n2015-12-01 78.0\n2015-12-01 73.0\n2015-12-02 76.0\n2015-12-02 74.0\n2015-12-02 74.0\n2015-12-02 70.0\n2015-12-02 77.0\n2015-12-02 65.0\n2015-12-02 76.0\n2015-12-03 71.0\n2015-12-03 75.0\n2015-12-03 76.0\n2015-12-03 75.0\n2015-12-03 75.0\n2015-12-03 75.0\n2015-12-03 76.0\n2015-12-04 80.0\n2015-12-04 74.0\n2015-12-04 75.0\n2015-12-04 71.0\n2015-12-04 81.0\n2015-12-04 75.0\n2015-12-04 77.0\n2015-12-05 76.0\n2015-12-05 70.0\n2015-12-05 75.0\n2015-12-05 70.0\n2015-12-05 72.0\n2015-12-06 71.0\n2015-12-06 72.0\n2015-12-06 75.0\n2015-12-06 73.0\n2015-12-07 
69.0\n2015-12-07 69.0\n2015-12-07 69.0\n2015-12-07 71.0\n2015-12-07 70.0\n2015-12-07 79.0\n2015-12-08 70.0\n2015-12-08 71.0\n2015-12-08 70.0\n2015-12-08 73.0\n2015-12-08 68.0\n2015-12-08 71.0\n2015-12-08 69.0\n2015-12-09 77.0\n2015-12-09 72.0\n2015-12-09 79.0\n2015-12-09 74.0\n2015-12-09 73.0\n2015-12-09 71.0\n2015-12-09 74.0\n2015-12-10 70.0\n2015-12-10 71.0\n2015-12-10 74.0\n2015-12-10 74.0\n2015-12-10 72.0\n2015-12-10 77.0\n2015-12-10 74.0\n2015-12-11 72.0\n2015-12-11 77.0\n2015-12-11 75.0\n2015-12-11 75.0\n2015-12-11 78.0\n2015-12-11 75.0\n2015-12-12 76.0\n2015-12-12 71.0\n2015-12-12 72.0\n2015-12-12 72.0\n2015-12-12 72.0\n2015-12-12 79.0\n2015-12-13 70.0\n2015-12-13 69.0\n2015-12-13 78.0\n2015-12-13 69.0\n2015-12-13 69.0\n2015-12-13 78.0\n2015-12-14 69.0\n2015-12-14 67.0\n2015-12-14 77.0\n2015-12-14 66.0\n2015-12-14 69.0\n2015-12-15 73.0\n2015-12-15 75.0\n2015-12-15 79.0\n2015-12-15 74.0\n2015-12-15 75.0\n2015-12-15 74.0\n2015-12-16 75.0\n2015-12-16 75.0\n2015-12-16 75.0\n2015-12-16 82.0\n2015-12-16 73.0\n2015-12-16 74.0\n2015-12-16 75.0\n2015-12-17 69.0\n2015-12-17 74.0\n2015-12-17 72.0\n2015-12-17 74.0\n2015-12-17 75.0\n2015-12-17 74.0\n2015-12-17 77.0\n2015-12-18 75.0\n2015-12-18 73.0\n2015-12-18 71.0\n2015-12-18 74.0\n2015-12-18 72.0\n2015-12-18 76.0\n2015-12-18 75.0\n2015-12-19 78.0\n2015-12-19 68.0\n2015-12-19 76.0\n2015-12-19 76.0\n2015-12-19 76.0\n2015-12-19 78.0\n2015-12-20 72.0\n2015-12-20 73.0\n2015-12-20 74.0\n2015-12-20 74.0\n2015-12-20 75.0\n2015-12-21 75.0\n2015-12-21 75.0\n2015-12-21 78.0\n2015-12-21 75.0\n2015-12-22 75.0\n2015-12-22 74.0\n2015-12-22 71.0\n2015-12-22 74.0\n2015-12-22 77.0\n2015-12-22 74.0\n2015-12-23 70.0\n2015-12-23 78.0\n2015-12-23 73.0\n2015-12-23 75.0\n2015-12-23 74.0\n2015-12-23 73.0\n2015-12-24 76.0\n2015-12-24 74.0\n2015-12-24 68.0\n2015-12-24 77.0\n2015-12-24 70.0\n2015-12-24 72.0\n2015-12-25 73.0\n2015-12-25 71.0\n2015-12-25 79.0\n2015-12-25 70.0\n2015-12-25 78.0\n2015-12-25 74.0\n2015-12-26 69.0\n2015-12-26 
75.0\n2015-12-26 75.0\n2015-12-26 74.0\n2015-12-26 72.0\n2015-12-27 74.0\n2015-12-27 76.0\n2015-12-27 71.0\n2015-12-27 74.0\n2015-12-27 76.0\n2015-12-27 74.0\n2015-12-27 75.0\n2015-12-28 74.0\n2015-12-28 68.0\n2015-12-28 76.0\n2015-12-28 73.0\n2015-12-28 77.0\n2015-12-28 75.0\n2015-12-29 70.0\n2015-12-29 76.0\n2015-12-29 72.0\n2015-12-29 69.0\n2015-12-29 67.0\n2015-12-29 67.0\n2015-12-30 74.0\n2015-12-30 71.0\n2015-12-30 74.0\n2015-12-30 70.0\n2015-12-31 70.0\n2015-12-31 69.0\n2015-12-31 72.0\n2015-12-31 73.0\n2015-12-31 69.0\n2016-12-01 71.0\n2016-12-01 72.0\n2016-12-01 73.0\n2016-12-01 75.0\n2016-12-01 75.0\n2016-12-01 74.0\n2016-12-01 76.0\n2016-12-02 71.0\n2016-12-02 77.0\n2016-12-02 70.0\n2016-12-02 72.0\n2016-12-02 75.0\n2016-12-02 71.0\n2016-12-02 70.0\n2016-12-03 71.0\n2016-12-03 72.0\n2016-12-03 75.0\n2016-12-03 75.0\n2016-12-03 67.0\n2016-12-03 69.0\n2016-12-04 71.0\n2016-12-04 69.0\n2016-12-04 70.0\n2016-12-04 77.0\n2016-12-04 76.0\n2016-12-04 75.0\n2016-12-05 68.0\n2016-12-05 67.0\n2016-12-05 70.0\n2016-12-05 68.0\n2016-12-05 68.0\n2016-12-05 67.0\n2016-12-05 76.0\n2016-12-06 67.0\n2016-12-06 70.0\n2016-12-06 74.0\n2016-12-06 67.0\n2016-12-06 67.0\n2016-12-06 70.0\n2016-12-07 69.0\n2016-12-07 68.0\n2016-12-07 71.0\n2016-12-07 76.0\n2016-12-07 74.0\n2016-12-07 69.0\n2016-12-07 71.0\n2016-12-08 73.0\n2016-12-08 75.0\n2016-12-08 71.0\n2016-12-08 73.0\n2016-12-08 70.0\n2016-12-08 70.0\n2016-12-08 72.0\n2016-12-09 70.0\n2016-12-09 68.0\n2016-12-09 70.0\n2016-12-09 68.0\n2016-12-09 72.0\n2016-12-09 70.0\n2016-12-10 76.0\n2016-12-10 70.0\n2016-12-10 69.0\n2016-12-10 72.0\n2016-12-10 71.0\n2016-12-10 68.0\n2016-12-11 69.0\n2016-12-11 78.0\n2016-12-11 70.0\n2016-12-11 69.0\n2016-12-11 72.0\n2016-12-12 66.0\n2016-12-12 66.0\n2016-12-12 68.0\n2016-12-12 67.0\n2016-12-12 70.0\n2016-12-12 65.0\n2016-12-12 75.0\n2016-12-13 72.0\n2016-12-13 68.0\n2016-12-13 70.0\n2016-12-13 67.0\n2016-12-13 68.0\n2016-12-13 69.0\n2016-12-13 65.0\n2016-12-14 68.0\n2016-12-14 
71.0\n2016-12-14 68.0\n2016-12-14 70.0\n2016-12-14 70.0\n2016-12-14 69.0\n2016-12-14 72.0\n2016-12-15 64.0\n2016-12-15 70.0\n2016-12-15 62.0\n2016-12-15 68.0\n2016-12-15 66.0\n2016-12-15 75.0\n2016-12-15 74.0\n2016-12-16 75.0\n2016-12-16 63.0\n2016-12-16 60.0\n2016-12-16 63.0\n2016-12-16 69.0\n2016-12-16 62.0\n2016-12-16 66.0\n2016-12-17 75.0\n2016-12-17 70.0\n2016-12-17 71.0\n2016-12-17 76.0\n2016-12-17 71.0\n2016-12-18 73.0\n2016-12-18 69.0\n2016-12-18 74.0\n2016-12-18 67.0\n2016-12-18 67.0\n2016-12-18 76.0\n2016-12-19 71.0\n2016-12-19 71.0\n2016-12-19 69.0\n2016-12-19 69.0\n2016-12-19 72.0\n2016-12-19 78.0\n2016-12-19 76.0\n2016-12-20 77.0\n2016-12-20 77.0\n2016-12-20 76.0\n2016-12-20 76.0\n2016-12-20 74.0\n2016-12-20 73.0\n2016-12-20 76.0\n2016-12-21 74.0\n2016-12-21 76.0\n2016-12-21 73.0\n2016-12-21 74.0\n2016-12-21 72.0\n2016-12-21 70.0\n2016-12-21 77.0\n2016-12-22 70.0\n2016-12-22 73.0\n2016-12-22 73.0\n2016-12-22 68.0\n2016-12-22 72.0\n2016-12-22 71.0\n2016-12-22 70.0\n2016-12-23 69.0\n2016-12-23 71.0\n2016-12-23 73.0\n2016-12-23 72.0\n2016-12-23 68.0\n2016-12-23 69.0\n2016-12-23 71.0\n2016-12-24 78.0\n2016-12-24 74.0\n2016-12-24 69.0\n2016-12-24 73.0\n2016-12-24 74.0\n2016-12-24 74.0\n2016-12-25 74.0\n2016-12-25 78.0\n2016-12-25 74.0\n2016-12-25 74.0\n2016-12-25 69.0\n2016-12-26 72.0\n2016-12-26 75.0\n2016-12-26 71.0\n2016-12-26 74.0\n2016-12-26 74.0\n2016-12-26 74.0\n2016-12-27 74.0\n2016-12-27 74.0\n2016-12-27 71.0\n2016-12-27 75.0\n2016-12-27 71.0\n2016-12-27 73.0\n2016-12-28 71.0\n2016-12-28 71.0\n2016-12-28 72.0\n2016-12-28 72.0\n2016-12-28 71.0\n2016-12-28 71.0\n2016-12-28 73.0\n2016-12-29 74.0\n2016-12-29 69.0\n2016-12-29 77.0\n2016-12-29 73.0\n2016-12-29 71.0\n2016-12-29 72.0\n2016-12-29 73.0\n2016-12-30 69.0\n2016-12-30 69.0\n2016-12-30 69.0\n2016-12-30 71.0\n2016-12-30 68.0\n2016-12-30 65.0\n2016-12-30 72.0\n2016-12-31 67.0\n2016-12-31 72.0\n2016-12-31 66.0\n2016-12-31 71.0\n2016-12-31 65.0\n"
],
[
"# 9. Calculate and print out the summary statistics for the Decemeber temperature DataFrame.\ndec_df.describe()",
"_____no_output_____"
],
[
"# D3:Statistical analysis ",
"_____no_output_____"
],
[
"# Plot the Data\nplt.boxplot([june_df['June Temperature'], dec_df['December Temperature']])\nplt.xticks([1, 2], ['June', 'December'])",
"_____no_output_____"
],
[
"# Additional queries \n# Precipitation\nprcp_jun = session.query(Measurement.date, Measurement.prcp).filter(extract('month', Measurement.date) == 6).all()\nprcp_dec = session.query(Measurement.date, Measurement.prcp).filter(extract('month', Measurement.date) == 12).all()",
"_____no_output_____"
],
[
"prcp_jun_df = pd.DataFrame(prcp_jun, columns=['date', 'June Precipitation'])\nprcp_jun_df.set_index(prcp_jun_df['date'], inplace=True)\nprcp_jun_df = prcp_jun_df.sort_index()\n\nprcp_dec_df = pd.DataFrame(prcp_dec, columns=['date', 'December Precipitation'])\nprcp_dec_df.set_index(prcp_dec_df['date'], inplace=True)\nprcp_dec_df = prcp_dec_df.sort_index()",
"_____no_output_____"
],
[
"prcp_jun_df.describe()",
"_____no_output_____"
],
[
"prcp_dec_df.describe()",
"_____no_output_____"
],
[
"prcp_jun_df.boxplot()",
"_____no_output_____"
],
[
"prcp_dec_df.boxplot()",
"_____no_output_____"
],
[
"ax1 = prcp_jun_df.plot()",
"_____no_output_____"
],
[
"ax2 = prcp_dec_df.plot()",
"_____no_output_____"
],
[
"git",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9a73c4f61088cdf4fb92d573b70526d70d5064
| 9,116 |
ipynb
|
Jupyter Notebook
|
MANUPULANDO, EDITANDO E MODIFICANDO ARQUIVOS NO COMPUTADOR/Aula - RPA Python.ipynb
|
DouglasCarvalhoPereira/Especializaco-Python
|
7dface66ef0c86e183e5a5f1a2c9e6f753647d69
|
[
"MIT"
] | null | null | null |
MANUPULANDO, EDITANDO E MODIFICANDO ARQUIVOS NO COMPUTADOR/Aula - RPA Python.ipynb
|
DouglasCarvalhoPereira/Especializaco-Python
|
7dface66ef0c86e183e5a5f1a2c9e6f753647d69
|
[
"MIT"
] | null | null | null |
MANUPULANDO, EDITANDO E MODIFICANDO ARQUIVOS NO COMPUTADOR/Aula - RPA Python.ipynb
|
DouglasCarvalhoPereira/Especializaco-Python
|
7dface66ef0c86e183e5a5f1a2c9e6f753647d69
|
[
"MIT"
] | null | null | null | 30.386667 | 207 | 0.518429 |
[
[
[
"# RPA com Python\n\n- O que é RPA?\n- Por que isso é diferente de Selenium/Web-Scraping e do que vimos até agora?\n - Pontos Positivos\n - Pontos Negativos\n- Biblioteca usada:\n - pip install pyautogui\n - https://pyautogui.readthedocs.io/en/latest/\n - Para os comandos de imagem pode ser que seja necessário instalar pip install pillow\n - Para resolver problemas de caracteres especiais vamos usar um macete com a pyperclip\n \n- Link com um resumo dos principais comandos: https://pyautogui.readthedocs.io/en/latest/quickstart.html",
"_____no_output_____"
],
[
"## Desafio\n\n- Vamos automatizar a extração de informações de um sistema e envio de um relatório por e-mail\n- No nosso caso, para todo mundo conseguir fazer o mesmo programa, o nosso \"sistema\" vai ser o Gmail, mas o mesmo processo pode ser feito com qualquer programa do seu computador e qualquer sistema\n - Passo 1: Entrar no sistema (entrar no Gmail)\n - Passo 2: Entrar em uma aba específica do sistema onde tem o nosso relatório (Aba Contatos)\n - Passo 3: Exportar o Relatório (Exportar Contatos)\n - Passo 4: Pegar o relatório exportado, tratar e pegar as informações que queremos\n - Passo 5: Preencher/Atualizar informações do sistema (No nosso caso, criar um e-mail e enviar)",
"_____no_output_____"
]
],
[
[
"import pyautogui\nimport time\n\n# pyautogui.write() -> escreve\n# pyautogui.click -> clica\n# pyautogui.locateOnScreen -> identifica uma imagem na sua tela\n# pyautogui.hotkey -> usa atalhos do teclado (combinação de teclas)\n# pyautogui.press -> aperta um botão do teclado\n# print(pyautogui.KEYBOARD_KEYS)\n\npyautogui.alert('O código vai começar. Não mexa em NADA enquanto o código tiver rodando. Quando finalizar, eu te aviso')\n\npyautogui.PAUSE = 1\n# apertar o windows do teclado\npyautogui.press('win')\n# digitar chrome\npyautogui.write(\"chrome\")\n# apertar enter\npyautogui.press('enter')\n\n# entrar no Gmail\npyautogui.write('gmail')\npyautogui.press('enter')\n\n#esperar carregar o google\nwhile not pyautogui.locateOnScreen('busca_google.png'):\n time.sleep(1)\n\n# localizar a imagem -> vai te dar 4 informações: posicao x, posicao y, largura e altura\nx, y, largura, altura = pyautogui.locateOnScreen('busca_google.png')\n# clicar no meio da imagem\npyautogui.click(x + largura/2, y + altura/2)\n\n\n#esperar o gmail\nwhile not pyautogui.locateOnScreen('logo_gmail.png'):\n time.sleep(1)\n\n# entrar em contatos\nx, y, largura, altura = pyautogui.locateOnScreen('pontinhos_menu.png')\npyautogui.click(x + largura/2, y + altura/2)\n\ntime.sleep(1)\nx, y, largura, altura = pyautogui.locateOnScreen('contatos.png')\npyautogui.click(x + largura/2, y + altura/2)\n\n#esperar o contatos\nwhile not pyautogui.locateOnScreen('tela_contatos.png'):\n time.sleep(1)\n\n# exportar os contatos\nx, y, largura, altura = pyautogui.locateOnScreen('exportar.png')\npyautogui.click(x + largura/2, y + altura/2)\nx, y, largura, altura = pyautogui.locateOnScreen('confirmar_exportar.png')\npyautogui.click(x + largura/2, y + altura/2)",
"_____no_output_____"
]
],
[
[
"### Agora vamos escrever o e-mail",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport pyperclip\n\ntime.sleep(2)\ndf = pd.read_csv(r'C://Users/joaop/Downloads/contacts.csv')\ndf = df.dropna(axis=1)\ndisplay(df)\n\npyautogui.hotkey('ctrl', 'pgup')\n\nfor email in df['E-mail 1 - Value']:\n #clicar no botão escrever\n time.sleep(1)\n x, y, largura, altura = pyautogui.locateOnScreen('escrever.png')\n pyautogui.click(x + largura/2, y + altura/2)\n time.sleep(1)\n # escrever o email\n pyautogui.write(email)\n # enter\n pyautogui.press('enter')\n #tab para o assunto do email\n pyautogui.press('tab')\n pyautogui.write('Lira Caloteiro')\n #tab para o corpo do email\n pyautogui.press('tab')\n texto = \"\"\"\n Coe João Lira,\n \n Para de dar calote na Hashtag e paga as parcelas aí, namoral.\n \n Abs e tmj\"\"\"\n pyperclip.copy(texto)\n pyautogui.hotkey('ctrl', 'v')\n pyautogui.hotkey('ctrl', 'enter')\n \n \npyautogui.alert('O código terminou, pode pegar o seu computador de volta')",
"_____no_output_____"
]
],
[
[
"### E se eu já tiver com a aba aberta, como que eu coloco ela na frente?",
"_____no_output_____"
]
],
[
[
"while not pyautogui.locateOnScreen('paint.png'):\n pyautogui.hotkey('alt', 'shift', 'tab') \nprint(\"Achei o paint\")",
"Achei o paint\n"
]
],
[
[
"### Como descobrir a posição do mouse do local que eu quero",
"_____no_output_____"
]
],
[
[
"#pyautogui.click(2470, 38)\nprint(pyautogui.position())",
"Point(x=3418, y=259)\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9a743577c9c0d141a32e98adafe5d09551e665
| 14,316 |
ipynb
|
Jupyter Notebook
|
08_errors/08-errors.ipynb
|
Py4Phy/PHY432-resources
|
c26d95eaf5c28e25da682a61190e12ad6758a938
|
[
"CC-BY-4.0"
] | null | null | null |
08_errors/08-errors.ipynb
|
Py4Phy/PHY432-resources
|
c26d95eaf5c28e25da682a61190e12ad6758a938
|
[
"CC-BY-4.0"
] | 1 |
2022-03-03T21:47:56.000Z
|
2022-03-03T21:47:56.000Z
|
08_errors/08-errors.ipynb
|
Py4Phy/PHY432-resources
|
c26d95eaf5c28e25da682a61190e12ad6758a938
|
[
"CC-BY-4.0"
] | null | null | null | 24.725389 | 257 | 0.511037 |
[
[
[
"# 08 Errors\n\n(See also *Computational Physics* (Landau, Páez, Bordeianu), Chapter 3)\n\n\nThese slides include material from *Computational Physics. eTextBook Python 3rd Edition.* Copyright © 2012 Landau, Rubin, Páez. Used under the Creative-Commons Attribution-NonCommerical-ShareAlike 3.0 Unported License. ",
"_____no_output_____"
],
[
"## Stupidity or Incompetence\n(e.g., [PEBCAK](https://en.wiktionary.org/wiki/PEBCAK))",
"_____no_output_____"
],
[
"## Random errors \n- cosmic rays\n- random bit flips",
"_____no_output_____"
],
[
"## Approximation errors\n\n\"**algorithmic errors**\"\n - simplifying and adapting mathematics to the computer\n - should decrease as $N$ increases\n\n#### Example:\nApproximate $\\sin(x)$ with its truncated series expansion:\n\\begin{align}\n\\sin x &= \\sum_{n=1}^{+\\infty} \\frac{(-1)^{n-1} x^{2n-1}}{(2n - 1)!}\\\\\n &\\approx \\sum_{n=1}^{N} \\frac{(-1)^{n-1} x^{2n-1}}{(2n - 1)!} + \\mathcal{E}(x, N)\n\\end{align}",
"_____no_output_____"
],
[
"## Round-off errors\n- finite precision for storing floating-point numbers (32 bit, 64 bit)\n- not known exactly (treat as uncertainty)\n- can *accumulate* and lead to *garbage*\n\n#### Example: \nAssume you can only store four decimals:\n\n\\begin{align}\n \\text{storage}:&\\quad \\frac{1}{3} = 0.3333_c \\quad\\text{and}\\quad \\frac{2}{3} = 0.6667_c\\\\\n \\text{exact}:&\\quad 2\\times\\frac{1}{3} - \\frac{2}{3} = 0\\\\\n \\text{computer}:&\\quad 2 \\times 0.3333 - 0.6667 = -0.0001 \\neq 0\n\\end{align}",
"_____no_output_____"
],
[
"... now imagine adding \"$2\\times\\frac{1}{3} - \\frac{2}{3}$\" in a loop 100,000 times.",
"_____no_output_____"
],
[
"## The problems with *subtractive cancelation* \nModel the computer representation $x_c$ of a number $x$ as\n\n$$\nx_c \\simeq x(1+\\epsilon_x)\n$$\n\nwith the *relative* error $|\\epsilon_x| \\approx \\epsilon_m$ (similar to machine precision).\n\nNote: The *absolute* error is $\\Delta x = x_c - x$ and is related to the relative error by $\\epsilon_x = \\Delta x/x$.",
"_____no_output_____"
],
[
"What happens when we subtract two numbers $b$ and $c$: \n\n$$a = b - c$$",
"_____no_output_____"
],
[
"\\begin{gather}\na_c = b_c - c_c = b(1+\\epsilon_b) - c(1+\\epsilon_c)\\\\\n\\frac{a_c}{a} = 1 + \\frac{b}{a}\\epsilon_b - \\frac{c}{a} \\epsilon_c\n\\end{gather}\n",
"_____no_output_____"
],
[
"No guarantee that the errors cancel, and the relative error on $a$\n\n$$\n\\epsilon_a = \\frac{a_c}{a} - 1 = \\frac{b}{a}\\epsilon_b - \\frac{c}{a} \\epsilon_c\n$$ \n\ncan be huge for small $a$!",
"_____no_output_____"
],
[
"### Subtracting two nearly equal numbers\n\n$$b \\approx c$$ is bad!",
"_____no_output_____"
],
[
"\\begin{align}\n\\frac{a_c}{a} &= 1 + \\frac{b}{a}(\\epsilon_b - \\epsilon_c) \\\\\n\\left| \\frac{a_c}{a} \\right| &\\leq 1 + \\left| \\frac{b}{a} \\right| (|\\epsilon_b| + |\\epsilon_a|)\n\\end{align}\n\ni.e. the large number $b/a$ magnifies the error. ",
"_____no_output_____"
],
[
"# Beware of subtractions!\n\n**If you subtract two large numbers and end up with a small one, then the small one is less significant than any of the large ones.**",
"_____no_output_____"
],
[
"## Round-off errors\n\nRepeated calculations of quantities with errors beget new errors: In general, analyze with the rules of *error propagation*: function $f(x_1, x_2, \\dots, x_N)$ with absolute errors on the $x_i$ of $\\Delta x_i$ (i.e., $x_i \\pm \\Delta x_i$):\n$$\n\\Delta f(x_1, x_2, \\dots; \\Delta x_1, \\Delta x_2, \\dots) =\n \\sqrt{\\sum_{i=1}^N \\left(\\Delta x_i \\frac{\\partial f}{\\partial x_i}\\right)^2}\n$$\n\nNote: relative error $$\\epsilon_i = \\frac{\\Delta x_i}{x_i}$$",
"_____no_output_____"
],
[
"Example: division $a = b/c$ (... with short cut)\n\\begin{align}\na_c &= \\frac{b_c}{c_c} = \\frac{b(1+\\epsilon_b)}{c(1+\\epsilon_b)} \\\\\n\\frac{a_c}{a} &= \\frac{1+\\epsilon_b}{1+\\epsilon_c} \n = \\frac{(1+\\epsilon_b)(1-\\epsilon_c)}{1-\\epsilon_c^2} \\approx (1+\\epsilon_b)(1-\\epsilon_c)\\\\\n &\\approx 1 + |\\epsilon_b| + |\\epsilon_c|\\\\\n\\epsilon_a = \\frac{a_c}{a} - 1 &\\approx |\\epsilon_b| + |\\epsilon_c|\n\\end{align}\n\n(neglected terms of order $\\mathcal{O}(\\epsilon^2)$); and same for multiplication.\n\n",
"_____no_output_____"
],
[
"**Errors accumulate with every operation.**",
"_____no_output_____"
],
[
"### Model for round-off error accumulation\nView error in each calculation as a step in a *random walk*. The total \"distance\" (i.e. total error) $R$ over $N$ steps of length $r$ (the individual, \"random\" errors), is on average",
"_____no_output_____"
],
[
"$$ R \\approx \\sqrt{N} r $$",
"_____no_output_____"
],
[
"Total relative error $\\epsilon_{\\text{ro}}$ after $N$ calculations with error of the order of the machine precision $\\epsilon_m$ is\n\n$$ \\epsilon_{\\text{ro}} \\approx \\sqrt{N} \\epsilon_m $$\n\n",
"_____no_output_____"
],
[
"(Only a model, depending on algorithm may be less or even $N!$...)",
"_____no_output_____"
],
[
"## Total error of an algorithm\nWhat you need to know to evaluate an algorithm:\n1. Does it converge? (What $N$ do I need?)\n2. How precise are the converged results (What is the error $\\epsilon_\\text{tot}$?)\n3. What is its run time? (How fast is it for a given problem size?)",
"_____no_output_____"
],
[
"The total error contains *approximation* and *round off* errors:\n\n\\begin{gather}\n\\epsilon_\\text{tot} = \\epsilon_\\text{app} + \\epsilon_\\text{ro}\n\\end{gather}\n",
"_____no_output_____"
],
[
"Model for the approximation error for an algorithm that takes $N$ steps (operations) to find a \"good\" answer:\n\n$$\n\\epsilon_\\text{app} \\simeq \\frac{\\alpha}{N^\\beta}\n$$\n",
"_____no_output_____"
],
[
"and round off error as\n\n$$\n\\epsilon_{\\text{ro}} \\approx \\sqrt{N} \\epsilon_m\n$$",
"_____no_output_____"
],
[
"Model for total error:\n$$\n\\epsilon_\\text{tot} = \\frac{\\alpha}{N^\\beta} + \\sqrt{N} \\epsilon_m\n$$",
"_____no_output_____"
],
[
"Analyze $\\log_{10} $ of the relative error (direct readout of number of significant decimals).\n\n<img style=\"align: center\" width=\"80%\" src=\"./images/CompPhys_total_error.png\" />\n\n<span style=\"font-size: small; text-align: right\">Image from Computational Physics. eTextBook Python 3rd Edition. Copyright © 2012 Landau, Rubin, Páez. Used under the Creative-Commons Attribution-NonCommerical-ShareAlike 3.0 Unported License.</span>",
"_____no_output_____"
],
[
"### Example analysis\n\\begin{gather}\n\\epsilon_\\text{app} = \\frac{1}{N^2}, \\quad \\epsilon_\\text{ro} = \\sqrt{N}\\epsilon_m\\\\\n\\epsilon_\\text{tot} = \\frac{1}{N^2} + \\sqrt{N}\\epsilon_m\n\\end{gather}",
"_____no_output_____"
],
[
"Total error is a *minimum* for\n\n\\begin{gather}\n\\frac{d\\epsilon_\\text{tot}}{dN} = -\\frac{2}{N^{3}} + \\frac{1}{2}\\frac{\\epsilon_m}{\\sqrt{N}} = 0, \\quad\\text{thus} \\quad\nN^{5/2} = 4 \\epsilon_m^{-1}\\\\\nN = \\left(\\frac{4}{\\epsilon_m}\\right)^{2/5}\n\\end{gather}\n\n",
"_____no_output_____"
],
[
"What is the best $N$ for single precision $\\epsilon_m \\approx 10^{-7}$?",
"_____no_output_____"
]
],
[
[
"import math\ndef N_opt(eps_m):\n return round(math.pow(4./eps_m, 2./5.))\ndef eps_app(N):\n return 1./(N*N)\ndef eps_ro(N, eps_m):\n return math.sqrt(N)*eps_m",
"_____no_output_____"
],
[
"epsilon_m = 1e-7 # single precision\n\nN = N_opt(epsilon_m)\nerr_app = eps_app(N)\nerr_ro = eps_ro(N, epsilon_m)\nprint(\"best N = {0} (for eps_m={1})\".format(N, epsilon_m))\nprint(\"eps_tot = {0:.3g}\".format(err_app + err_ro))\nprint(\"eps_app = {0:.3g}, eps_ro = {1:.3g}\".format(err_app, err_ro))",
"best N = 1099 (for eps_m=1e-07)\neps_tot = 4.14e-06\neps_app = 8.28e-07, eps_ro = 3.32e-06\n"
]
],
[
[
"Single precision $\\epsilon_m \\approx 10^{-7}$:\n\n$$\nN \\approx 1099\\\\\n\\epsilon_\\text{tot} \\approx 4 \\times 10^{-6} \\\\\n\\epsilon_\\text{app} = 8.28 \\times 10^{-7} \\\\\n\\epsilon_\\text{ro} = 3.32 \\times 10^{-6}\n$$\n",
"_____no_output_____"
],
[
"Here, most of the error is round-off error! What can you do?",
"_____no_output_____"
],
[
"* use double precision (delay round-off error)\n* use a better algorithm, e.g. $\\epsilon_\\text{app}\\simeq \\frac{2}{N^4}$ (uses fewer steps)",
"_____no_output_____"
],
[
"**Better algorithms are always a good idea :-)**\n\nRemember: trade-off between **approximation error** and **rounding error**.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a9a7c33083a378da497a94c4658758291129fc0
| 21,319 |
ipynb
|
Jupyter Notebook
|
homework3/Homework_3.ipynb
|
M0nica/datadatabases
|
c94c3b8894e5357fdd9d664e6e2240c46bc47049
|
[
"MIT"
] | null | null | null |
homework3/Homework_3.ipynb
|
M0nica/datadatabases
|
c94c3b8894e5357fdd9d664e6e2240c46bc47049
|
[
"MIT"
] | null | null | null |
homework3/Homework_3.ipynb
|
M0nica/datadatabases
|
c94c3b8894e5357fdd9d664e6e2240c46bc47049
|
[
"MIT"
] | null | null | null | 29.77514 | 538 | 0.523617 |
[
[
[
"# Homework assignment #3\n\nThese problem sets focus on using the Beautiful Soup library to scrape web pages.\n\n## Problem Set #1: Basic scraping\n\nI've made a web page for you to scrape. It's available [here](http://static.decontextualize.com/widgets2016.html). The page concerns the catalog of a famous [widget](http://en.wikipedia.org/wiki/Widget) company. You'll be answering several questions about this web page. In the cell below, I've written some code so that you end up with a variable called `html_str` that contains the HTML source code of the page, and a variable `document` that stores a Beautiful Soup object.",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nhtml_str = urlopen(\"http://static.decontextualize.com/widgets2016.html\").read()\ndocument = BeautifulSoup(html_str, \"html.parser\")",
"_____no_output_____"
]
],
[
[
"Now, in the cell below, use Beautiful Soup to write an expression that evaluates to the number of `<h3>` tags contained in `widgets2016.html`.",
"_____no_output_____"
]
],
[
[
"h3_tags = document.find_all('h3')\nprint(\"There are\", len(h3_tags), \"h3 tags in widgets2016.html\")",
"There are 4 h3 tags in widgets2016.html\n"
]
],
[
[
"Now, in the cell below, write an expression or series of statements that displays the telephone number beneath the \"Widget Catalog\" header.",
"_____no_output_____"
]
],
[
[
"#h2_tags = document.find_all('h2')\n\n#for h2_tag in h2_tags:\nphone_number = document.find_all('a')[1].string\n\n\nprint(phone_number)",
"212-555-9912\n"
]
],
[
[
"In the cell below, use Beautiful Soup to write some code that prints the names of all the widgets on the page. After your code has executed, `widget_names` should evaluate to a list that looks like this (though not necessarily in this order):\n\n```\nSkinner Widget\nWidget For Furtiveness\nWidget For Strawman\nJittery Widget\nSilver Widget\nDivided Widget\nManicurist Widget\nInfinite Widget\nYellow-Tipped Widget\nUnshakable Widget\nSelf-Knowledge Widget\nWidget For Cinema\n```",
"_____no_output_____"
]
],
[
[
"table_tags = document.find_all('table', {'class':'widgetlist'})\n#print(table_tag)\n\nfor table_tag in table_tags:\n tr_tag = table_tag.find_all('tr')\n# tds = table_tag.find_all('td')\n# print(tds)\n\n for tr in tr_tag:\n print(tr.find('td', {'class':'wname'}).string)",
"Skinner Widget\nWidget For Furtiveness\nWidget For Strawman\nJittery Widget\nSilver Widget\nDivided Widget\nManicurist Widget\nInfinite Widget\nYellow-Tipped Widget\nUnshakable Widget\nSelf-Knowledge Widget\nWidget For Cinema\n"
]
],
[
[
"## Problem set #2: Widget dictionaries\n\nFor this problem set, we'll continue to use the HTML page from the previous problem set. In the cell below, I've made an empty list and assigned it to a variable called `widgets`. Write code that populates this list with dictionaries, one dictionary per widget in the source file. The keys of each dictionary should be `partno`, `wname`, `price`, and `quantity`, and the value for each of the keys should be the value for the corresponding column for each row. After executing the cell, your list should look something like this:\n\n```\n[{'partno': 'C1-9476',\n 'price': '$2.70',\n 'quantity': u'512',\n 'wname': 'Skinner Widget'},\n {'partno': 'JDJ-32/V',\n 'price': '$9.36',\n 'quantity': '967',\n 'wname': u'Widget For Furtiveness'},\n ...several items omitted...\n {'partno': '5B-941/F',\n 'price': '$13.26',\n 'quantity': '919',\n 'wname': 'Widget For Cinema'}]\n```\n\nAnd this expression:\n\n widgets[5]['partno']\n \n... should evaluate to:\n\n LH-74/O\n ",
"_____no_output_____"
]
],
[
[
"widgets = []\n\n# your code here\n\ntable_tags = document.find_all('table', {'class':'widgetlist'})\n#print(table_tag)\n\nfor table_tag in table_tags:\n tr_tag = table_tag.find_all('tr')\n# tds = table_tag.find_all('td')\n# print(tds)\n\n for tr in tr_tag:\n wname = tr.find('td', {'class':'wname'}).string\n partno = tr.find('td', {'class':'partno'}).string\n price = tr.find('td', {'class':'price'}).string\n quantity = tr.find('td', {'class':'quantity'}).string\n widgets.append({'partno': partno, 'price':price , 'quantity':quantity, 'wname' : wname})\n# end your code\n\nwidgets \n\n\n#evaluates to LH-74/O\n# widgets[5]['partno']",
"_____no_output_____"
]
],
[
[
"In the cell below, duplicate your code from the previous question. Modify the code to ensure that the values for `price` and `quantity` in each dictionary are floating-point numbers and integers, respectively. I.e., after executing the cell, your code should display something like this:\n\n [{'partno': 'C1-9476',\n 'price': 2.7,\n 'quantity': 512,\n 'widgetname': 'Skinner Widget'},\n {'partno': 'JDJ-32/V',\n 'price': 9.36,\n 'quantity': 967,\n 'widgetname': 'Widget For Furtiveness'},\n ... some items omitted ...\n {'partno': '5B-941/F',\n 'price': 13.26,\n 'quantity': 919,\n 'widgetname': 'Widget For Cinema'}]\n\n(Hint: Use the `float()` and `int()` functions. You may need to use string slices to convert the `price` field to a floating-point number.)",
"_____no_output_____"
]
],
[
[
"widgets = []\n\n# your code here\ntable_tags = document.find_all('table', {'class':'widgetlist'})\n#print(table_tag)\n\nfor table_tag in table_tags:\n tr_tag = table_tag.find_all('tr')\n# tds = table_tag.find_all('td')\n# print(tds)\n\n for tr in tr_tag:\n wname = tr.find('td', {'class':'wname'}).string\n partno = tr.find('td', {'class':'partno'}).string\n price = tr.find('td', {'class':'price'}).string\n quantity = tr.find('td', {'class':'quantity'}).string\n \n \n # print(pricef)\n pricef = str(price[1:len(price)])\n pricef = float(pricef)\n \n #if you want to ad a '$' sign use the following code\n # priceToAppend = '$' + str(float(pricef))\n # else:\n priceToAppend = pricef\n widgets.append({'partno': partno, 'price': priceToAppend , 'quantity': int(quantity), 'wname' : wname})\n# end your code\n\nwidgets",
"_____no_output_____"
]
],
[
[
"Great! I hope you're having fun. In the cell below, write an expression or series of statements that uses the `widgets` list created in the cell above to calculate the total number of widgets that the factory has in its warehouse.\n\nExpected output: `7928`",
"_____no_output_____"
]
],
[
[
"total_widgets = 0\nfor widget in widgets:\n #print(widget['quantity'])\n total_widgets = total_widgets + widget['quantity']\ntotal_widgets",
"_____no_output_____"
]
],
[
[
"In the cell below, write some Python code that prints the names of widgets whose price is above $9.30.\n\nExpected output:\n\n```\nWidget For Furtiveness\nJittery Widget\nSilver Widget\nInfinite Widget\nWidget For Cinema\n```",
"_____no_output_____"
]
],
[
[
"for widget in widgets:\n price = widget['price']\n if price > 9.30:\n print(widget['wname'])",
"Widget For Furtiveness\nJittery Widget\nSilver Widget\nInfinite Widget\nWidget For Cinema\n"
]
],
[
[
"## Problem set #3: Sibling rivalries\n\nIn the following problem set, you will yet again be working with the data in `widgets2016.html`. In order to accomplish the tasks in this problem set, you'll need to learn about Beautiful Soup's `.find_next_sibling()` method. Here's some information about that method, cribbed from the notes:\n\nOften, the tags we're looking for don't have a distinguishing characteristic, like a class attribute, that allows us to find them using `.find()` and `.find_all()`, and the tags also aren't in a parent-child relationship. This can be tricky! For example, take the following HTML snippet, (which I've assigned to a string called `example_html`):",
"_____no_output_____"
]
],
[
[
"example_html = \"\"\"\n<h2>Camembert</h2>\n<p>A soft cheese made in the Camembert region of France.</p>\n\n<h2>Cheddar</h2>\n<p>A yellow cheese made in the Cheddar region of... France, probably, idk whatevs.</p>\n\"\"\"",
"_____no_output_____"
]
],
[
[
"If our task was to create a dictionary that maps the name of the cheese to the description that follows in the `<p>` tag directly afterward, we'd be out of luck. Fortunately, Beautiful Soup has a `.find_next_sibling()` method, which allows us to search for the next tag that is a sibling of the tag you're calling it on (i.e., the two tags share a parent), that also matches particular criteria. So, for example, to accomplish the task outlined above:",
"_____no_output_____"
]
],
[
[
"example_doc = BeautifulSoup(example_html, \"html.parser\")\ncheese_dict = {}\nfor h2_tag in example_doc.find_all('h2'):\n cheese_name = h2_tag.string\n cheese_desc_tag = h2_tag.find_next_sibling('p')\n cheese_dict[cheese_name] = cheese_desc_tag.string\n\ncheese_dict",
"_____no_output_____"
],
[
"widget_dict = {}\ndocument.find_all('h3')\n\nfor h3_tag in document.find_all('h3'):\n widget_name = h3_tag.string\n widget_desc_tag = h3_tag.find_next_sibling('table')\n\nwidgets = widget_desc_tag.find_all('td', {'class':'partno'})\n\nfor widget in widgets:\n print(widget.string)\n",
"MZ-556/B\nQV-730\nT1-9731\n5B-941/F\n"
],
[
"widget_dict = {}\ndocument.find_all('h3')\n\nfor h3_tag in document.find_all('h3'):\n widget_name = h3_tag.string\n widget_desc_tag = h3_tag.find_next_sibling('table')\n\nwidgets = widget_desc_tag.find_all('td', {'class':'partno'})\n\nfor widget in widgets:\n print(widget.string)\n",
"MZ-556/B\nQV-730\nT1-9731\n5B-941/F\n"
]
],
[
[
"With that knowledge in mind, let's go back to our widgets. In the cell below, write code that uses Beautiful Soup, and in particular the `.find_next_sibling()` method, to print the part numbers of the widgets that are in the table *just beneath* the header \"Hallowed Widgets.\"\n\nExpected output:\n\n```\nMZ-556/B\nQV-730\nT1-9731\n5B-941/F\n```",
"_____no_output_____"
],
[
"Okay, now, the final task. If you can accomplish this, you are truly an expert web scraper. I'll have little web scraper certificates made up and I'll give you one, if you manage to do this thing. And I know you can do it!\n\nIn the cell below, I've created a variable `category_counts` and assigned to it an empty dictionary. Write code to populate this dictionary so that its keys are \"categories\" of widgets (e.g., the contents of the `<h3>` tags on the page: \"Forensic Widgets\", \"Mood widgets\", \"Hallowed Widgets\") and the value for each key is the number of widgets that occur in that category. I.e., after your code has been executed, the dictionary `category_counts` should look like this:\n\n```\n{'Forensic Widgets': 3,\n 'Hallowed widgets': 4,\n 'Mood widgets': 2,\n 'Wondrous widgets': 3}\n```",
"_____no_output_____"
]
],
[
[
"category_counts = {}\n# your code here\n\nfor h3_tag in document.find_all('h3'):\n h3_name = h3_tag.string\n # print(h3_name)\n count = 0 \n h3_desc_tag = h3_tag.find_next_sibling('table')\n widgets = h3_desc_tag.find_all('td', {'class':'partno'})\n for widget in widgets:\n count = count + 1\n # print(count)\n category_counts[h3_name] = count\n# end your code\ncategory_counts",
"_____no_output_____"
]
],
[
[
"Congratulations! You're done.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a9a852017d4eb80eb37a2c03ee0fb4077ac3d15
| 13,690 |
ipynb
|
Jupyter Notebook
|
petalinux/xilinx-vmk180-trd/project-spec/meta-vmk180-multimedia-trd/recipes-apps/vmk180-trd/vmk180-trd/notebooks/vmk180-trd-nb4.ipynb
|
Xilinx/vmk180-trd
|
d58c63ce14fec9631a7cab3d57af88f4dfd5cae1
|
[
"Apache-2.0"
] | 2 |
2021-07-12T14:44:40.000Z
|
2021-09-18T00:43:58.000Z
|
petalinux/xilinx-vmk180-trd/project-spec/meta-vmk180-multimedia-trd/recipes-apps/vmk180-trd/vmk180-trd/notebooks/vmk180-trd-nb4.ipynb
|
Xilinx/vmk180-trd
|
d58c63ce14fec9631a7cab3d57af88f4dfd5cae1
|
[
"Apache-2.0"
] | null | null | null |
petalinux/xilinx-vmk180-trd/project-spec/meta-vmk180-multimedia-trd/recipes-apps/vmk180-trd/vmk180-trd/notebooks/vmk180-trd-nb4.ipynb
|
Xilinx/vmk180-trd
|
d58c63ce14fec9631a7cab3d57af88f4dfd5cae1
|
[
"Apache-2.0"
] | 2 |
2021-03-05T07:26:32.000Z
|
2021-08-16T18:51:26.000Z
| 26.634241 | 325 | 0.562308 |
[
[
[
"",
"_____no_output_____"
],
[
"# 1. Introduction",
"_____no_output_____"
],
[
"This notebook demonstrates how to create two parallel video pipelines using the GStreamer multimedia framework:\n* The first pipeline captures video from a V4L2 device and displays the output on a monitor using a DRM/KMS display device.\n* The second pipeline decodes a VP9 encoded video file and displays the output on the same monitor using the same DRM/KMS display device.\n\nThe display device contains a video mixer which allows targeting different video planes for the individual pipelines with programmable x/y-offsets as well as width and height.\n\nRefer to:\n* nb1 for more details on the video file decode pipeline\n* nb2 for more details on the V4L2 capture pipeline\n* nb3 for more details on the video mixer configuration and display pipeline\n\nIn this notebook, you will:\n1. Create two parallel GStreamer video pipelines using the ``parse_launch()`` API\n2. Create a GStreamer pipeline graph and view it inside this notebook.",
"_____no_output_____"
],
[
"# 2. Imports and Initialization",
"_____no_output_____"
],
[
"Import all python modules required for this notebook. ",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image, display, clear_output\nimport pydot\nimport sys\nimport time\nimport gi\ngi.require_version('Gst', '1.0')\ngi.require_version(\"GstApp\", \"1.0\")\nfrom gi.repository import GObject, GLib, Gst, GstApp",
"_____no_output_____"
]
],
[
[
"This is the VMK180 TRD notebook 4 (nb4).",
"_____no_output_____"
]
],
[
[
"nb = \"nb4\"",
"_____no_output_____"
]
],
[
[
"Create a directory for saving the pipeline graph as dot file. Set the GStreamer debug dot directory environement variable to point to that directory.",
"_____no_output_____"
]
],
[
[
"dotdir = \"/home/root/gst-dot/\" + nb\n!mkdir -p $dotdir\n%env GST_DEBUG_DUMP_DOT_DIR = $dotdir",
"env: GST_DEBUG_DUMP_DOT_DIR=/home/root/gst-dot/nb4\n"
]
],
[
[
"Initialize the GStreamer library. Optionally enable debug (default off) and set the debug level.",
"_____no_output_____"
]
],
[
[
"Gst.init(None)\nGst.debug_set_active(False)\nGst.debug_set_default_threshold(1)",
"_____no_output_____"
]
],
[
[
"# 3. Create String Representation of the First GStreamer Pipeline",
"_____no_output_____"
],
[
"The first pipeline consist of the following elements:\n* ``xlnxvideosrc``\n* ``caps``\n* ``kmssink``",
"_____no_output_____"
],
[
"Describe the ``xlnxvideosrc`` element and its properties as string representation.",
"_____no_output_____"
]
],
[
[
"src_types = [\"vivid\", \"usbcam\", \"mipi\"]\nsrc_type = src_types[1] # Change the source type to vivid, usbcam, or mipi via list index\n\nio_mode = \"mmap\"\nif src_type == \"mipi\":\n io_mode = \"dmabuf\"\n\nsrc_1 = \"xlnxvideosrc src-type=\" + src_type + \" io-mode=\" + io_mode",
"_____no_output_____"
]
],
[
[
"Describe the ``caps`` filter element as string representation.",
"_____no_output_____"
]
],
[
[
"width = 1280\nheight = 720\nfmt = \"YUY2\"\n\ncaps = \"video/x-raw, width=\" + str(width) + \", height=\" + str(height) + \", format=\" + fmt ",
"_____no_output_____"
]
],
[
[
"Describe the ``kmssink`` element and its properties as string representation.",
"_____no_output_____"
]
],
[
[
"driver_name = \"xlnx\"\nplane_id_1 = 39\nxoff_1 = 0\nyoff_1 = 0\nrender_rectangle_1 = \"<\" + str(xoff_1) + \",\" + str(yoff_1) + \",\" + str(width) + \",\" + str(height) + \">\"\n\nsink_1 = \"kmssink\" + \" driver-name=\" + driver_name + \" plane-id=\" + str(plane_id_1) + \" render-rectangle=\" + render_rectangle_1",
"_____no_output_____"
]
],
[
[
"Create a string representation of the first pipeline by concatenating the individual element strings.",
"_____no_output_____"
]
],
[
[
"pipe_1 = src_1 + \" ! \" + caps + \" ! \" + sink_1\nprint(pipe_1)",
"xlnxvideosrc src-type=usbcam io-mode=mmap ! video/x-raw, width=1280, height=720, format=YUY2 ! kmssink driver-name=xlnx plane-id=40 render-rectangle=<0,0,1280,720>\n"
]
],
[
[
"# 4. Create String Representation of the Second GStreamer Pipeline",
"_____no_output_____"
],
[
"The second pipeline consist of the following elements:\n* ``multifilesrc``\n* ``decodebin``\n* ``videoconvert``\n* ``kmssink``",
"_____no_output_____"
],
[
"Describe the ``multifilesrc`` element and its properties as string representation.",
"_____no_output_____"
]
],
[
[
"file_name = \"/usr/share/movies/Big_Buck_Bunny_4K.webm.360p.vp9.webm\"\nloop = True\n\nsrc_2 = \"multifilesrc location=\" + file_name + \" loop=\" + str(loop)",
"_____no_output_____"
]
],
[
[
"Describe the ``decodebin`` and ``videoconvert`` elements as string representations.",
"_____no_output_____"
]
],
[
[
"dec = \"decodebin\"\ncvt = \"videoconvert\"",
"_____no_output_____"
]
],
[
[
"Describe the ``kmssink`` element and its properties as string representation.\n\n**Note:** The same ``kmssink`` element and ``driver-name`` property are used as in pipeline 1, only the ``plane-id`` and the ``render-rectangle`` properties are set differently. The output of this pipeline is shown on a different plane and the x/y-offsets are set such that the planes of pipeline 1 and 2 don't overlap.",
"_____no_output_____"
]
],
[
[
"driver_name = \"xlnx\"\nplane_id_2 = 38\nxoff_2 = 0\nyoff_2 = 720\nwidth_2 = 640\nheight_2 = 360\nrender_rectangle_2 = \"<\" + str(xoff_2) + \",\" + str(yoff_2) + \",\" + str(width_2) + \",\" + str(height_2) + \">\"\n\nsink_2 = \"kmssink\" + \" driver-name=\" + driver_name + \" plane-id=\" + str(plane_id_2) + \" render-rectangle=\" + render_rectangle_2",
"_____no_output_____"
]
],
[
[
"Create a string representation of the second pipeline by concatenating the individual element strings.",
"_____no_output_____"
]
],
[
[
"pipe_2 = src_2 + \" ! \" + dec + \" ! \" + cvt + \" ! \"+ sink_2\nprint(pipe_2)",
"multifilesrc location=/usr/share/movies/Big_Buck_Bunny_4K.webm.360p.vp9.webm loop=True ! decodebin ! videoconvert ! kmssink driver-name=xlnx plane-id=35 render-rectangle=<0,720,640,360>\n"
]
],
[
[
"# 5. Create and Run the GStreamer Pipelines",
"_____no_output_____"
],
[
"Parse the string representations of the first and second pipeline as a single pipeline graph.",
"_____no_output_____"
]
],
[
[
"pipeline = Gst.parse_launch(pipe_1 + \" \" + pipe_2)",
"_____no_output_____"
]
],
[
[
"The ``bus_call`` function listens on the bus for ``EOS`` and ``ERROR`` events. If any of these events occur, stop the pipeline (set to ``NULL`` state) and quit the main loop.\n\nIn case of an ``ERROR`` event, parse and print the error message.",
"_____no_output_____"
]
],
[
[
"def bus_call(bus, message, loop):\n t = message.type\n if t == Gst.MessageType.EOS:\n sys.stdout.write(\"End-of-stream\\n\")\n pipeline.set_state(Gst.State.NULL)\n loop.quit()\n elif t == Gst.MessageType.ERROR:\n err, debug = message.parse_error()\n sys.stderr.write(\"Error: %s: %s\\n\" % (err, debug))\n pipeline.set_state(Gst.State.NULL)\n loop.quit()\n return True",
"_____no_output_____"
]
],
[
[
"Start the pipeline (set to ``PLAYING`` state), create the main loop and listen to messages on the bus. Register the ``bus_call`` callback function with the ``message`` signal of the bus. Start the main loop.\n\nThe video will be displayed on the monitor. \n\nTo stop the pipeline, click the square shaped icon labelled 'Interrupt the kernel' in the top menu bar. Create a dot graph of the pipeline topology before stopping the pipeline. Quit the main loop.",
"_____no_output_____"
]
],
[
[
"pipeline.set_state(Gst.State.PLAYING);\n\nloop = GLib.MainLoop()\nbus = pipeline.get_bus()\nbus.add_signal_watch()\nbus.connect(\"message\", bus_call, loop)\n\ntry:\n loop.run()\nexcept:\n sys.stdout.write(\"Interrupt caught\\n\")\n Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, nb)\n pipeline.set_state(Gst.State.NULL)\n loop.quit()\n pass",
"_____no_output_____"
]
],
[
[
"# 6. View the Pipeline dot Graph",
"_____no_output_____"
],
[
"Register dot plugins for png export to work.",
"_____no_output_____"
]
],
[
[
"!dot -c",
"_____no_output_____"
]
],
[
[
"Convert the dot file to png and display the pipeline graph. The image will be displayed below the following code cell. Double click on the generate image file to zoom in.\n\n**Note:** This step may take a few seconds. Also, compared to previous notebooks, two disjoint graphs are displayed in the same image as we have created two parallel pipelines in this example.",
"_____no_output_____"
]
],
[
[
"dotfile = dotdir + \"/\" + nb + \".dot\"\ngraph = pydot.graph_from_dot_file(dotfile, 'utf-8')\ndisplay(Image(graph[0].create(None, 'png', 'utf-8')))",
"_____no_output_____"
]
],
[
[
"# 7. Summary",
"_____no_output_____"
],
[
"In this notebook you learned how to:\n1. Create two parallel GStreamer pipelines from a string representation using the ``parse_launch()`` API\n2. Export the pipeline topology as a dot file image and display it in the notebook",
"_____no_output_____"
],
[
"<center>Copyright© 2019 Xilinx</center>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a9a877d187a042012d45410ff177e96cda32891
| 64,045 |
ipynb
|
Jupyter Notebook
|
datasets/imagenette/multi_classification_image_imagenette_syncluster.ipynb
|
aimakerspace/synergos_simulator
|
a454d6aefbbb3d1ace12c5fe88d657498a879231
|
[
"Apache-2.0"
] | 5 |
2021-09-11T03:53:00.000Z
|
2022-01-07T03:39:09.000Z
|
datasets/imagenette/multi_classification_image_imagenette_syncluster.ipynb
|
aimakerspace/synergos_simulator
|
a454d6aefbbb3d1ace12c5fe88d657498a879231
|
[
"Apache-2.0"
] | null | null | null |
datasets/imagenette/multi_classification_image_imagenette_syncluster.ipynb
|
aimakerspace/synergos_simulator
|
a454d6aefbbb3d1ace12c5fe88d657498a879231
|
[
"Apache-2.0"
] | 3 |
2022-01-21T00:56:19.000Z
|
2022-01-25T03:28:55.000Z
| 36.142777 | 429 | 0.525224 |
[
[
[
"# Running a Federated Cycle with Synergos\n\nIn a federated learning system, there are many contributory participants, known as Worker nodes, which receive a global model to train on, with their own local dataset. The dataset does not leave the individual Worker nodes at any point, and remains private to the node.\n\nThe job to synchronize, orchestrate and initiate an federated learning cycle, falls on a Trusted Third Party (TTP). The TTP pushes out the global model architecture and parameters for the individual nodes to train on, calling upon the required data, based on tags, e.g \"training\", which points to relevant data on the individual nodes. At no point does the TTP receive, copy or access the Worker nodes' local datasets.\n\n\n\nThis tutorial aims to give you an understanding of how to use the synergos package to run a full federated learning cycle on a `Synergos Cluster` grid. \n\nIn a `Synergos Cluster` Grid, with the inclusion of a new director and queue component, you will be able to parallelize your jobs, where the number of concurrent jobs possible is equal to the number of sub-grids. This is done alongside all quality-of-life components supported in a `Synergos Plus` grid.\n\nIn this tutorial, you will go through the steps required by each participant (TTP and Worker), by simulating each of them locally with docker containers. Specifically, we will simulate a Director and 2 sub-grids, each of which has a TTP and 2 Workers, allowing us to perform 2 concurrent federated operations at any time.\n\nAt the end of this, we will have:\n- Connected the participants\n- Trained the model\n- Evaluate the model\n\n## About the Dataset and Task\n\nThe dataset used in this notebook is on a small subset of Imagenette images, comprising 3 classes, and all images are 28 x 28 pixels. The dataset is available in the same directory as this notebook. Within the dataset directory, `data1` is for Worker 1 and `data2` is for Worker 2. 
The task to be carried out will be a multi-classification.\n\nThe dataset we have provided is a processed subset of the [original Imagenette dataset](https://github.com/fastai/imagenette).",
"_____no_output_____"
],
[
"## Initiating the docker containers\n\nBefore we begin, we have to start the docker containers.",
"_____no_output_____"
],
[
"### A. Initialization via `Synergos Simulator`\n\nIn `Synergos Simulator`, a sandboxed environment has been created for you!\n\nBy running:\n\n `docker-compose -f docker-compose-syncluster.yml up --build`\n \nthe following components will be started:\n- Director\n- Sub-Grid 1\n - TTP_1 (Cluster)\n - Worker_1_n1\n - Worker_2_n1\n- Sub-Grid 2\n - TTP_2 (Cluster)\n - Worker_1_n2\n - Worker_2_n2\n- Synergos UI\n- Synergos Logger\n- Synergos MLOps\n- Synergos MQ\n\nRefer to [this](https://github.com/aimakerspace/synergos_simulator) for all the pre-allocated host & port mappings. ",
"_____no_output_____"
],
[
"### B. Manual Initialization\n\nFirstly, pull the required docker images with the following commands:\n\n1. Synergos Director:\n \n `docker pull gcr.io/synergos-aisg/synergos_director:v0.1.0`\n\n2. Synergos TTP (Cluster): \n\n `docker pull gcr.io/synergos-aisg/synergos_ttp_cluster:v0.1.0`\n\n3. Synergos Worker: \n\n `docker pull gcr.io/synergos-aisg/synergos_worker:v0.1.0`\n \n4. Synergos MLOps:\n\n `docker pull gcr.io/synergos-aisg/synergos_mlops:v0.1.0`\n \n5. Synergos MQ:\n\n `docker pull gcr.io/synergos-aisg/synergos_mq:v0.1.0`\n\nNext, in <u>separate</u> CLI terminals, run the following command(s):\n\n**Note: For Windows users, it is advisable to use powershell or command prompt based interfaces**\n\n#### Director\n\n```\ndocker run --rm\n -p 5000:5000 \n -v <directory imagenette/orchestrator_data>:/orchestrator/data\n -v <directory imagenette/orchestrator_outputs>:/orchestrator/outputs\n -v <directory imagenette/mlflow>:/mlflow\n --name director \n gcr.io/synergos-aisg/synergos_director:v0.1.0 \n --id ttp \n --logging_variant graylog <IP Synergos Logger> <TTP port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n#### Sub-Grid 1\n\n- **TTP_1**\n```\ndocker run --rm\n -p 6000:5000 \n -p 9020:8020\n -v <directory imagenette/orchestrator_data>:/orchestrator/data\n -v <directory imagenette/orchestrator_outputs>:/orchestrator/outputs\n --name ttp_1 \n gcr.io/synergos-aisg/synergos_ttp_cluster:v0.1.0 \n --id ttp \n --logging_variant graylog <IP Synergos Logger> <TTP port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n- **WORKER_1 Node 1**\n```\ndocker run --rm\n -p 5001:5000 \n -p 8021:8020\n -v <directory imagenette/data1>:/worker/data \n -v <directory imagenette/outputs_1>:/worker/outputs \n --name worker_1_n1 \n gcr.io/synergos-aisg/synergos_worker:v0.1.0 \n --id worker_1_n1 \n --logging_variant graylog <IP Synergos Logger> <Worker port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n- **WORKER_2 Node 1**\n```\ndocker run 
--rm\n -p 5002:5000 \n -p 8022:8020\n -v <directory imagenette/data2>:/worker/data \n -v <directory imagenette/outputs_2>:/worker/outputs \n --name worker_2_n1\n gcr.io/synergos-aisg/synergos_worker:v0.1.0\n --id worker_2_n1 \n --logging_variant graylog <IP Synergos Logger> <Worker port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n#### Sub-Grid 2\n\n- **TTP_2**\n```\ndocker run --rm\n -p 7000:5000 \n -p 10020:8020\n -v <directory imagenette/orchestrator_data>:/orchestrator/data\n -v <directory imagenette/orchestrator_outputs>:/orchestrator/outputs\n --name ttp_2 \n gcr.io/synergos-aisg/synergos_ttp_cluster:v0.1.0 \n --id ttp \n --logging_variant graylog <IP Synergos Logger> <TTP port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n- **WORKER_1 Node 2**\n```\ndocker run --rm\n -p 5003:5000 \n -p 8023:8020\n -v <directory imagenette/data1>:/worker/data \n -v <directory imagenette/outputs_1>:/worker/outputs \n --name worker_1_n2 \n gcr.io/synergos-aisg/synergos_worker:v0.1.0 \n --id worker_1_n2 \n --logging_variant graylog <IP Synergos Logger> <Worker port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n- **WORKER_2 Node 2**\n```\ndocker run --rm\n -p 5004:5000 \n -p 8024:8020\n -v <directory imagenette/data2>:/worker/data \n -v <directory imagenette/outputs_2>:/worker/outputs \n --name worker_2_n2\n gcr.io/synergos-aisg/synergos_worker:v0.1.0\n --id worker_2_n2 \n --logging_variant graylog <IP Synergos Logger> <Worker port>\n --queue rabbitmq <IP Synergos Logger> <AMQP port>\n```\n\n#### Synergos MLOps\n\n```\ndocker run --rm \n -p 5500:5500 \n -v /path/to/mlflow_test/:/mlflow # <-- IMPT! 
Same as orchestrator's\n --name synmlops \n gcr.io/synergos-aisg/synergos_mlops:v0.1.0\n```\n\n#### Synergos MQ\n\n```\ndocker run --rm \n -p 15672:15672 # UI port\n -p 5672:5672 # AMQP port\n --name synergos_mq \n gcr.io/synergos-aisg/synergos_mq:v0.1.0\n```\n\n#### Synergos UI\n\n- Refer to these [instructions](https://github.com/aimakerspace/synergos_ui) to deploy `Synergos UI`.\n\n#### Synergos Logger\n\n- Refer to these [instructions](https://github.com/aimakerspace/synergos_logger) to deploy `Synergos Logger`.\n",
"_____no_output_____"
],
[
"Once ready, for each terminal, you should see a REST server running on http://0.0.0.0:5000 of the container.\n\nYou are now ready for the next step.",
"_____no_output_____"
],
[
"## Configurations",
"_____no_output_____"
],
[
"### A. Configuring `Synergos Simulator`\n\nAll hosts & ports have already been pre-allocated!\n\nRefer to [this](https://github.com/aimakerspace/synergos_simulator) for all the pre-allocated host & port mappings.",
"_____no_output_____"
],
[
"### B. Configuring your manual setup\n\nIn a new terminal, run `docker inspect bridge` and find the IPv4Address for each container. Ideally, the containers should have the following addresses:\n- director address: `172.17.0.2`\n- Sub-Grid 1\n - ttp_1 address: `172.17.0.3`\n - worker_1_n1 address: `172.17.0.4`\n - worker_2_n1 address: `172.17.0.5`\n- Sub-Grid 2\n - ttp_2 address: `172.17.0.6`\n - worker_1_n2 address: `172.17.0.7`\n - worker_2_n2 address: `172.17.0.8`\n- UI address: `172.17.0.9`\n- Logger address: `172.17.0.14`\n- MLOps address: `172.17.0.15`\n- MQ address: `172.17.0.16`\n\nIf not, just note the relevant IP addresses for each docker container.\n\nRun the following cells below.\n\n**Note: For Windows users, `host` should be Docker Desktop VM's IP. Follow [this](https://stackoverflow.com/questions/58073936/how-to-get-ip-address-of-docker-desktop-vm) on instructions to find IP**",
"_____no_output_____"
]
],
[
[
"import time\nfrom synergos import Driver\n\nhost = \"172.20.0.2\"\nport = 5000\n\n# Initiate Driver\ndriver = Driver(host=host, port=port)",
"_____no_output_____"
]
],
[
[
"## Phase 1: Registration\n\nSubmitting Orchestrator & Participant metadata",
"_____no_output_____"
],
[
"#### 1A. Orchestrator creates a collaboration",
"_____no_output_____"
]
],
[
[
"collab_task = driver.collaborations\n\ncollab_task.configure_logger(\n host=\"172.20.0.14\", \n port=9000, \n sysmetrics_port=9100, \n director_port=9200, \n ttp_port=9300, \n worker_port=9400, \n ui_port=9000, \n secure=False\n)\n\ncollab_task.configure_mlops( \n host=\"172.20.0.15\", \n port=5500, \n ui_port=5500, \n secure=False\n)\n\ncollab_task.configure_mq( \n host=\"172.20.0.16\", \n port=5672, \n ui_port=15672, \n secure=False\n)\n\ncollab_task.create('imagenette_syncluster_collaboration')",
"_____no_output_____"
]
],
[
[
"#### 1B. Orchestrator creates a project",
"_____no_output_____"
]
],
[
[
"driver.projects.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n action=\"classify\",\n incentives={\n 'tier_1': [],\n 'tier_2': [],\n }\n)",
"_____no_output_____"
]
],
[
[
"#### 1C. Orchestrator creates an experiment",
"_____no_output_____"
]
],
[
[
"driver.experiments.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n model=[\n {\n \"activation\": \"relu\",\n \"is_input\": True,\n \"l_type\": \"Conv2d\",\n \"structure\": {\n \"in_channels\": 1, \n \"out_channels\": 4,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 1\n }\n },\n {\n \"activation\": None,\n \"is_input\": False,\n \"l_type\": \"Flatten\",\n \"structure\": {}\n },\n {\n \"activation\": \"softmax\",\n \"is_input\": False,\n \"l_type\": \"Linear\",\n \"structure\": {\n \"bias\": True,\n \"in_features\": 4 * 28 * 28,\n \"out_features\": 3\n }\n }\n\n ]\n)",
"_____no_output_____"
]
],
[
[
"#### 1D. Orchestrator creates a run",
"_____no_output_____"
]
],
[
[
"driver.runs.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n rounds=2, \n epochs=1,\n base_lr=0.0005,\n max_lr=0.005,\n criterion=\"NLLLoss\"\n)",
"_____no_output_____"
]
],
[
[
"#### 1E. Participants registers their servers' configurations and roles",
"_____no_output_____"
]
],
[
[
"participant_resp_1 = driver.participants.create(\n participant_id=\"worker_1\",\n)\n\ndisplay(participant_resp_1)\n\nparticipant_resp_2 = driver.participants.create(\n participant_id=\"worker_2\",\n)\n\ndisplay(participant_resp_2)",
"_____no_output_____"
],
[
"registration_task = driver.registrations\n\n# Add and register worker_1 node\nregistration_task.add_node(\n host='172.20.0.4',\n port=8020,\n f_port=5000,\n log_msgs=True,\n verbose=True\n)\n\nregistration_task.add_node(\n host='172.20.0.7',\n port=8020,\n f_port=5000,\n log_msgs=True,\n verbose=True\n)\n\nregistration_task.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n participant_id=\"worker_1\",\n role=\"host\"\n)",
"_____no_output_____"
],
[
"registration_task = driver.registrations\n\n# Add and register worker_2 node\nregistration_task.add_node(\n host='172.20.0.5',\n port=8020,\n f_port=5000,\n log_msgs=True,\n verbose=True\n)\n\nregistration_task.add_node(\n host='172.20.0.8',\n port=8020,\n f_port=5000,\n log_msgs=True,\n verbose=True\n)\n\nregistration_task.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n participant_id=\"worker_2\",\n role=\"guest\"\n)",
"_____no_output_____"
]
],
[
[
"#### 1F. Participants registers their tags for a specific project",
"_____no_output_____"
]
],
[
[
"# Worker 1 declares their data tags\ndriver.tags.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n participant_id=\"worker_1\",\n train=[[\"imagenette\", \"dataset\", \"data1\", \"train\"]],\n evaluate=[[\"imagenette\", \"dataset\", \"data1\", \"evaluate\"]]\n)",
"_____no_output_____"
],
[
"# Worker 2 declares their data tags\ndriver.tags.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n participant_id=\"worker_2\",\n train=[[\"imagenette\", \"dataset\", \"data2\", \"train\"]],\n evaluate=[[\"imagenette\", \"dataset\", \"data2\", \"evaluate\"]]\n)",
"_____no_output_____"
],
[
"stop!",
"_____no_output_____"
]
],
[
[
"## Phase 2: \nAlignment, Training & Optimisation",
"_____no_output_____"
],
[
"#### 2A. Perform multiple feature alignment to dynamically configure datasets and models for cross-grid compatibility",
"_____no_output_____"
]
],
[
[
"driver.alignments.create(\n collab_id='imagenette_syncluster_collaboration',\n project_id=\"imagenette_syncluster_project\",\n verbose=False,\n log_msg=False\n)",
"_____no_output_____"
],
[
"# Important! MUST wait for alignment process to first complete before proceeding on\nwhile True:\n \n align_resp = driver.alignments.read(\n collab_id='imagenette_syncluster_collaboration',\n project_id=\"imagenette_syncluster_project\"\n )\n \n align_data = align_resp.get('data')\n if align_data:\n display(align_resp)\n break\n \n time.sleep(5)",
"_____no_output_____"
]
],
[
[
"#### 2B. Trigger training across the federated grid",
"_____no_output_____"
]
],
[
[
"model_resp = driver.models.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n log_msg=False,\n verbose=False\n)\ndisplay(model_resp)",
"_____no_output_____"
],
[
"# Important! MUST wait for training process to first complete before proceeding on\nwhile True:\n \n train_resp = driver.models.read(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\"\n )\n \n train_data = train_resp.get('data')\n if train_data:\n display(train_data)\n break\n \n time.sleep(5)",
"_____no_output_____"
]
],
[
[
"#### 2C. Perform hyperparameter tuning once ideal model is found (experimental) ",
"_____no_output_____"
]
],
[
[
"optim_parameters = {\n 'search_space': {\n \"rounds\": {\"_type\": \"choice\", \"_value\": [1, 2]},\n \"epochs\": {\"_type\": \"choice\", \"_value\": [1, 2]},\n \"batch_size\": {\"_type\": \"choice\", \"_value\": [32, 64]},\n \"lr\": {\"_type\": \"choice\", \"_value\": [0.0001, 0.1]},\n \"criterion\": {\"_type\": \"choice\", \"_value\": [\"NLLLoss\"]},\n \"mu\": {\"_type\": \"uniform\", \"_value\": [0.0, 1.0]},\n \"base_lr\": {\"_type\": \"choice\", \"_value\": [0.00005]},\n \"max_lr\": {\"_type\": \"choice\", \"_value\": [0.2]}\n },\n 'backend': \"tune\",\n 'optimize_mode': \"max\",\n 'metric': \"accuracy\",\n 'trial_concurrency': 1,\n 'max_exec_duration': \"1h\",\n 'max_trial_num': 2,\n 'max_concurrent': 1,\n 'is_remote': True,\n 'use_annotation': True,\n 'auto_align': True,\n 'dockerised': True,\n 'verbose': True,\n 'log_msgs': True\n}\ndriver.optimizations.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n **optim_parameters\n)",
"_____no_output_____"
],
[
"driver.optimizations.read(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\"\n)",
"_____no_output_____"
]
],
[
[
"## Phase 3: EVALUATE \nValidation & Predictions",
"_____no_output_____"
],
[
"#### 3A. Perform validation(s) of combination(s)",
"_____no_output_____"
]
],
[
[
"# Orchestrator performs post-mortem validation\ndriver.validations.create(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n log_msg=False,\n verbose=False\n)",
"_____no_output_____"
],
[
"# Run this cell again after validation has completed to retrieve your validation statistics\n# NOTE: You do not need to wait for validation/prediction requests to complete to proceed\ndriver.validations.read(\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n)",
"_____no_output_____"
]
],
[
[
"#### 3B. Perform prediction(s) of combination(s)",
"_____no_output_____"
]
],
[
[
"# Worker 1 requests for inferences\ndriver.predictions.create(\n tags={\n \"imagenette_syncluster_project\": [\n [\"imagenette\", \"dataset\", \"data1\", \"predict\"]\n ]\n },\n participant_id=\"worker_1\",\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n log_msg=False,\n verbose=False\n)",
"_____no_output_____"
],
[
"# Run this cell again after prediction has completed to retrieve your predictions for worker 1\n# NOTE: You do not need to wait for validation/prediction requests to complete to proceed\ndriver.predictions.read(\n participant_id=\"worker_1\",\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n)",
"_____no_output_____"
],
[
"# Worker 2 requests for inferences\ndriver.predictions.create(\n tags={\n \"imagenette_syncluster_project\": [\n [\"imagenette\", \"dataset\", \"data2\", \"predict\"]\n ]\n },\n participant_id=\"worker_2\",\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n log_msg=False,\n verbose=False\n)",
"_____no_output_____"
],
[
"# Run this cell again after prediction has completed to retrieve your predictions for worker 2\n# NOTE: You do not need to wait for validation/prediction requests to complete to proceed\ndriver.predictions.read(\n participant_id=\"worker_2\",\n collab_id=\"imagenette_syncluster_collaboration\",\n project_id=\"imagenette_syncluster_project\",\n expt_id=\"imagenette_syncluster_experiment\",\n run_id=\"imagenette_syncluster_run\",\n)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a9a88523d30b7b53cd865c06bc718647da39468
| 546,033 |
ipynb
|
Jupyter Notebook
|
4. Fun with Keypoints-zh.ipynb
|
czyseo/P1_Facial_Keypoints
|
7fe4c5bea1098d6667afa32868f348f0cef7d5f8
|
[
"MIT"
] | null | null | null |
4. Fun with Keypoints-zh.ipynb
|
czyseo/P1_Facial_Keypoints
|
7fe4c5bea1098d6667afa32868f348f0cef7d5f8
|
[
"MIT"
] | null | null | null |
4. Fun with Keypoints-zh.ipynb
|
czyseo/P1_Facial_Keypoints
|
7fe4c5bea1098d6667afa32868f348f0cef7d5f8
|
[
"MIT"
] | null | null | null | 86.329328 | 75,516 | 0.755799 |
[
[
[
"## 人脸过滤器\n\n现在,使用已训练的人脸关键点检测器,就可以自动执行一些操作了,比如将过滤器添加到人脸。这个notebook是可选的,你可以根据在人眼周围检测到的关键点为图像中检测到的人脸添加太阳镜。打开`images/`目录,看一看我们还为你提供了哪些用于尝试的 .png!\n\n<img src=\"images/face_filter_ex.png\" width=60% height=60%/>\n\n下面,查看一下我们将要使用的太阳镜.png,然后开始行动吧!",
"_____no_output_____"
]
],
[
[
"# import necessary resources\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport cv2",
"_____no_output_____"
],
[
"# load in sunglasses image with cv2 and IMREAD_UNCHANGED\nsunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)\n\n# plot our image\nplt.imshow(sunglasses)\n\n# print out its dimensions\nprint('Image shape: ', sunglasses.shape)",
"Image shape: (1123, 3064, 4)\n"
]
],
[
[
"## 第四个维度\n\n你会注意到这个图像实际上有*4 个颜色通道*,与一般的RGB图像不同,因为一般的RGB图像只有3个颜色通道。这是由于我们设置了标记`cv2.IMREAD_UNCHANGED`,这个标记会告诉它使其读取另一个颜色通道。\n\n#### Alpha通道\n除了具有通常彩色图像的红色、蓝色和绿色通道,第4个通道表示图像中**每个像素的透明度级别**,这个通道通常被称为**alpha**通道。透明度通道的工作原理如下:像素越低,像素越透明。这里的下限(即完全透明)为零,因此任何设置为0的像素都不会被看到;上图中这些看起来像白色背景像素,但它们实际上是完全透明的。\n\n有了这个透明的通道,我们将这个太阳镜的矩形图像放在一张人脸图像上,仍然可以看到人脸区域在技术上被太阳镜图像的透明背景覆盖了!\n\n接下来,我们看看下一个Python单元格中太阳镜图像的alpha通道。因为图像背景中的许多像素的alpha值为0,所以如果我们想看到它们,就需要显式地输出非零值。",
"_____no_output_____"
]
],
[
[
"# print out the sunglasses transparency (alpha) channel\nalpha_channel = sunglasses[:,:,3]\nprint ('The alpha channel looks like this (black pixels = transparent): ')\nplt.imshow(alpha_channel, cmap='gray')",
"The alpha channel looks like this (black pixels = transparent): \n"
],
[
"# just to double check that there are indeed non-zero values\n# let's find and print out every value greater than zero\nvalues = np.where(alpha_channel != 0)\nprint ('The non-zero values of the alpha channel are: ')\nprint (values)",
"The non-zero values of the alpha channel are: \n(array([ 17, 17, 17, ..., 1109, 1109, 1109]), array([ 687, 688, 689, ..., 2376, 2377, 2378]))\n"
]
],
[
[
"#### 覆盖图像\n\n覆盖图像的意思是说,当我们将太阳镜图像放在另一个图像上时,我们可以把透明度通道当做一个过滤器:\n\n* 如果像素不透明(即 alpha_channel> 0),则将它们覆盖在新图像上\n\n#### 关键点位置\n\n在这个过程时,了解哪个关键点属于眼睛或嘴巴等,这一点对你很有帮助,因此在下图中我们还直接在图像上输出了每个人脸面部关键点的索引,这样就可以分辨哪些关键点适合眼睛、眉毛等,\n\n<img src=\"images/landmarks_numbered.jpg\" width=50% height=50%/>\n\n使用对应于人脸边缘的关键点来定义太阳镜的宽度,并使用眼睛的位置来定义位移,这个方法可能也会很有用。\n\n接下来,我们要加载一个示例图像。你会从下面提供的训练数据集中获得一个图像和一组关键点,但也可以使用自己的CNN模型为*任何*一张人脸图像生成关键点(如在Notebook 3中)并进行相同的覆盖处理!",
"_____no_output_____"
]
],
[
[
"# load in the data if you have not already!\n# otherwise, you may comment out this cell\n# -- DO NOT CHANGE THIS CELL -- #\n!mkdir /data\n!wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\n!unzip -n /data/train-test-data.zip -d /data",
"--2019-01-12 17:59:04-- https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\nResolving s3.amazonaws.com (s3.amazonaws.com)... 52.216.130.189\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|52.216.130.189|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 338613624 (323M) [application/zip]\nSaving to: ‘/data/train-test-data.zip’\n\ntrain-test-data.zip 100%[===================>] 322.93M 76.3MB/s in 4.5s \n\n2019-01-12 17:59:08 (72.5 MB/s) - ‘/data/train-test-data.zip’ saved [338613624/338613624]\n\nArchive: /data/train-test-data.zip\n creating: /data/test/\n inflating: /data/test/Abdel_Aziz_Al-Hakim_00.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_01.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_10.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_11.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_40.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_41.jpg \n inflating: /data/test/Abdullah_Gul_10.jpg \n inflating: /data/test/Abdullah_Gul_11.jpg \n inflating: /data/test/Abdullah_Gul_30.jpg \n inflating: /data/test/Abdullah_Gul_31.jpg \n inflating: /data/test/Abdullah_Gul_50.jpg \n inflating: /data/test/Abdullah_Gul_51.jpg \n inflating: /data/test/Adam_Sandler_00.jpg \n inflating: /data/test/Adam_Sandler_01.jpg \n inflating: /data/test/Adam_Sandler_10.jpg \n inflating: /data/test/Adam_Sandler_11.jpg \n inflating: /data/test/Adam_Sandler_40.jpg \n inflating: /data/test/Adam_Sandler_41.jpg \n inflating: /data/test/Adrian_Nastase_10.jpg \n inflating: /data/test/Adrian_Nastase_11.jpg \n inflating: /data/test/Adrian_Nastase_40.jpg \n inflating: /data/test/Adrian_Nastase_41.jpg \n inflating: /data/test/Adrian_Nastase_50.jpg \n inflating: /data/test/Adrian_Nastase_51.jpg \n inflating: /data/test/Agbani_Darego_00.jpg \n inflating: /data/test/Agbani_Darego_01.jpg \n inflating: /data/test/Agbani_Darego_20.jpg \n inflating: /data/test/Agbani_Darego_21.jpg \n inflating: 
/data/test/Agbani_Darego_40.jpg \n inflating: /data/test/Agbani_Darego_41.jpg \n inflating: /data/test/Agbani_Darego_50.jpg \n inflating: /data/test/Agbani_Darego_51.jpg \n inflating: /data/test/Agnes_Bruckner_00.jpg \n inflating: /data/test/Agnes_Bruckner_01.jpg \n inflating: /data/test/Agnes_Bruckner_10.jpg \n inflating: /data/test/Agnes_Bruckner_11.jpg \n inflating: /data/test/Agnes_Bruckner_20.jpg \n inflating: /data/test/Agnes_Bruckner_21.jpg \n inflating: /data/test/Agnes_Bruckner_40.jpg \n inflating: /data/test/Agnes_Bruckner_41.jpg \n inflating: /data/test/Ahmad_Masood_00.jpg \n inflating: /data/test/Ahmad_Masood_01.jpg \n inflating: /data/test/Ahmad_Masood_30.jpg \n inflating: /data/test/Ahmad_Masood_31.jpg \n inflating: /data/test/Ahmad_Masood_40.jpg \n inflating: /data/test/Ahmad_Masood_41.jpg \n inflating: /data/test/Ahmed_Ahmed_00.jpg \n inflating: /data/test/Ahmed_Ahmed_01.jpg \n inflating: /data/test/Ahmed_Ahmed_10.jpg \n inflating: /data/test/Ahmed_Ahmed_11.jpg \n inflating: /data/test/Ahmed_Ahmed_40.jpg \n inflating: /data/test/Ahmed_Ahmed_41.jpg \n inflating: /data/test/Ahmed_Ahmed_50.jpg \n inflating: /data/test/Ahmed_Ahmed_51.jpg \n inflating: /data/test/Aidan_Quinn_00.jpg \n inflating: /data/test/Aidan_Quinn_01.jpg \n inflating: /data/test/Aidan_Quinn_10.jpg \n inflating: /data/test/Aidan_Quinn_11.jpg \n inflating: /data/test/Aidan_Quinn_20.jpg \n inflating: /data/test/Aidan_Quinn_21.jpg \n inflating: /data/test/Aidan_Quinn_30.jpg \n inflating: /data/test/Aidan_Quinn_31.jpg \n inflating: /data/test/Aishwarya_Rai_00.jpg \n inflating: /data/test/Aishwarya_Rai_01.jpg \n inflating: /data/test/Aishwarya_Rai_10.jpg \n inflating: /data/test/Aishwarya_Rai_11.jpg \n inflating: /data/test/Aishwarya_Rai_40.jpg \n inflating: /data/test/Aishwarya_Rai_41.jpg \n inflating: /data/test/Aishwarya_Rai_50.jpg \n inflating: /data/test/Aishwarya_Rai_51.jpg \n inflating: /data/test/Albert_Brooks_00.jpg \n inflating: /data/test/Albert_Brooks_01.jpg \n inflating: 
/data/test/Albert_Brooks_10.jpg \n inflating: /data/test/Albert_Brooks_11.jpg \n inflating: /data/test/Albert_Brooks_30.jpg \n inflating: /data/test/Albert_Brooks_31.jpg \n inflating: /data/test/Alejandro_Toledo_10.jpg \n inflating: /data/test/Alejandro_Toledo_11.jpg \n inflating: /data/test/Alejandro_Toledo_30.jpg \n inflating: /data/test/Alejandro_Toledo_31.jpg \n inflating: /data/test/Alejandro_Toledo_50.jpg \n inflating: /data/test/Alejandro_Toledo_51.jpg \n inflating: /data/test/Aleksander_Kwasniewski_00.jpg \n inflating: /data/test/Aleksander_Kwasniewski_01.jpg \n inflating: /data/test/Aleksander_Kwasniewski_10.jpg \n inflating: /data/test/Aleksander_Kwasniewski_11.jpg \n inflating: /data/test/Aleksander_Kwasniewski_20.jpg \n inflating: /data/test/Aleksander_Kwasniewski_21.jpg \n inflating: /data/test/Aleksander_Kwasniewski_30.jpg \n inflating: /data/test/Aleksander_Kwasniewski_31.jpg \n inflating: /data/test/Alex_Ferguson_00.jpg \n inflating: /data/test/Alex_Ferguson_01.jpg \n inflating: /data/test/Alex_Ferguson_10.jpg \n inflating: /data/test/Alex_Ferguson_11.jpg \n inflating: /data/test/Alex_Ferguson_50.jpg \n inflating: /data/test/Alex_Ferguson_51.jpg \n inflating: /data/test/Alexandra_Pelosi_00.jpg \n inflating: /data/test/Alexandra_Pelosi_01.jpg \n inflating: /data/test/Alexandra_Pelosi_10.jpg \n inflating: /data/test/Alexandra_Pelosi_11.jpg \n inflating: /data/test/Alexandra_Pelosi_30.jpg \n inflating: /data/test/Alexandra_Pelosi_31.jpg \n inflating: /data/test/Alfredo_di_Stefano_00.jpg \n inflating: /data/test/Alfredo_di_Stefano_01.jpg \n inflating: /data/test/Alfredo_di_Stefano_20.jpg \n inflating: /data/test/Alfredo_di_Stefano_21.jpg \n inflating: /data/test/Alfredo_di_Stefano_50.jpg \n inflating: /data/test/Alfredo_di_Stefano_51.jpg \n inflating: /data/test/Ali_Abbas_20.jpg \n inflating: /data/test/Ali_Abbas_21.jpg \n inflating: /data/test/Ali_Abbas_30.jpg \n inflating: /data/test/Ali_Abbas_31.jpg \n inflating: /data/test/Ali_Abbas_40.jpg \n 
inflating: /data/test/Ali_Abbas_41.jpg \n inflating: /data/test/Ali_Abbas_50.jpg \n inflating: /data/test/Ali_Abbas_51.jpg \n inflating: /data/test/Alicia_Silverstone_00.jpg \n inflating: /data/test/Alicia_Silverstone_01.jpg \n inflating: /data/test/Alicia_Silverstone_10.jpg \n inflating: /data/test/Alicia_Silverstone_11.jpg \n inflating: /data/test/Alicia_Silverstone_20.jpg \n inflating: /data/test/Alicia_Silverstone_21.jpg \n inflating: /data/test/Alicia_Silverstone_50.jpg \n inflating: /data/test/Alicia_Silverstone_51.jpg \n inflating: /data/test/Alma_Powell_00.jpg \n inflating: /data/test/Alma_Powell_01.jpg \n inflating: /data/test/Alma_Powell_10.jpg \n inflating: /data/test/Alma_Powell_11.jpg \n inflating: /data/test/Alma_Powell_40.jpg \n inflating: /data/test/Alma_Powell_41.jpg \n inflating: /data/test/Alma_Powell_50.jpg \n inflating: /data/test/Alma_Powell_51.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_00.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_01.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_10.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_11.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_20.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_21.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_30.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_31.jpg \n inflating: /data/test/Amelia_Vega_10.jpg \n inflating: /data/test/Amelia_Vega_11.jpg \n inflating: /data/test/Amelia_Vega_20.jpg \n inflating: /data/test/Amelia_Vega_21.jpg \n inflating: /data/test/Amelia_Vega_30.jpg \n inflating: /data/test/Amelia_Vega_31.jpg \n inflating: /data/test/Amelia_Vega_40.jpg \n inflating: /data/test/Amelia_Vega_41.jpg \n inflating: /data/test/Amy_Brenneman_10.jpg \n inflating: /data/test/Amy_Brenneman_11.jpg \n inflating: /data/test/Amy_Brenneman_30.jpg \n inflating: /data/test/Amy_Brenneman_31.jpg \n inflating: /data/test/Amy_Brenneman_50.jpg \n inflating: /data/test/Amy_Brenneman_51.jpg \n inflating: /data/test/Andrea_Bocelli_10.jpg \n 
inflating: /data/test/Andrea_Bocelli_11.jpg \n inflating: /data/test/Andrea_Bocelli_20.jpg \n inflating: /data/test/Andrea_Bocelli_21.jpg \n inflating: /data/test/Andrea_Bocelli_30.jpg \n inflating: /data/test/Andrea_Bocelli_31.jpg \n inflating: /data/test/Andy_Roddick_20.jpg \n inflating: /data/test/Andy_Roddick_21.jpg \n inflating: /data/test/Andy_Roddick_40.jpg \n inflating: /data/test/Andy_Roddick_41.jpg \n inflating: /data/test/Andy_Roddick_50.jpg \n inflating: /data/test/Andy_Roddick_51.jpg \n inflating: /data/test/Andy_Rooney_10.jpg \n inflating: /data/test/Andy_Rooney_11.jpg \n inflating: /data/test/Andy_Rooney_20.jpg \n inflating: /data/test/Andy_Rooney_21.jpg \n inflating: /data/test/Andy_Rooney_50.jpg \n inflating: /data/test/Andy_Rooney_51.jpg \n inflating: /data/test/Angel_Lockward_30.jpg \n inflating: /data/test/Angel_Lockward_31.jpg \n inflating: /data/test/Angel_Lockward_40.jpg \n inflating: /data/test/Angel_Lockward_41.jpg \n inflating: /data/test/Angel_Lockward_50.jpg \n inflating: /data/test/Angel_Lockward_51.jpg \n inflating: /data/test/Angela_Bassett_20.jpg \n inflating: /data/test/Angela_Bassett_21.jpg \n inflating: /data/test/Angela_Bassett_30.jpg \n inflating: /data/test/Angela_Bassett_31.jpg \n inflating: /data/test/Angela_Bassett_40.jpg \n inflating: /data/test/Angela_Bassett_41.jpg \n inflating: /data/test/Angelo_Reyes_20.jpg \n inflating: /data/test/Angelo_Reyes_21.jpg \n inflating: /data/test/Angelo_Reyes_30.jpg \n inflating: /data/test/Angelo_Reyes_31.jpg \n inflating: /data/test/Angelo_Reyes_50.jpg \n inflating: /data/test/Angelo_Reyes_51.jpg \n inflating: /data/test/Baburam_Bhattari_00.jpg \n inflating: /data/test/Baburam_Bhattari_01.jpg \n inflating: /data/test/Baburam_Bhattari_20.jpg \n inflating: /data/test/Baburam_Bhattari_21.jpg \n"
],
[
"# load in training data\nkey_pts_frame = pd.read_csv('/data/training_frames_keypoints.csv')\n\n# print out some stats about the data\nprint('Number of images: ', key_pts_frame.shape[0])",
"Number of images: 3462\n"
],
[
"# helper function to display keypoints\ndef show_keypoints(image, key_pts):\n \"\"\"Show image with keypoints\"\"\"\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')\n",
"_____no_output_____"
],
[
"# a selected image\nn = 120\nimage_name = key_pts_frame.iloc[n, 0]\nimage = mpimg.imread(os.path.join('/data/training/', image_name))\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nprint('Image name: ', image_name)\n\nplt.figure(figsize=(5, 5))\nshow_keypoints(image, key_pts)\nplt.show()",
"Image name: Christopher_Walken_01.jpg\n"
]
],
[
[
"接下来,你会看到在加载的图像中将太阳镜放在这个人面部的一个示例。\n\n请注意,关键点会在上面的编号图像中逐个编号,因此`key_pts[0,:]`对应于标记图像中的第一个点(1)。",
"_____no_output_____"
]
],
[
[
"# Display sunglasses on top of the image in the appropriate place\n\n# copy of the face image for overlay\nimage_copy = np.copy(image)\n\n# top-left location for sunglasses to go\n# 17 = edge of left eyebrow\nx = int(key_pts[17, 0])\ny = int(key_pts[17, 1])\n\n# height and width of sunglasses\n# h = length of nose\nh = int(abs(key_pts[27,1] - key_pts[34,1]))\n# w = left to right eyebrow edges\nw = int(abs(key_pts[17,0] - key_pts[26,0]))\n\n# read in sunglasses\nsunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)\n# resize sunglasses\nnew_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC)\n\n# get region of interest on the face to change\nroi_color = image_copy[y:y+h,x:x+w]\n\n# find all non-transparent pts\nind = np.argwhere(new_sunglasses[:,:,3] > 0)\n\n# for each non-transparent point, replace the original image pixel with that of the new_sunglasses\nfor i in range(3):\n roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i] \n# set the area of the image to the changed region with sunglasses\nimage_copy[y:y+h,x:x+w] = roi_color\n\n\n# display the result!\nplt.imshow(image_copy)",
"_____no_output_____"
]
],
[
[
"#### 接下来的步骤\n\n查看`images/`目录,查看其他可用的覆盖图像.png!此外,你可能会注意到,太阳镜的覆盖层并不完美。因此,我们建议你尝试调整眼镜宽度和高度的比例,并研究如何在OpenCV中执行 [图像旋转](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html),从而将覆盖层与任何一种人脸表情都能够相匹配。",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a9a8870917bedc9bce1d8adea0e9a711b4f11ac
| 261,945 |
ipynb
|
Jupyter Notebook
|
cs224w/data gen1.ipynb
|
kidrabit/Data-Visualization-Lab-RND
|
baa19ee4e9f3422a052794e50791495632290b36
|
[
"Apache-2.0"
] | 1 |
2022-01-18T01:53:34.000Z
|
2022-01-18T01:53:34.000Z
|
cs224w/data gen1.ipynb
|
kidrabit/Data-Visualization-Lab-RND
|
baa19ee4e9f3422a052794e50791495632290b36
|
[
"Apache-2.0"
] | null | null | null |
cs224w/data gen1.ipynb
|
kidrabit/Data-Visualization-Lab-RND
|
baa19ee4e9f3422a052794e50791495632290b36
|
[
"Apache-2.0"
] | null | null | null | 72.581047 | 17,084 | 0.677028 |
[
[
[
"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom copy import deepcopy\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"",
"_____no_output_____"
],
[
"class RBF(nn.Module):\n def __init__(self):\n super(RBF, self).__init__()\n torch.cuda.manual_seed(0)\n \n self.rbf_clt = self.init_clt()\n self.rbf_std = self.init_std()\n \n\n def init_clt(self):\n return nn.Parameter(torch.rand(1))\n\n def init_std(self):\n return nn.Parameter(torch.rand(1))\n \n def rbf(self, x, cluster, std):\n return torch.exp(-(x - cluster) * (x - cluster) / 2 * (std * std))\n \n \n def forward(self, x): \n \n x = self.rbf(x, self.rbf_clt, self.rbf_std)\n \n return x",
"_____no_output_____"
],
[
"class RBFnetwork(nn.Module):\n def __init__(self, timelag):\n super(RBFnetwork, self).__init__()\n torch.cuda.manual_seed(0)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n \n self.timelag = timelag\n self.init_weight = nn.Parameter(torch.rand(self.timelag))\n self.rbf_list = [RBF().to(device) for i in range(self.timelag)]\n \n def forward(self, x):\n\n for j in range(self.timelag):\n if j ==0:\n y = sum([self.init_weight[i] * self.rbf_list[i](x[j]) for i in range(self.timelag)]) \n else:\n y = torch.cat([y, sum([self.init_weight[i] * self.rbf_list[i](x[j]) for i in range(self.timelag)])])\n \n return y",
"_____no_output_____"
],
[
"def restore_parameters(model, best_model):\n '''Move parameter values from best_model to model.'''\n for params, best_params in zip(model.parameters(), best_model.parameters()):\n params.data = best_params\n \ndef train_RBFlayer(model, input_, target, lr, epochs, lookback = 5, device = device):\n model.to(device)\n loss_fn = nn.MSELoss(reduction='mean')\n optimizer = torch.optim.Adam(model.parameters(), lr = lr)\n \n train_loss_list = []\n \n best_it = None\n best_model = None\n best_loss = np.inf\n \n target_list = []\n \n for j in range(len(target) - 2):\n target_list.append((target[j+2] - target[j])/2)\n \n loss_list = []\n cause_list = []\n for epoch in range(epochs):\n cause = model(input_)\n cause_list.append(cause)\n grad = []\n \n \n for i in range(len(cause) - 2):\n grad.append((cause[i+2] - cause[i])/2)\n \n loss1 = sum([loss_fn(grad[i], target_list[i]) for i in range(len(grad))])\n loss2 = sum([loss_fn(cause[i], target[i]) for i in range(len(input_))])\n \n loss = loss1 + loss2\n \n loss.backward()\n optimizer.step()\n model.zero_grad()\n \n loss_list.append(loss)\n mean_loss = loss / len(grad)\n train_loss_list.append(mean_loss)\n \n if mean_loss < best_loss:\n best_loss = mean_loss\n best_it = epoch\n best_model = deepcopy(model)\n \n elif (epoch - best_it) == lookback:\n if verbose:\n print('Stopping early')\n break\n print(\"epoch {} cause loss {} :\".format(epoch, loss / len(input_)))\n print('gradient loss :', loss1/len(grad))\n print('value loss :', loss2/len(input_))\n \n best_cause = cause_list[best_it] \n restore_parameters(model, best_model)\n\n return best_model, loss_list, best_cause",
"_____no_output_____"
]
],
[
[
"# data generation",
"_____no_output_____"
]
],
[
[
"import random as rand\nimport numpy as np\n\ndef data_gen(timelag):\n data = []\n clt_list = []\n std_list = []\n for i in range(timelag):\n clt = rand.random()\n std = rand.random()\n \n data_i = np.exp(-(i - clt) * (i - clt) / 2 * (std * std))\n data.append(data_i)\n clt_list.append(clt)\n std_list.append(std)\n return torch.tensor(data, device = device).float(), torch.tensor(clt_list, device = device).float(), torch.tensor(std_list, device = device).float()",
"_____no_output_____"
],
[
"data, clt_list, std_list = data_gen(10)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"clt_list",
"_____no_output_____"
],
[
"std_list",
"_____no_output_____"
]
],
[
[
"# test1",
"_____no_output_____"
]
],
[
[
"import time\n\ncause_list = []\n\nstart = time.time()\nmodel = RBFnetwork(10)\nbest_model, loss_list, best_cause = train_RBFlayer(model, data, data, 0.001, 1000, device)\ncause_list.append(best_cause.cpu().detach().numpy())\nprint(\"time :\", time.time() - start)\nprint('-------------------------------------------------------------------------------------------')\n",
"epoch 0 cause loss 17.742481231689453 :\ngradient loss : tensor(0.0978, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.6643, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 1 cause loss 17.660694122314453 :\ngradient loss : tensor(0.0977, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.5825, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 2 cause loss 17.579105377197266 :\ngradient loss : tensor(0.0977, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.5010, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 3 cause loss 17.49771499633789 :\ngradient loss : tensor(0.0977, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.4196, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 4 cause loss 17.41652488708496 :\ngradient loss : tensor(0.0976, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.3384, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 5 cause loss 17.335540771484375 :\ngradient loss : tensor(0.0976, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.2575, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 6 cause loss 17.254772186279297 :\ngradient loss : tensor(0.0975, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.1767, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 7 cause loss 17.174209594726562 :\ngradient loss : tensor(0.0975, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.0962, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 8 cause loss 17.093862533569336 :\ngradient loss : tensor(0.0975, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(17.0159, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 9 cause loss 17.013736724853516 :\ngradient loss : tensor(0.0974, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.9358, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 10 cause loss 16.93383026123047 :\ngradient loss : tensor(0.0974, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.8559, 
device='cuda:0', grad_fn=<DivBackward0>)\nepoch 11 cause loss 16.854148864746094 :\ngradient loss : tensor(0.0974, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.7762, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 12 cause loss 16.774694442749023 :\ngradient loss : tensor(0.0973, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.6968, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 13 cause loss 16.69546890258789 :\ngradient loss : tensor(0.0973, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.6176, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 14 cause loss 16.61647605895996 :\ngradient loss : tensor(0.0973, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.5387, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 15 cause loss 16.537717819213867 :\ngradient loss : tensor(0.0972, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.4599, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 16 cause loss 16.45919418334961 :\ngradient loss : tensor(0.0972, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.3814, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 17 cause loss 16.38090705871582 :\ngradient loss : tensor(0.0972, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.3032, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 18 cause loss 16.302867889404297 :\ngradient loss : tensor(0.0971, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.2252, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 19 cause loss 16.225065231323242 :\ngradient loss : tensor(0.0971, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.1474, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 20 cause loss 16.147512435913086 :\ngradient loss : tensor(0.0971, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(16.0699, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 21 cause loss 16.07020378112793 :\ngradient loss : tensor(0.0970, device='cuda:0', 
grad_fn=<DivBackward0>)\nvalue loss : tensor(15.9926, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 22 cause loss 15.993145942687988 :\ngradient loss : tensor(0.0970, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.9155, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 23 cause loss 15.916337966918945 :\ngradient loss : tensor(0.0970, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.8388, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 24 cause loss 15.839777946472168 :\ngradient loss : tensor(0.0969, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.7622, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 25 cause loss 15.763473510742188 :\ngradient loss : tensor(0.0969, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.6860, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 26 cause loss 15.68742847442627 :\ngradient loss : tensor(0.0969, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.6099, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 27 cause loss 15.611632347106934 :\ngradient loss : tensor(0.0968, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.5342, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 28 cause loss 15.536093711853027 :\ngradient loss : tensor(0.0968, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.4587, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 29 cause loss 15.4608154296875 :\ngradient loss : tensor(0.0968, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.3834, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 30 cause loss 15.38579273223877 :\ngradient loss : tensor(0.0967, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.3084, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 31 cause loss 15.31103229522705 :\ngradient loss : tensor(0.0967, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.2337, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 32 cause loss 15.236531257629395 
:\ngradient loss : tensor(0.0967, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.1592, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 33 cause loss 15.162291526794434 :\ngradient loss : tensor(0.0966, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.0850, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 34 cause loss 15.088312149047852 :\ngradient loss : tensor(0.0966, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(15.0110, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 35 cause loss 15.014596939086914 :\ngradient loss : tensor(0.0966, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.9373, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 36 cause loss 14.941141128540039 :\ngradient loss : tensor(0.0965, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.8639, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 37 cause loss 14.867952346801758 :\ngradient loss : tensor(0.0965, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.7908, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 38 cause loss 14.795022964477539 :\ngradient loss : tensor(0.0965, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.7179, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 39 cause loss 14.722357749938965 :\ngradient loss : tensor(0.0964, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.6452, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 40 cause loss 14.649958610534668 :\ngradient loss : tensor(0.0964, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.5728, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 41 cause loss 14.57781982421875 :\ngradient loss : tensor(0.0964, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.5007, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 42 cause loss 14.505949974060059 :\ngradient loss : tensor(0.0963, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.4289, device='cuda:0', 
grad_fn=<DivBackward0>)\nepoch 43 cause loss 14.434341430664062 :\ngradient loss : tensor(0.0963, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.3573, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 44 cause loss 14.362998008728027 :\ngradient loss : tensor(0.0963, device='cuda:0', grad_fn=<DivBackward0>)\nvalue loss : tensor(14.2860, device='cuda:0', grad_fn=<DivBackward0>)\nepoch 45 cause loss 14.291914939880371 :\n"
],
[
"import matplotlib.pyplot as plt\nplt.plot(cause_list[0])",
"_____no_output_____"
],
[
"plt.plot(cause_list[0])\nplt.plot(data.cpu().detach().numpy())\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a9a8d987ea6b2b102da0e565d57d29092e05690
| 3,372 |
ipynb
|
Jupyter Notebook
|
notebooks/check adequate sampling over big torsional barriers.ipynb
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 4 |
2019-11-12T16:23:26.000Z
|
2021-07-01T05:37:37.000Z
|
notebooks/check adequate sampling over big torsional barriers.ipynb
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 4 |
2019-01-18T22:05:03.000Z
|
2019-11-12T18:37:31.000Z
|
notebooks/check adequate sampling over big torsional barriers.ipynb
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 2 |
2019-12-02T20:23:56.000Z
|
2021-03-25T23:28:36.000Z
| 29.578947 | 135 | 0.537663 |
[
[
[
"See which torsions probably have the biggest torsional barriers, do some sort of check that big torsional barriers are crossed...",
"_____no_output_____"
]
],
[
[
"from simtk import unit\nfrom openmmtools.constants import kB\ntemperature = 298.15 * unit.kelvin\nkT = kB * temperature\n\ndef evaluate_torsion(theta, periodicity=1, phase=0, force_constant=1):\n return force_constant * (1 + np.cos(periodicity * theta - phase))\n\ndef plot_torsion_component(periodicity, phase, force_constant):\n x = np.linspace(0, 2 * np.pi, 1000)\n y = evaluate_torsion(x, periodicity, phase, force_constant)\n plt.plot(x / np.pi, y)\n return y\n\ndef compute_full_torsion(a, b, c, d):\n x = np.linspace(0, 2 * np.pi, 1000)\n terms = forces_per_torsion[(a,b,c,d)]\n u = sum([evaluate_torsion(x, periodicities[i], phases[i], ks[i]) for i in terms])\n return u\n\ndef plot_full_torsion(a,b,c,d):\n u = compute_full_torsion(a,b,c,d)\n x = np.linspace(0, 2 * np.pi, len(u))\n plt.plot(x / np.pi,u)\n return u\n\ndef plot_torsion_by_index(i):\n return plot_torsion_component(periodicities[i], phases[i], ks[i])\n\ndef plot_marginal(U, temperature = 300 * unit.kelvin):\n '''assume U is unit'd (compatible with kT) and has limits [0,2pi] '''\n x = np.linspace(0, 2 * np.pi, len(u))\n \n q = np.exp(-U / kT)\n Z = 2 * np.pi * np.sum(q) / len(q)\n p = q / Z\n plt.plot(x, p)\n return p",
"_____no_output_____"
],
[
"indices = []\nperiodicities = []\nphases = []\nks = []\n\nfor i in range(num_torsions):\n a, b, c, d, periodicity, phase, force_constant = f.getTorsionParameters(i)\n indices.append((a,b,c,d))\n periodicities.append(periodicity)\n phases.append(phase)\n ks.append(force_constant)\n \n# which quadruples have more than one torsion force acting on them?\n\nforces_per_torsion = dict()\nfor ind in indices:\n forces_per_torsion[ind] = []\n\nfor i,ind in enumerate(indices):\n forces_per_torsion[ind].append(i)\n \nmodes_per_torsion = dict()\nfor ind in indices:\n modes_per_torsion[ind] = 0\n \nfor i,ind in enumerate(indices):\n modes_per_torsion[ind] += periodicities[i]",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a9a8f0bb2157902a28a2e79d3da9b031a76fa18
| 220,031 |
ipynb
|
Jupyter Notebook
|
statistics/Blatt1/Blatt1.ipynb
|
RunOrVeith/algorithm-tests
|
dc6bc3fb1cca771fcf6445ade417157080dfddb5
|
[
"MIT"
] | null | null | null |
statistics/Blatt1/Blatt1.ipynb
|
RunOrVeith/algorithm-tests
|
dc6bc3fb1cca771fcf6445ade417157080dfddb5
|
[
"MIT"
] | null | null | null |
statistics/Blatt1/Blatt1.ipynb
|
RunOrVeith/algorithm-tests
|
dc6bc3fb1cca771fcf6445ade417157080dfddb5
|
[
"MIT"
] | null | null | null | 205.828812 | 67,262 | 0.870945 |
[
[
[
"# Aufgaben Blatt1\n# Aufgabe 1",
"_____no_output_____"
]
],
[
[
"year = 1998:2017\nsnowcover = c(25.0, 23.9, 25.1, 24.4, 21.2, 26.1, 23.2, 25.5, 24.9, 24.0, 21.3, 23.8, 26.1, 26.0, 26.1, 25.1, 22.2, 23.4, 22.6, 24.6)\nsnow = data.frame(years=year, covers=snowcover)",
"_____no_output_____"
],
[
"plot(snowcover~year, snow)",
"_____no_output_____"
],
[
"plot(snowcover~year, snow, type=\"l\")\nabline(lm(snowcover~year), col=\"red\")",
"_____no_output_____"
],
[
"hist(snow$covers, xlab=\"Snow Height\", main=\"Snow heights frequencies\")",
"_____no_output_____"
],
[
"plot(log(snow$covers)~snow$years)",
"_____no_output_____"
],
[
"plot(log(snow$covers)~snow$years, type=\"l\")\nabline(lm(log(snow$covers)~snow$years), col=\"red\")",
"_____no_output_____"
],
[
"hist(log(snow$covers), xlab=\"Snow Height\", main=\"Snow heights frequencies\")",
"_____no_output_____"
],
[
"snow2 = read.table(\"./snow.csv\", header=TRUE, dec=\".\", sep=\",\")",
"_____no_output_____"
],
[
"snow2",
"_____no_output_____"
]
],
[
[
"## Aufgabe 2",
"_____no_output_____"
]
],
[
[
"data(ChickWeight)\nChickWeight",
"_____no_output_____"
],
[
"day10s = ChickWeight[ChickWeight[, \"Time\"] == 10,]\ndiets = split(day10s, day10s$Diet)\nmeans = lapply(diets, function(d) mean(d$weight))\nmeans\n ",
"_____no_output_____"
]
],
[
[
"## Aufgabe 3",
"_____no_output_____"
]
],
[
[
"p_smaller_1.4 = punif(1.4, 1., 2.)\nd_at_1.4 = dunif(1.4, min=1., max=2.)\nquantiles = qunif(c(0.25, 0.75), 1., 2.)\np_smaller_1.4\nd_at_1.4\nquantiles",
"_____no_output_____"
],
[
"ex_b<-function(n) {\n sample = runif(n, 1., 2.)\n quantile(sample, probs=c(0.25, 0.75))\n}\nex_b(20)\nex_b(100)\nex_b(1000)\n\n",
"_____no_output_____"
],
[
"qa<-function(samples) {\n # Builtin: IQR(samples)\n quantiles = quantile(samples, probs=c(0.25, 0.75), names=F)\n quantiles[2] - quantiles[1]\n}\nqa(runif(100, 1.0, 2.))",
"_____no_output_____"
]
],
[
[
"### Aufgabe d): Herleitung Formel:\n$$\\Phi^{-1}_{\\mu, \\sigma^2}(0.75) - \\Phi^{-1}_{\\mu, \\sigma^2}(0.25) = \\mu + \\sigma\\Phi^{-1}_{0,1}(0.75) - \\mu - \\sigma\\Phi^{-1}_{0,1}(0.25) = \\sigma(\\Phi^{-1}_{0,1}(0.75) - \\Phi^{-1}_{0,1}(0.25)) \\Rightarrow \\sigma = \\frac{\\Phi^{-1}_{\\mu, \\sigma^2}(0.75) - \\Phi^{-1}_{\\mu, \\sigma^2}(0.25)}{\\sigma(\\Phi^{-1}_{0,1}(0.75) - \\Phi^{-1}_{0,1}(0.25))}$$\nDann schätzen wir den Zähler mit dem empirischen Quartilsabstand.\n",
"_____no_output_____"
]
],
[
[
"approx_sd<-function(n_samples, mu, sd) {\n samples = rnorm(n_samples, mu, sd)\n emp = qa(samples)\n norm_q = qnorm(0.75) - qnorm(0.25)\n c(sd(samples), emp/norm_q)\n}\napprox_sd(10000, 1, 0.5)",
"_____no_output_____"
],
[
"compute_sample_variance<-function(n, m, mu, sd) {\n twoxn_samples = replicate(n, approx_sd(m, mu, sd))\n apply(twoxn_samples, MARGIN=1, FUN=var)\n \n} \ncompute_sample_variance(10000, 100, 1., 0.2)",
"_____no_output_____"
]
],
[
[
"## Zusatzaufgabe",
"_____no_output_____"
]
],
[
[
"fib.iterative<-function(ns) {\n n = max(ns)\n a = 1\n b = 0\n rets = ns\n for (i in 0:n) {\n c = a\n a = b\n b = b + c\n idx = which(ns == i)\n if (length(idx) > 0) {\n rets[idx] = b\n }\n \n }\n rets\n}\n\nprint(fib.iterative(c(30,40,50)))",
"[1] 1346269 165580141 20365011074\n"
],
[
"fib.recursive<-function(n) {\n if (n == 1 || n == 2) {\n return(1)\n }\n return(fib.recursive(n-1) + fib.recursive(n-2))\n}\n\nfib.recursive(30)\nfib.recursive(35)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9aa0329234c92e21073710931fee74354121dc
| 131,650 |
ipynb
|
Jupyter Notebook
|
Classification/KNN_Classification.ipynb
|
thirupathi-chintu/Machine-Learning-with-Python
|
0bb8753a5140c8e69a24f2ab95c7ef133ac025a6
|
[
"BSD-2-Clause"
] | 1,803 |
2018-11-26T20:53:23.000Z
|
2022-03-31T15:25:29.000Z
|
Classification/KNN_Classification.ipynb
|
thirupathi-chintu/Machine-Learning-with-Python
|
0bb8753a5140c8e69a24f2ab95c7ef133ac025a6
|
[
"BSD-2-Clause"
] | 8 |
2019-02-05T04:09:57.000Z
|
2022-02-19T23:46:27.000Z
|
Classification/KNN_Classification.ipynb
|
thirupathi-chintu/Machine-Learning-with-Python
|
0bb8753a5140c8e69a24f2ab95c7ef133ac025a6
|
[
"BSD-2-Clause"
] | 1,237 |
2018-11-28T19:48:55.000Z
|
2022-03-31T15:25:07.000Z
| 141.864224 | 32,972 | 0.848918 |
[
[
[
"# K-nearest neighbor Classification",
"_____no_output_____"
],
[
"### Import packages and data set",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\ndf = pd.read_csv(\"Classified Data\",index_col=0)\ndf.head()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1000 entries, 0 to 999\nData columns (total 11 columns):\nWTT 1000 non-null float64\nPTI 1000 non-null float64\nEQW 1000 non-null float64\nSBI 1000 non-null float64\nLQE 1000 non-null float64\nQWG 1000 non-null float64\nFDJ 1000 non-null float64\nPJF 1000 non-null float64\nHQE 1000 non-null float64\nNXJ 1000 non-null float64\nTARGET CLASS 1000 non-null int64\ndtypes: float64(10), int64(1)\nmemory usage: 93.8 KB\n"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"### Check the spread of the features",
"_____no_output_____"
]
],
[
[
"l=list(df.columns)\nl[0:len(l)-2]",
"_____no_output_____"
]
],
[
[
"**Run a 'for' loop to draw boxlots of all the features for '0' and '1' TARGET CLASS**",
"_____no_output_____"
]
],
[
[
"for i in range(len(l)-1):\n sns.boxplot(x='TARGET CLASS',y=l[i], data=df)\n plt.figure()",
"_____no_output_____"
]
],
[
[
"### Scale the features using sklearn.preprocessing package",
"_____no_output_____"
],
[
"**Instantiate a scaler standardizing estimator**",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()",
"_____no_output_____"
]
],
[
[
"**Fit the features data only to this estimator (leaving the TARGET CLASS column) and transform**",
"_____no_output_____"
]
],
[
[
"scaler.fit(df.drop('TARGET CLASS',axis=1))\nscaled_features = scaler.transform(df.drop('TARGET CLASS',axis=1))",
"_____no_output_____"
],
[
"df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1])\ndf_feat.head()",
"_____no_output_____"
]
],
[
[
"### Train/Test split, model fit and prediction",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nX = df_feat\ny = df['TARGET CLASS']\nX_train, X_test, y_train, y_test = train_test_split(scaled_features,df['TARGET CLASS'],\n test_size=0.50, random_state=101)",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train,y_train)",
"_____no_output_____"
],
[
"pred = knn.predict(X_test)",
"_____no_output_____"
]
],
[
[
"**Evaluation of classification quality**",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report,confusion_matrix\nconf_mat=confusion_matrix(y_test,pred)\nprint(conf_mat)",
"[[233 17]\n [ 24 226]]\n"
],
[
"print(classification_report(y_test,pred))",
" precision recall f1-score support\n\n 0 0.91 0.93 0.92 250\n 1 0.93 0.90 0.92 250\n\navg / total 0.92 0.92 0.92 500\n\n"
],
[
"print(\"Misclassification error rate:\",round(np.mean(pred!=y_test),3))",
"Misclassification error rate: 0.082\n"
]
],
[
[
"**Choosing 'k' by elbow method**",
"_____no_output_____"
]
],
[
[
"error_rate = []\n\n# Will take some time\nfor i in range(1,60):\n \n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(X_train,y_train)\n pred_i = knn.predict(X_test)\n error_rate.append(np.mean(pred_i != y_test))",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,6))\nplt.plot(range(1,60),error_rate,color='blue', linestyle='dashed', marker='o',\n markerfacecolor='red', markersize=8)\nplt.title('Error Rate vs. K Value', fontsize=20)\nplt.xlabel('K',fontsize=15)\nplt.ylabel('Error (misclassification) Rate',fontsize=15)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9ad2ed1ec8919f21d04967db8a6e7591424ddb
| 266,902 |
ipynb
|
Jupyter Notebook
|
TensorFlow_20.ipynb
|
heejinADP/TIL
|
6f9a95c9a21e38d641f69152271b98700941f7ca
|
[
"MIT"
] | null | null | null |
TensorFlow_20.ipynb
|
heejinADP/TIL
|
6f9a95c9a21e38d641f69152271b98700941f7ca
|
[
"MIT"
] | null | null | null |
TensorFlow_20.ipynb
|
heejinADP/TIL
|
6f9a95c9a21e38d641f69152271b98700941f7ca
|
[
"MIT"
] | null | null | null | 81.796506 | 31,800 | 0.742636 |
[
[
[
"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"**confusion matrix**",
"_____no_output_____"
]
],
[
[
"sns.set(font_scale=2)\n# 행은 실제값, 열은 예측값\narray = [[5,0,0,0], # A인데 A로 예측한 것이 5건\n [0,10,0,0], # B인데 B로 예측한 것이 10건\n [0,0,15,0],\n [0,0,0,5]]",
"_____no_output_____"
],
[
"df_cm = pd.DataFrame(array, index = [i for i in \"ABCD\"], columns = [i for i in \"ABCD\"])\ndf_cm",
"_____no_output_____"
],
[
"plt.figure(figsize = (7,5))\nplt.title('confusion matrix')\nsns.heatmap(df_cm, annot = True)\nplt.show()",
"_____no_output_____"
],
[
"array = [[9,1,0,0], \n [1,15,3,1], \n [5,0,24,1],\n [0,4,1,15]]",
"_____no_output_____"
],
[
"df_cm = pd.DataFrame(array, index = [i for i in \"ABCD\"], columns = [i for i in \"ABCD\"])\ndf_cm\nplt.figure(figsize = (7,5))\nplt.title('confusion matrix')\nsns.heatmap(df_cm, annot = True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"* * * ",
"_____no_output_____"
],
[
"**mnist CLassifier - randomforest**",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"mnist = datasets.load_digits()\nfeatures, labels = mnist.data, mnist.target\nprint(np.shape((features)))\nprint(np.shape((labels)))",
"(1797, 64)\n(1797,)\n"
],
[
"def cross_validation(classifier, features, labels):\n cv_scores = []\n for i in range(10):\n scores = cross_val_score(classifier, features, labels, cv = 10, scoring='accuracy')\n cv_scores.append(scores.mean())\n return cv_scores\n \ndt_cv_scores = cross_validation(tree.DecisionTreeClassifier(), features, labels)\nrf_cv_scores = cross_validation(RandomForestClassifier(), features, labels)\n\ncv_list = [['random forest', rf_cv_scores],\n ['decision tree', dt_cv_scores]]\n\ndf = pd.DataFrame.from_items(cv_list)\ndf.plot()\nplt.show()\n\nprint(np.mean(dt_cv_scores))\nprint(np.mean(rf_cv_scores))",
"_____no_output_____"
]
],
[
[
"* * *",
"_____no_output_____"
],
[
"**KNN CLassifier**",
"_____no_output_____"
]
],
[
[
"import pandas\nwith open('DataSet/nba_2013.csv', 'r') as csvfile:\n nba = pandas.read_csv(csvfile)\nnba.head(15)",
"_____no_output_____"
],
[
"nba.columns",
"_____no_output_____"
],
[
"distance_columns = ['age', 'g', 'gs', 'mp', 'fg', 'fga',\n 'fg.', 'x3p', 'x3pa', 'x3p.', 'x2p', 'x2pa', 'x2p.', 'efg.', 'ft',\n 'fta', 'ft.', 'orb', 'drb', 'trb', 'ast', 'stl', 'blk', 'tov', 'pf',\n 'pts']\nlen(distance_columns)",
"_____no_output_____"
],
[
"import math\n\nselected_player = nba[nba[\"player\"]==\"LeBron James\"].iloc[0]\n\ndef euclidean_distance(row) :\n inner_value = 0\n for k in distance_columns :\n inner_value += (selected_player[k]-row[k])**2\n return math.sqrt(inner_value)\nLeBron_distance = nba.apply(euclidean_distance, axis = 1)\nLeBron_distance.head(15)",
"_____no_output_____"
],
[
"nba_numeric = nba[distance_columns]\nnba_numeric.head()",
"_____no_output_____"
],
[
"nba_normalized = (nba_numeric - nba_numeric.mean())/nba_numeric.std()\nnba_normalized.head()",
"_____no_output_____"
],
[
"from scipy.spatial import distance\n\nnba_normalized.fillna(0, inplace=True) # inplace = True : 기존객체(nba_normalized)에 지정된 값을 바꾸겠다\nnba_normalized[nba[\"player\"]==\"LeBron James\"]",
"_____no_output_____"
],
[
"LeBron_normalized = nba_normalized[nba[\"player\"]==\"LeBron James\"]\neuclidean_distances = nba_normalized.apply(lambda row : distance.euclidean(row, LeBron_normalized), axis =1)\neuclidean_distances.head(15)",
"_____no_output_____"
],
[
"distance_frame = pandas.DataFrame(data = {\"dist\":euclidean_distances, \"idx\":euclidean_distances.index})\ndistance_frame.head(15)",
"_____no_output_____"
],
[
"distance_frame.sort_values(\"dist\", inplace=True)\ndistance_frame.head(15)",
"_____no_output_____"
],
[
"distance_frame.iloc[1][\"idx\"]\ndistance_frame.iloc[1]",
"_____no_output_____"
],
[
"second_smallest = distance_frame.iloc[1][\"idx\"]\nmost_similar_to_Lebron = nba.loc[int(second_smallest)][\"player\"]\nprint(\"가장 비슷한 성적의 선수 : \", most_similar_to_Lebron)",
"가장 비슷한 성적의 선수 : Carmelo Anthony\n"
]
],
[
[
"* * * ",
"_____no_output_____"
],
[
"**K-means clustering**",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nimport pandas as pd\niris = datasets.load_iris()",
"_____no_output_____"
],
[
"labels = pd.DataFrame(iris.target)\nlabels.head()",
"_____no_output_____"
],
[
"labels.columns = ['labels']\n\ndata = pd.DataFrame(iris.data)\ndata.columns = ['Sepal_Length', 'Sepal_width', 'Petal_Lenght', 'Petal_width']\ndata.head(15)",
"_____no_output_____"
],
[
"data = pd.concat([data,labels], axis = 1)\ndata.head(15)",
"_____no_output_____"
],
[
"feature = data[['Sepal_Length', 'Sepal_width']]\nfeature.head(15)",
"_____no_output_____"
],
[
"from sklearn.cluster import KMeans \nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"model = KMeans(n_clusters = 3, algorithm='auto')\nmodel.fit(feature)\npredict = pd.DataFrame(model.predict(feature))\npredict.columns = ['predict']\npredict.head()",
"_____no_output_____"
],
[
"r = pd.concat([feature, predict], axis =1)\nr.head()",
"_____no_output_____"
],
[
"plt.scatter(r['Sepal_Length'], r['Sepal_width'],c=r['predict'], alpha=0.5)\nplt.show()",
"_____no_output_____"
],
[
"centers = pd.DataFrame(model.cluster_centers_, \n columns = ['Sepal_Length', 'Sepal_width'])\ncenters",
"_____no_output_____"
],
[
"center_x = centers['Sepal_Length']\ncenter_y = centers['Sepal_width']\nplt.scatter(center_x, center_y, s=50, marker = 'D', c ='r')\nplt.scatter(r['Sepal_Length'], r['Sepal_width'],c=r['predict'], alpha=0.5)\nplt.show()",
"_____no_output_____"
]
],
[
[
"* * *",
"_____no_output_____"
],
[
"**pipeline**",
"_____no_output_____"
],
[
"scaler와 kmeans를 순차적으로 실행시키는 기능을 수행",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nmodel = KMeans(n_clusters = 3)\nscaler = StandardScaler()\npipeline = make_pipeline(scaler, model)\npipeline.fit(feature)\n\npredict = pd.DataFrame(pipeline.predict(feature))\nks = range(1,10)\ninertias = []\nfor k in ks:\n model = KMeans(n_clusters = k)\n model.fit(feature)\n inertias.append(model.inertias_)\n#inertia_:inertia(관성:응집) value를 이용해서 적정수준의 클러스터 개수 파악\n\nplt.plot(ks, inertias, '-o')\nplt.xlabel('number of clusters, k')\nplt.ylabel('inertia')\nplt.xtkcks(ks)\nplt.show()\n\nct = pd.crosstab(data['labels'], r['predict'])\nprint(ct)\n\nmake_pipeline()",
"_____no_output_____"
]
],
[
[
"* * *",
"_____no_output_____"
],
[
"**PCA**",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndf = pd.DataFrame(columns=['calory', 'breakfast', 'lunch', 'dinner', 'exercise', 'body_shape'])\ndf.loc[0] = [1200, 1, 0, 0, 2, 'Skinny']\ndf.loc[1] = [2800, 1, 1, 1, 1, 'Normal']\ndf.loc[2] = [3500, 2, 2, 1, 0, 'Fat']\ndf.loc[3] = [1400, 0, 1, 0, 3, 'Skinny']\ndf.loc[4] = [5000, 2, 2, 2, 0, 'Fat']\ndf.loc[5] = [1300, 0, 0, 1, 2, 'Skinny']\ndf.loc[6] = [3000, 1, 0, 1, 1, 'Normal']\ndf.loc[7] = [4000, 2, 2, 2, 0, 'Fat']\ndf.loc[8] = [2600, 0, 2, 0, 0, 'Normal']\ndf.loc[9] = [3000, 1, 2, 1, 1, 'Fat']\n\ndf",
"_____no_output_____"
],
[
"X = df[['calory', 'breakfast', 'lunch', 'dinner', 'exercise']]\nprint(X)\nY = df[['body_shape']]\nprint(Y)",
" calory breakfast lunch dinner exercise\n0 1200 1 0 0 2\n1 2800 1 1 1 1\n2 3500 2 2 1 0\n3 1400 0 1 0 3\n4 5000 2 2 2 0\n5 1300 0 0 1 2\n6 3000 1 0 1 1\n7 4000 2 2 2 0\n8 2600 0 2 0 0\n9 3000 1 2 1 1\n body_shape\n0 Skinny\n1 Normal\n2 Fat\n3 Skinny\n4 Fat\n5 Skinny\n6 Normal\n7 Fat\n8 Normal\n9 Fat\n"
],
[
"from sklearn.preprocessing import StandardScaler\nx_std = StandardScaler().fit_transform(X)\nx_std",
"_____no_output_____"
],
[
"x_std.shape",
"_____no_output_____"
],
[
"features = x_std.T\nfeatures.shape",
"_____no_output_____"
],
[
"covariance_matrix = np.cov(features) # 공분산 : X = (10,5) => (5,10)\ncovariance_matrix",
"_____no_output_____"
],
[
"eig_vals, eig_vecs = np.linalg.eig(covariance_matrix)\nprint(\"고유벡터를 출력합니다 \\n%s\" % eig_vecs )",
"고유벡터를 출력합니다 \n[[ 0.508005 0.0169937 -0.84711404 0.11637853 0.10244985]\n [ 0.44660335 0.36890361 0.12808055 -0.63112016 -0.49973822]\n [ 0.38377913 -0.70804084 0.20681005 -0.40305226 0.38232213]\n [ 0.42845209 0.53194699 0.3694462 0.22228235 0.58954327]\n [-0.46002038 0.2816592 -0.29450345 -0.61341895 0.49601841]]\n"
],
[
"print(\"고유값을 출력합니다 : %s\" % eig_vals)\nprint(eig_vals[0]/sum(eig_vals))",
"고유값을 출력합니다 : [4.0657343 0.8387565 0.07629538 0.27758568 0.2971837 ]\n0.7318321731427544\n"
],
[
"x_std.shape",
"_____no_output_____"
],
[
"eig_vecs.T[0].shape",
"_____no_output_____"
],
[
"projected_X = x_std.dot(eig_vecs.T[0]) # 5차원 ->1차원\nprojected_X",
"_____no_output_____"
],
[
"res = pd.DataFrame(projected_X, columns = ['PC1'])\nres['y-axis'] = 0.0\nres['label'] = Y\nres",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\nsns.lmplot('PC1', 'y-axis', data = res, fit_reg = False, scatter_kws={\"s\":50}, hue = 'label')\nplt.title('PCA result')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9aeebe4078af5e4538416ed3e936036255e0cb
| 96,375 |
ipynb
|
Jupyter Notebook
|
utils/evaluation/multi_cam_plotting.ipynb
|
surirohit/multi-camera-deeptam
|
37288ec8ac11b020418f88547b1e4d810343e63a
|
[
"Apache-2.0"
] | 5 |
2019-10-24T20:06:16.000Z
|
2020-10-16T06:17:46.000Z
|
utils/evaluation/multi_cam_plotting.ipynb
|
surirohit/multi-camera-deeptam
|
37288ec8ac11b020418f88547b1e4d810343e63a
|
[
"Apache-2.0"
] | null | null | null |
utils/evaluation/multi_cam_plotting.ipynb
|
surirohit/multi-camera-deeptam
|
37288ec8ac11b020418f88547b1e4d810343e63a
|
[
"Apache-2.0"
] | 2 |
2020-01-06T03:20:05.000Z
|
2021-02-10T09:44:29.000Z
| 245.229008 | 51,960 | 0.912633 |
[
[
[
"import os\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom matplotlib.pyplot import cm",
"_____no_output_____"
],
[
"from library.trajectory import Trajectory\n# uzh trajectory toolbox\nsys.path.append(os.path.abspath('library/rpg_trajectory_evaluation/src/rpg_trajectory_evaluation'))\nimport plot_utils as pu",
"_____no_output_____"
],
[
"%matplotlib inline\n\nrc('font', **{'family': 'serif', 'serif': ['Cardo']})\nrc('text', usetex=True)",
"_____no_output_____"
]
],
[
[
"### Parameters (to specify/set)",
"_____no_output_____"
]
],
[
[
"# directory where the data is saved\nDATA_DIR = '/home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/resources/data/cvg_cams'\n# directory to save the output\nRESULTS_DIR = os.path.abspath('eval')\n# format in which to save the plots\nFORMAT = '.png'",
"_____no_output_____"
],
[
"# set the camera indices to plot\nCAM_IDXS = [0, 2, 4, 6, 8]\n# set the reference camera (in case groundtruth is not available)\nREF_CAM_ID = 0",
"_____no_output_____"
],
[
"# evaluation parameters\nalign_type = 'none' # choose from ['posyaw', 'sim3', 'se3', 'none']\nalign_num_frames = -1",
"_____no_output_____"
]
],
[
[
"### Variables to allow the plots to look nice",
"_____no_output_____"
]
],
[
[
"N = len(CAM_IDXS)\n\nALGORITHM_CONFIGS = []\nfor i in range(N):\n ALGORITHM_CONFIGS.append('cam_%d' % CAM_IDXS[i])\n\n# These are the labels that will be displayed for items in ALGORITHM_CONFIGS\nPLOT_LABELS = { 'cam_0': 'Camera 0',\n 'cam_2': 'Camera 2',\n 'cam_4': 'Camera 4',\n 'cam_6': 'Camera 6',\n 'cam_8': 'Camera 8'}\n\nPLOT_LABELS['cam_%d' % REF_CAM_ID] = PLOT_LABELS['cam_%d' % REF_CAM_ID] + ' (ref)'",
"_____no_output_____"
],
[
"# assgin colors to different configurations\nCOLORS = {}\ncolor = iter(cm.plasma(np.linspace(0, 0.75, N)))\nfor i in range(N):\n COLORS['cam_%d' % CAM_IDXS[i]] = next(color)",
"_____no_output_____"
]
],
[
[
"### Defining the txt files with the pose information",
"_____no_output_____"
]
],
[
[
"# file name for reference trajectory\nref_traj_file = os.path.join(DATA_DIR, 'cam_%d' % REF_CAM_ID, 'groundtruth.txt')\n# file names for camera trajectories\nestimated_traj_files = []\nfor i in range(N):\n # path to camera trajectory\n estimated_traj_file = os.path.join(DATA_DIR, 'cam_%d' % CAM_IDXS[i], 'groundtruth.txt')\n assert os.path.exists(estimated_traj_file), \"No corresponding file exists: %s!\" % estimated_traj_file\n estimated_traj_files.append(estimated_traj_file)",
"_____no_output_____"
]
],
[
[
"# Main",
"_____no_output_____"
]
],
[
[
"print(\"Going to analyze the results in {0}.\".format(DATA_DIR))\nprint(\"The plots will saved in {0}.\".format(RESULTS_DIR))\n\nif not os.path.exists(plots_dir):\n os.makedirs(plots_dir)\nos.makedies(RESULTS_DIR, )",
"Going to analyze the results in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/resources/data/cvg_cams.\nThe plots will saved in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation.\n"
],
[
"print(\"#####################################\")\nprint(\">>> Start loading and preprocessing all trajectories...\")\nprint(\"#####################################\")\n\nconfig_trajectories_list = []\nfor i in range(N):\n # create instance of trajectory object\n cur_traj = Trajectory(RESULTS_DIR, run_name='cam_%d' % CAM_IDXS[i], gt_traj_file=ref_traj_file, estimated_traj_file=estimated_traj_files[i], \\\n align_type=align_type, align_num_frames=align_num_frames)\n config_trajectories_list.append(cur_traj)",
"#####################################\n>>> Start loading and preprocessing all trajectories...\n#####################################\nLoading trajectory data...\nloading dataset in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation\n...done.\nComputing preset subtrajectory lengths for relative errors...\nUse percentage [0.1, 0.2, 0.3, 0.4, 0.5] of trajectory length.\n...done. Computed preset subtrajecory lengths: [0.0, 0.0, 0.0, 0.0, 1.0]\nAliging the trajectory estimate to the groundtruth...\nAlignment type is none.\nTo align all frames.\n... trajectory alignment done.\nLoading trajectory data...\nloading dataset in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation\n...done.\nComputing preset subtrajectory lengths for relative errors...\nUse percentage [0.1, 0.2, 0.3, 0.4, 0.5] of trajectory length.\n...done. Computed preset subtrajecory lengths: [0.0, 0.0, 0.0, 0.0, 1.0]\nAliging the trajectory estimate to the groundtruth...\nAlignment type is none.\nTo align all frames.\n... trajectory alignment done.\nLoading trajectory data...\nloading dataset in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation\n...done.\nComputing preset subtrajectory lengths for relative errors...\nUse percentage [0.1, 0.2, 0.3, 0.4, 0.5] of trajectory length.\n...done. Computed preset subtrajecory lengths: [0.0, 0.0, 0.0, 0.0, 1.0]\nAliging the trajectory estimate to the groundtruth...\nAlignment type is none.\nTo align all frames.\n... trajectory alignment done.\nLoading trajectory data...\nloading dataset in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation\n...done.\nComputing preset subtrajectory lengths for relative errors...\nUse percentage [0.1, 0.2, 0.3, 0.4, 0.5] of trajectory length.\n...done. 
Computed preset subtrajecory lengths: [0.0, 0.0, 0.0, 0.0, 0.0]\nAliging the trajectory estimate to the groundtruth...\nAlignment type is none.\nTo align all frames.\n... trajectory alignment done.\nLoading trajectory data...\nloading dataset in /home/mayankm/my_projects/multiview_deeptam_3DV/multi-camera-deeptam/utils/evaluation\n...done.\nComputing preset subtrajectory lengths for relative errors...\nUse percentage [0.1, 0.2, 0.3, 0.4, 0.5] of trajectory length.\n...done. Computed preset subtrajecory lengths: [0.0, 0.0, 0.0, 0.0, 0.0]\nAliging the trajectory estimate to the groundtruth...\nAlignment type is none.\nTo align all frames.\n... trajectory alignment done.\n"
],
[
"print(\"#####################################\")\nprint(\">>> Start plotting results....\")\nprint(\"#####################################\")\n\np_gt_0 = config_trajectories_list[0].p_gt\nfig1 = plt.figure(figsize=(10, 10))\nax1 = fig1.add_subplot(111, aspect='equal',\n xlabel='x [m]', ylabel='y [m]')\nfig2 = plt.figure(figsize=(8, 8))\nax2 = fig2.add_subplot(111, aspect='equal',\n xlabel='x [m]', ylabel='z [m]')\n\n# pu.plot_trajectory_top(ax1, p_gt_0, 'k', 'Groundtruth')\n# pu.plot_trajectory_side(ax2, p_gt_0,'k', 'Groundtruth')\n\nfor i in range(N):\n \n traj = config_trajectories_list[i]\n p_es_0 = traj.p_es_aligned\n alg = ALGORITHM_CONFIGS[i]\n print('Plotting for %s' % alg)\n \n # plot trajectory\n pu.plot_trajectory_top(ax1, p_es_0, COLORS[alg], PLOT_LABELS[alg])\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n fig1.tight_layout()\n\n # plot trajectory side\n pu.plot_trajectory_side(ax2, p_es_0, COLORS[alg], PLOT_LABELS[alg])\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n fig2.tight_layout()",
"No handles with labels found to put in legend.\n"
],
[
"fig1.savefig(RESULTS_DIR + '/plots/trajectory_top_' + align_type + FORMAT,bbox_inches=\"tight\")\nplt.close(fig1)\nfig2.savefig(RESULTS_DIR + '/plots/trajectory_side_' + align_type + FORMAT, bbox_inches=\"tight\")\nplt.close(fig2)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a9b07fee59169ecade86f8d2a7c1663bb2a8e1b
| 170,603 |
ipynb
|
Jupyter Notebook
|
Code/Chapter 6 - Second-Order Methods/6.1 Newton.ipynb
|
JuliaChem/ModernCompAlg
|
ef28f85d9b676f37e6ce9664ddfc7c2fd7261ee4
|
[
"MIT"
] | null | null | null |
Code/Chapter 6 - Second-Order Methods/6.1 Newton.ipynb
|
JuliaChem/ModernCompAlg
|
ef28f85d9b676f37e6ce9664ddfc7c2fd7261ee4
|
[
"MIT"
] | null | null | null |
Code/Chapter 6 - Second-Order Methods/6.1 Newton.ipynb
|
JuliaChem/ModernCompAlg
|
ef28f85d9b676f37e6ce9664ddfc7c2fd7261ee4
|
[
"MIT"
] | null | null | null | 304.105169 | 13,119 | 0.724735 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a9b106aa6379a618354f7aa8e81c155af2553d9
| 172,064 |
ipynb
|
Jupyter Notebook
|
notebooks/01_simulation_datasets/05_train-test_intra-species_diff-richness/02_sim_train_n1000_r6_rich0p1.ipynb
|
chrisLanderson/DeepMAsED
|
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
|
[
"MIT"
] | 23 |
2019-09-11T10:48:22.000Z
|
2021-10-06T19:59:07.000Z
|
notebooks/01_simulation_datasets/05_train-test_intra-species_diff-richness/02_sim_train_n1000_r6_rich0p1.ipynb
|
chrisLanderson/DeepMAsED
|
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
|
[
"MIT"
] | 8 |
2019-09-20T17:20:05.000Z
|
2020-12-02T09:41:19.000Z
|
notebooks/01_simulation_datasets/05_train-test_intra-species_diff-richness/02_sim_train_n1000_r6_rich0p1.ipynb
|
chrisLanderson/DeepMAsED
|
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
|
[
"MIT"
] | 8 |
2019-09-10T15:31:04.000Z
|
2021-11-17T00:16:18.000Z
| 214.543641 | 66,234 | 0.878074 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Goal\" data-toc-modified-id=\"Goal-1\"><span class=\"toc-item-num\">1 </span>Goal</a></span></li><li><span><a href=\"#Var\" data-toc-modified-id=\"Var-2\"><span class=\"toc-item-num\">2 </span>Var</a></span><ul class=\"toc-item\"><li><span><a href=\"#Init\" data-toc-modified-id=\"Init-2.1\"><span class=\"toc-item-num\">2.1 </span>Init</a></span></li></ul></li><li><span><a href=\"#DeepMAsED-SM\" data-toc-modified-id=\"DeepMAsED-SM-3\"><span class=\"toc-item-num\">3 </span>DeepMAsED-SM</a></span><ul class=\"toc-item\"><li><span><a href=\"#Config\" data-toc-modified-id=\"Config-3.1\"><span class=\"toc-item-num\">3.1 </span>Config</a></span></li><li><span><a href=\"#Run\" data-toc-modified-id=\"Run-3.2\"><span class=\"toc-item-num\">3.2 </span>Run</a></span></li></ul></li><li><span><a href=\"#Summary\" data-toc-modified-id=\"Summary-4\"><span class=\"toc-item-num\">4 </span>Summary</a></span><ul class=\"toc-item\"><li><span><a href=\"#Communities\" data-toc-modified-id=\"Communities-4.1\"><span class=\"toc-item-num\">4.1 </span>Communities</a></span></li><li><span><a href=\"#Feature-tables\" data-toc-modified-id=\"Feature-tables-4.2\"><span class=\"toc-item-num\">4.2 </span>Feature tables</a></span><ul class=\"toc-item\"><li><span><a href=\"#No.-of-contigs\" data-toc-modified-id=\"No.-of-contigs-4.2.1\"><span class=\"toc-item-num\">4.2.1 </span>No. of contigs</a></span></li><li><span><a href=\"#Misassembly-types\" data-toc-modified-id=\"Misassembly-types-4.2.2\"><span class=\"toc-item-num\">4.2.2 </span>Misassembly types</a></span></li></ul></li></ul></li><li><span><a href=\"#sessionInfo\" data-toc-modified-id=\"sessionInfo-5\"><span class=\"toc-item-num\">5 </span>sessionInfo</a></span></li></ul></div>",
"_____no_output_____"
],
[
"# Goal\n\n* Replicate metagenome assemblies using intra-spec training genome dataset\n* Richness = 0.1 (10% of all ref genomes used)",
"_____no_output_____"
],
[
"# Var",
"_____no_output_____"
]
],
[
[
"ref_dir = '/ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes/intraSpec/'\nref_file = file.path(ref_dir, 'GTDBr86_genome-refs_train_clean.tsv')\nwork_dir = '/ebio/abt3_projects/databases_no-backup/DeepMAsED/train_runs/intra-species/diff_richness/n1000_r6_rich0p1/'\n\n# params\npipeline_dir = '/ebio/abt3_projects/databases_no-backup/bin/deepmased/DeepMAsED-SM/'",
"_____no_output_____"
]
],
[
[
"## Init",
"_____no_output_____"
]
],
[
[
"library(dplyr)\nlibrary(tidyr)\nlibrary(ggplot2)\nlibrary(data.table)\nsource('/ebio/abt3_projects/software/dev/DeepMAsED/bin/misc_r_functions/init.R')",
"_____no_output_____"
],
[
"#' \"cat {file}\" in R\ncat_file = function(file_name){\n cmd = paste('cat', file_name, collapse=' ')\n system(cmd, intern=TRUE) %>% paste(collapse='\\n') %>% cat\n}",
"_____no_output_____"
]
],
[
[
"# DeepMAsED-SM",
"_____no_output_____"
],
[
"## Config",
"_____no_output_____"
]
],
[
[
"config_file = file.path(work_dir, 'config.yaml')\ncat_file(config_file)",
"# Input\ngenomes_file: /ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes/intraSpec/GTDBr86_genome-refs_train_clean.tsv\n\n# Output location\noutput_dir: /ebio/abt3_projects/databases_no-backup/DeepMAsED/train_runs/intra-species/diff_richness/n1000_r6_rich0p1/\n\n\n# software parameters\n# Use \"Skip\" to skip steps. If no params for rule, use \"\"\nparams:\n # simulating metagenomes\n reps: 6\n MGSIM:\n genome_download: \"\"\n communities: --richness 0.1\n reads: --sr-seq-depth 1e6 --art-paired --art-mflen 250\n # coverage\n nonpareil: -T kmer\n nonpareil_summary: 1e9 # this is target seq. depth \n # assemblying metagenomes\n assemblers:\n metaspades: -k auto --only-assembler\n megahit: --min-count 3 --min-contig-len 1000 --presets meta-sensitive\n # assembly filtering\n contig_length_cutoff: 1000 # length in bp \n # assessing assembly errors\n minimap2: \"\"\n metaquast: --min-identity 95 --extensive-mis-size 100 --no-icarus --max-ref-number 0\n # mapping reads to contigs\n samtools: \"\"\n # creating DL features\n make_features: \"\"\n # state-of-the-art\n ## ALE\n ALE: \"\"\n ## VALET\n VALET: \"\"\n\n# snakemake pipeline\npipeline:\n snakemake_folder: ./\n script_folder: ./bin/scripts/\n temp_folder: /tmp/global/\n rnd_seed: 83941"
]
],
[
[
"## Run",
"_____no_output_____"
],
[
"```\n(snakemake_dev) @ rick:/ebio/abt3_projects/databases_no-backup/bin/deepmased/DeepMAsED-SM\n$ screen -L -S DM-intraS-rich0.1 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/DeepMAsED/train_runs/intra-species/diff_richness/n1000_r6_rich0p1/config.yaml cluster.json /ebio/abt3_projects/databases_no-backup/DeepMAsED/train_runs/intra-species/diff_richness/n1000_r6_rich0p1/SGE_log 20\n```",
"_____no_output_____"
],
[
"# Summary",
"_____no_output_____"
],
[
"## Communities",
"_____no_output_____"
]
],
[
[
"comm_files = list.files(file.path(work_dir, 'MGSIM'), 'comm_wAbund.txt', full.names=TRUE, recursive=TRUE)\ncomm_files %>% length %>% print\ncomm_files %>% head",
"[1] 6\n"
],
[
"comms = list()\nfor(F in comm_files){\n df = read.delim(F, sep='\\t')\n df$Rep = basename(dirname(F))\n comms[[F]] = df\n}\ncomms = do.call(rbind, comms)\nrownames(comms) = 1:nrow(comms)\ncomms %>% dfhead",
"[1] 600 5\n"
],
[
"p = comms %>%\n mutate(Perc_rel_abund = ifelse(Perc_rel_abund == 0, 1e-5, Perc_rel_abund)) %>%\n group_by(Taxon) %>%\n summarize(mean_perc_abund = mean(Perc_rel_abund),\n sd_perc_abund = sd(Perc_rel_abund)) %>%\n ungroup() %>%\n mutate(neg_sd_perc_abund = mean_perc_abund - sd_perc_abund,\n pos_sd_perc_abund = mean_perc_abund + sd_perc_abund,\n neg_sd_perc_abund = ifelse(neg_sd_perc_abund <= 0, 1e-5, neg_sd_perc_abund)) %>%\n mutate(Taxon = Taxon %>% reorder(-mean_perc_abund)) %>%\n ggplot(aes(Taxon, mean_perc_abund)) +\n geom_linerange(aes(ymin=neg_sd_perc_abund, ymax=pos_sd_perc_abund),\n size=0.3, alpha=0.3) +\n geom_point(size=0.5, alpha=0.4, color='red') +\n labs(y='% abundance') +\n theme_bw() +\n theme(\n axis.text.x = element_blank(),\n panel.grid.major.x = element_blank(), \n panel.grid.major.y = element_blank(), \n panel.grid.minor.x = element_blank(),\n panel.grid.minor.y = element_blank()\n )\n\ndims(10,2.5)\nplot(p)",
"Warning message:\n“Removed 362 rows containing missing values (geom_linerange).”"
],
[
"dims(10,2.5)\nplot(p + scale_y_log10())",
"Warning message:\n“Removed 362 rows containing missing values (geom_linerange).”"
]
],
[
[
"## Feature tables",
"_____no_output_____"
]
],
[
[
"feat_files = list.files(file.path(work_dir, 'map'), 'features.tsv.gz', full.names=TRUE, recursive=TRUE)\nfeat_files %>% length %>% print\nfeat_files %>% head",
"[1] 12\n"
],
[
"feats = list()\nfor(F in feat_files){\n cmd = glue::glue('gunzip -c {F}', F=F)\n df = fread(cmd, sep='\\t') %>%\n distinct(contig, assembler, Extensive_misassembly)\n df$Rep = basename(dirname(dirname(F)))\n feats[[F]] = df\n}\nfeats = do.call(rbind, feats)\nrownames(feats) = 1:nrow(feats)\nfeats %>% dfhead",
"[1] 90981 4\n"
]
],
[
[
"### No. of contigs",
"_____no_output_____"
]
],
[
[
"feats_s = feats %>%\n group_by(assembler, Rep) %>%\n summarize(n_contigs = n_distinct(contig)) %>%\n ungroup \n\nfeats_s$n_contigs %>% summary",
"_____no_output_____"
]
],
[
[
"### Misassembly types",
"_____no_output_____"
]
],
[
[
"p = feats %>%\n mutate(Extensive_misassembly = ifelse(Extensive_misassembly == '', 'None',\n Extensive_misassembly)) %>%\n group_by(Extensive_misassembly, assembler, Rep) %>%\n summarize(n = n()) %>%\n ungroup() %>%\n ggplot(aes(Extensive_misassembly, n, color=assembler)) +\n geom_boxplot() +\n scale_y_log10() +\n labs(x='metaQUAST extensive mis-assembly', y='Count') +\n coord_flip() +\n theme_bw() +\n theme(\n axis.text.x = element_text(angle=45, hjust=1)\n )\n\ndims(8,4)\nplot(p)",
"_____no_output_____"
]
],
[
[
"# sessionInfo",
"_____no_output_____"
]
],
[
[
"sessionInfo()",
"_____no_output_____"
],
[
"pipelineInfo(pipeline_dir)",
"Cannot find README.md file in pipeline directory"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9b143c66747d91f7865a511abaa24fe05051ca
| 27,372 |
ipynb
|
Jupyter Notebook
|
reinforcement_learning/rl_objecttracker_robomaker_coach_gazebo/rl_objecttracker_coach_robomaker.ipynb
|
yifeim/amazon-sagemaker-examples
|
80df7d61a4bf14a11f0442020e2003a7c1f78115
|
[
"Apache-2.0"
] | 4 |
2018-12-03T08:14:15.000Z
|
2019-01-25T04:06:20.000Z
|
reinforcement_learning/rl_objecttracker_robomaker_coach_gazebo/rl_objecttracker_coach_robomaker.ipynb
|
reddyreddys255/amazon-sagemaker-examples
|
f00ffb3260752b19f0b8a7f5d94f7c07bad2ca55
|
[
"Apache-2.0"
] | null | null | null |
reinforcement_learning/rl_objecttracker_robomaker_coach_gazebo/rl_objecttracker_coach_robomaker.ipynb
|
reddyreddys255/amazon-sagemaker-examples
|
f00ffb3260752b19f0b8a7f5d94f7c07bad2ca55
|
[
"Apache-2.0"
] | 1 |
2019-08-06T07:27:17.000Z
|
2019-08-06T07:27:17.000Z
| 40.67162 | 619 | 0.584356 |
[
[
[
"# Distributed Object Tracker RL training with Amazon SageMaker RL and RoboMaker\n\n---\n## Introduction\n\n\nIn this notebook, we show you how you can apply reinforcement learning to train a Robot (named Waffle) track and follow another Robot (named Burger) by using the [Clipped PPO](https://coach.nervanasys.com/algorithms/policy_optimization/cppo/index.html) algorithm implementation in [coach](https://ai.intel.com/r-l-coach/) toolkit, [Tensorflow](https://www.tensorflow.org/) as the deep learning framework, and [AWS RoboMaker](https://console.aws.amazon.com/robomaker/home#welcome) as the simulation environment.\n\n\n\n---\n## How it works? \n\n\nThe reinforcement learning agent (i.e. Waffle) learns to track and follow Burger by interacting with its environment, e.g., visual world around it, by taking an action in a given state to maximize the expected reward. The agent learns the optimal plan of actions in training by trial-and-error through multiple episodes. \n \nThis notebook shows an example of distributed RL training across SageMaker and two RoboMaker simulation envrionments that perform the **rollouts** - execute a fixed number of episodes using the current model or policy. The rollouts collect agent experiences (state-transition tuples) and share this data with SageMaker for training. SageMaker updates the model policy which is then used to execute the next sequence of rollouts. This training loop continues until the model converges, i.e. the car learns to drive and stops going off-track. More formally, we can define the problem in terms of the following: \n\n1. **Objective**: Learn to drive toward and reach the Burger.\n2. **Environment**: A simulator with Burger hosted on AWS RoboMaker.\n3. **State**: The driving POV image captured by the Waffle's head camera.\n4. **Action**: Six discrete steering wheel positions at different angles (configurable)\n5. **Reward**: Reward is inversely proportional to distance from Burger. 
Waffle gets more reward as it get closer to the Burger. It gets a reward of 0 if the action takes it away from Burger. \n\n---\n## Prequisites\n### Imports\nTo get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.\n\nYou can run this notebook from your local host or from a SageMaker notebook instance. In both of these scenarios, you can run the following to launch a training job on `SageMaker` and a simulation job on `RoboMaker`.",
"_____no_output_____"
]
],
[
[
"import sagemaker\nimport boto3\nimport sys\nimport os\nimport glob\nimport re\nimport subprocess\nfrom IPython.display import Markdown\nimport time\nfrom time import gmtime, strftime\nsys.path.append(\"common\")\nfrom misc import get_execution_role\nfrom sagemaker.rl import RLEstimator, RLToolkit, RLFramework\nfrom markdown_helper import *",
"_____no_output_____"
]
],
[
[
"### Setup S3 bucket",
"_____no_output_____"
]
],
[
[
"# S3 bucket\nsage_session = sagemaker.session.Session()\ns3_bucket = sage_session.default_bucket()\ns3_output_path = 's3://{}/'.format(s3_bucket) # SDK appends the job name and output folder\nprint(\"S3 bucket path: {}\".format(s3_output_path))",
"_____no_output_____"
]
],
[
[
"### Define Variables\n\nWe define variables such as the job prefix for the training jobs and s3_prefix for storing metadata required for synchronization between the training and simulation jobs",
"_____no_output_____"
]
],
[
[
"# create unique job name \njob_name_prefix = 'rl-object-tracker'\n\n# create unique job name\njob_name = s3_prefix = job_name_prefix + \"-sagemaker-\" + strftime(\"%y%m%d-%H%M%S\", gmtime())\n\n# Duration of job in seconds (5 hours)\njob_duration_in_seconds = 3600 * 5\n\naws_region = sage_session.boto_region_name\nprint(\"S3 bucket path: {}{}\".format(s3_output_path, job_name))\n\n\nif aws_region not in [\"us-west-2\", \"us-east-1\", \"eu-west-1\"]:\n raise Exception(\"This notebook uses RoboMaker which is available only in US East (N. Virginia), US West (Oregon) and EU (Ireland). Please switch to one of these regions.\")\nprint(\"Model checkpoints and other metadata will be stored at: {}{}\".format(s3_output_path, job_name))",
"_____no_output_____"
]
],
[
[
"### Create an IAM role\nEither get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running from local machine, use utils method `role = get_execution_role('role_name')` to create an execution role.",
"_____no_output_____"
]
],
[
[
"try:\n role = sagemaker.get_execution_role()\nexcept:\n role = get_execution_role('sagemaker')\n\nprint(\"Using IAM role arn: {}\".format(role))",
"_____no_output_____"
]
],
[
[
"### Permission setup for invoking AWS RoboMaker from this notebook",
"_____no_output_____"
],
[
"In order to enable this notebook to be able to execute AWS RoboMaker jobs, we need to add one trust relationship to the default execution role of this notebook.\n",
"_____no_output_____"
]
],
[
[
"display(Markdown(generate_help_for_robomaker_trust_relationship(role)))",
"_____no_output_____"
]
],
[
[
"## Configure VPC\n\nSince SageMaker and RoboMaker have to communicate with each other over the network, both of these services need to run in VPC mode. This can be done by supplying subnets and security groups to the job launching scripts. \nWe will use the default VPC configuration for this example.",
"_____no_output_____"
]
],
[
[
"ec2 = boto3.client('ec2')\ndefault_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs'] if vpc[\"IsDefault\"] == True][0]\n\ndefault_security_groups = [group[\"GroupId\"] for group in ec2.describe_security_groups()['SecurityGroups'] \\\n if group[\"GroupName\"] == \"default\" and group[\"VpcId\"] == default_vpc]\n\ndefault_subnets = [subnet[\"SubnetId\"] for subnet in ec2.describe_subnets()[\"Subnets\"] \\\n if subnet[\"VpcId\"] == default_vpc and subnet['DefaultForAz']==True]\n\nprint(\"Using default VPC:\", default_vpc)\nprint(\"Using default security group:\", default_security_groups)\nprint(\"Using default subnets:\", default_subnets)",
"_____no_output_____"
]
],
[
[
"A SageMaker job running in VPC mode cannot access S3 resourcs. So, we need to create a VPC S3 endpoint to allow S3 access from SageMaker container. To learn more about the VPC mode, please visit [this link.](https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)",
"_____no_output_____"
],
[
"> The cell below should be executed to create the VPC S3 endpoint only if your are running this example for the first time. If the execution fails due to insufficient premissions or some other reasons, please create a VPC S3 endpoint manually by following [create-s3-endpoint.md](create-s3-endpoint.md) (can be found in the same folder as this notebook). ",
"_____no_output_____"
]
],
[
[
"try:\n route_tables = [route_table[\"RouteTableId\"] for route_table in ec2.describe_route_tables()['RouteTables']\\\n if route_table['VpcId'] == default_vpc]\nexcept Exception as e:\n if \"UnauthorizedOperation\" in str(e):\n display(Markdown(generate_help_for_s3_endpoint_permissions(role)))\n else:\n display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc)))\n raise e\n\nprint(\"Trying to attach S3 endpoints to the following route tables:\", route_tables)\n\nassert len(route_tables) >= 1, \"No route tables were found. Please follow the VPC S3 endpoint creation \"\\\n \"guide by clicking the above link.\"\n\ntry:\n ec2.create_vpc_endpoint(DryRun=False,\n VpcEndpointType=\"Gateway\",\n VpcId=default_vpc,\n ServiceName=\"com.amazonaws.{}.s3\".format(aws_region),\n RouteTableIds=route_tables)\n print(\"S3 endpoint created successfully!\")\nexcept Exception as e:\n if \"RouteAlreadyExists\" in str(e):\n print(\"S3 endpoint already exists.\")\n elif \"UnauthorizedOperation\" in str(e):\n display(Markdown(generate_help_for_s3_endpoint_permissions(role)))\n raise e\n else:\n display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc)))\n raise e",
"_____no_output_____"
]
],
[
[
"## Setup the environment\n",
"_____no_output_____"
],
[
"The environment is defined in a Python file called “object_tracker_env.py” and the file can be found at `src/robomaker/environments/`. This file implements the gym interface for our Gazebo based RoboMakersimulator. This is a common environment file used by both SageMaker and RoboMaker. The environment variable - `NODE_TYPE` defines which node the code is running on. So, the expressions that have `rospy` dependencies are executed on RoboMaker only. \n\nWe can experiment with different reward functions by modifying `reward_function` in this file. Action space and steering angles can be changed by modifying the step method in `TurtleBot3ObjectTrackerAndFollowerDiscreteEnv` class.",
"_____no_output_____"
],
[
"### Configure the preset for RL algorithm\nThe parameters that configure the RL training job are defined in `src/robomaker/presets/object_tracker.py`. Using the preset file, you can define agent parameters to select the specific agent algorithm. We suggest using Clipped PPO for this example. \nYou can edit this file to modify algorithm parameters like learning_rate, neural network structure, batch_size, discount factor etc.",
"_____no_output_____"
]
],
[
[
"!pygmentize src/robomaker/presets/object_tracker.py",
"_____no_output_____"
]
],
[
[
"### Training Entrypoint\nThe training code is written in the file “training_worker.py” which is uploaded in the /src directory. At a high level, it does the following:\n- Uploads SageMaker node's IP address.\n- Starts a Redis server which receives agent experiences sent by rollout worker[s] (RoboMaker simulator).\n- Trains the model everytime after a certain number of episodes are received.\n- Uploads the new model weights on S3. The rollout workers then update their model to execute the next set of episodes.",
"_____no_output_____"
]
],
[
[
"# Uncomment the line below to see the training code\n#!pygmentize src/training_worker.py",
"_____no_output_____"
]
],
[
[
"## Train the model using Python SDK/ script mode",
"_____no_output_____"
]
],
[
[
"s3_location = \"s3://%s/%s\" % (s3_bucket, s3_prefix)\n!aws s3 rm --recursive {s3_location}\n\n\n# Make any changes to the envrironment and preset files below and upload these files if you want to use custom environment and preset\n!aws s3 cp src/robomaker/environments/ {s3_location}/environments/ --recursive --exclude \".ipynb_checkpoints*\"\n!aws s3 cp src/robomaker/presets/ {s3_location}/presets/ --recursive --exclude \".ipynb_checkpoints*\"",
"_____no_output_____"
]
],
[
[
"First, we define the following algorithm metrics that we want to capture from cloudwatch logs to monitor the training progress. These are algorithm specific parameters and might change for different algorithm. We use [Clipped PPO](https://coach.nervanasys.com/algorithms/policy_optimization/cppo/index.html) for this example.",
"_____no_output_____"
]
],
[
[
"metric_definitions = [\n # Training> Name=main_level/agent, Worker=0, Episode=19, Total reward=-102.88, Steps=19019, Training iteration=1\n {'Name': 'reward-training',\n 'Regex': '^Training>.*Total reward=(.*?),'},\n \n # Policy training> Surrogate loss=-0.32664725184440613, KL divergence=7.255815035023261e-06, Entropy=2.83156156539917, training epoch=0, learning_rate=0.00025\n {'Name': 'ppo-surrogate-loss',\n 'Regex': '^Policy training>.*Surrogate loss=(.*?),'},\n {'Name': 'ppo-entropy',\n 'Regex': '^Policy training>.*Entropy=(.*?),'},\n \n # Testing> Name=main_level/agent, Worker=0, Episode=19, Total reward=1359.12, Steps=20015, Training iteration=2\n {'Name': 'reward-testing',\n 'Regex': '^Testing>.*Total reward=(.*?),'},\n]",
"_____no_output_____"
]
],
[
[
"We use the RLEstimator for training RL jobs.\n\n1. Specify the source directory where the environment, presets and training code is uploaded.\n2. Specify the entry point as the training code\n3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.\n4. Define the training parameters such as the instance count, instance type, job name, s3_bucket and s3_prefix for storing model checkpoints and metadata. **Only 1 training instance is supported for now.**\n4. Set the RLCOACH_PRESET as \"object_tracker\" for this example.\n5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.",
"_____no_output_____"
]
],
[
[
"RLCOACH_PRESET = \"object_tracker\"\n\ninstance_type = \"ml.c5.4xlarge\"\n \nestimator = RLEstimator(entry_point=\"training_worker.py\",\n source_dir='src',\n dependencies=[\"common/sagemaker_rl\"],\n toolkit=RLToolkit.COACH,\n toolkit_version='0.11.0',\n framework=RLFramework.TENSORFLOW,\n role=role,\n train_instance_type=instance_type,\n train_instance_count=1,\n output_path=s3_output_path,\n base_job_name=job_name_prefix,\n train_max_run=job_duration_in_seconds,\n hyperparameters={\"s3_bucket\": s3_bucket,\n \"s3_prefix\": s3_prefix,\n \"aws_region\": aws_region,\n \"RLCOACH_PRESET\": RLCOACH_PRESET,\n },\n metric_definitions = metric_definitions,\n subnets=default_subnets,\n security_group_ids=default_security_groups,\n )\n\nestimator.fit(job_name=job_name, wait=False)",
"_____no_output_____"
]
],
[
[
"### Start the Robomaker job",
"_____no_output_____"
]
],
[
[
"from botocore.exceptions import UnknownServiceError\n\nrobomaker = boto3.client(\"robomaker\")",
"_____no_output_____"
]
],
[
[
"### Create Simulation Application\n\nWe first create a RoboMaker simulation application using the `object-tracker public bundle`. Please refer to [RoboMaker Sample Application Github Repository](https://github.com/aws-robotics/aws-robomaker-sample-application-objecttracker) if you want to learn more about this bundle or modify it.",
"_____no_output_____"
]
],
[
[
"bundle_s3_key = 'object-tracker/simulation_ws.tar.gz'\nbundle_source = {'s3Bucket': s3_bucket,\n 's3Key': bundle_s3_key,\n 'architecture': \"X86_64\"}\nsimulation_software_suite={'name': 'Gazebo',\n 'version': '7'}\nrobot_software_suite={'name': 'ROS',\n 'version': 'Kinetic'}\nrendering_engine={'name': 'OGRE',\n 'version': '1.x'}",
"_____no_output_____"
],
[
"simulation_application_bundle_location = \"https://s3-us-west-2.amazonaws.com/robomaker-applications-us-west-2-11d8d0439f6a/object-tracker/object-tracker-1.0.74.0.1.0.105.0/simulation_ws.tar.gz\"\n\n!wget {simulation_application_bundle_location}\n!aws s3 cp simulation_ws.tar.gz s3://{s3_bucket}/{bundle_s3_key}\n!rm simulation_ws.tar.gz",
"_____no_output_____"
],
[
"app_name = \"object-tracker-sample-application\" + strftime(\"%y%m%d-%H%M%S\", gmtime())\n\ntry:\n response = robomaker.create_simulation_application(name=app_name,\n sources=[bundle_source],\n simulationSoftwareSuite=simulation_software_suite,\n robotSoftwareSuite=robot_software_suite,\n renderingEngine=rendering_engine\n )\n simulation_app_arn = response[\"arn\"]\n print(\"Created a new simulation app with ARN:\", simulation_app_arn)\nexcept Exception as e:\n if \"AccessDeniedException\" in str(e):\n display(Markdown(generate_help_for_robomaker_all_permissions(role)))\n raise e\n else:\n raise e",
"_____no_output_____"
]
],
[
[
"### Launch the Simulation job on RoboMaker\n\nWe create [AWS RoboMaker](https://console.aws.amazon.com/robomaker/home#welcome) Simulation Jobs that simulates the environment and shares this data with SageMaker for training. ",
"_____no_output_____"
]
],
[
[
"num_simulation_workers = 1\n\nenvriron_vars = {\n \"MODEL_S3_BUCKET\": s3_bucket,\n \"MODEL_S3_PREFIX\": s3_prefix,\n \"ROS_AWS_REGION\": aws_region,\n \"MARKOV_PRESET_FILE\": \"object_tracker.py\",\n \"NUMBER_OF_ROLLOUT_WORKERS\": str(num_simulation_workers)}\n\nsimulation_application = {\"application\":simulation_app_arn,\n \"launchConfig\": {\"packageName\": \"object_tracker_simulation\",\n \"launchFile\": \"distributed_training.launch\",\n \"environmentVariables\": envriron_vars}\n }\n \nvpcConfig = {\"subnets\": default_subnets,\n \"securityGroups\": default_security_groups,\n \"assignPublicIp\": True}\n\nresponses = []\nfor job_no in range(num_simulation_workers):\n response = robomaker.create_simulation_job(iamRole=role,\n clientRequestToken=strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime()),\n maxJobDurationInSeconds=job_duration_in_seconds,\n failureBehavior=\"Continue\",\n simulationApplications=[simulation_application],\n vpcConfig=vpcConfig\n )\n responses.append(response)\n\nprint(\"Created the following jobs:\")\njob_arns = [response[\"arn\"] for response in responses]\nfor job_arn in job_arns:\n print(\"Job ARN\", job_arn) ",
"_____no_output_____"
]
],
[
[
"### Visualizing the simulations in RoboMaker",
"_____no_output_____"
],
[
"You can visit the RoboMaker console to visualize the simulations or run the following cell to generate the hyperlinks.",
"_____no_output_____"
]
],
[
[
"display(Markdown(generate_robomaker_links(job_arns, aws_region)))",
"_____no_output_____"
]
],
[
[
"### Clean Up",
"_____no_output_____"
],
[
"Execute the cells below if you want to kill RoboMaker and SageMaker job. It also removes RoboMaker resources created during the run.",
"_____no_output_____"
]
],
[
[
"for job_arn in job_arns:\n robomaker.cancel_simulation_job(job=job_arn)",
"_____no_output_____"
],
[
"sage_session.sagemaker_client.stop_training_job(TrainingJobName=estimator._current_job_name)",
"_____no_output_____"
]
],
[
[
"### Evaluation",
"_____no_output_____"
]
],
[
[
"envriron_vars = {\"MODEL_S3_BUCKET\": s3_bucket,\n \"MODEL_S3_PREFIX\": s3_prefix,\n \"ROS_AWS_REGION\": aws_region,\n \"NUMBER_OF_TRIALS\": str(20),\n \"MARKOV_PRESET_FILE\": \"%s.py\" % RLCOACH_PRESET\n }\n\nsimulation_application = {\"application\":simulation_app_arn,\n \"launchConfig\": {\"packageName\": \"object_tracker_simulation\",\n \"launchFile\": \"evaluation.launch\",\n \"environmentVariables\": envriron_vars}\n }\n \nvpcConfig = {\"subnets\": default_subnets,\n \"securityGroups\": default_security_groups,\n \"assignPublicIp\": True}\n\n\n\nresponse = robomaker.create_simulation_job(iamRole=role,\n clientRequestToken=strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime()),\n maxJobDurationInSeconds=job_duration_in_seconds,\n failureBehavior=\"Continue\",\n simulationApplications=[simulation_application],\n vpcConfig=vpcConfig\n )\nprint(\"Created the following job:\")\nprint(\"Job ARN\", response[\"arn\"])",
"_____no_output_____"
]
],
[
[
"### Clean Up Simulation Application Resource",
"_____no_output_____"
]
],
[
[
"robomaker.delete_simulation_application(application=simulation_app_arn)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9b25d7e5bc02942295f07bdd5a3b14d9df26d7
| 10,502 |
ipynb
|
Jupyter Notebook
|
ml/rnn_imdb.ipynb
|
shaowei-su/shaowei-su.github.io
|
8e9d21d4811a78a1f025eb48d73fabd1539a62ad
|
[
"MIT"
] | null | null | null |
ml/rnn_imdb.ipynb
|
shaowei-su/shaowei-su.github.io
|
8e9d21d4811a78a1f025eb48d73fabd1539a62ad
|
[
"MIT"
] | null | null | null |
ml/rnn_imdb.ipynb
|
shaowei-su/shaowei-su.github.io
|
8e9d21d4811a78a1f025eb48d73fabd1539a62ad
|
[
"MIT"
] | null | null | null | 23.814059 | 1,146 | 0.438774 |
[
[
[
"from keras.datasets import imdb",
"/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n"
],
[
"vocabulary_size = 5000",
"_____no_output_____"
],
[
"(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size)",
"_____no_output_____"
],
[
"X_train[0]",
"_____no_output_____"
],
[
"word_to_id = imdb.get_word_index()\nword_to_id = {k:(v+3) for k, v in word_to_id.items()}\nword_to_id[\"<PAD>\"] = 0\nword_to_id[\"<START>\"] = 1\nword_to_id[\"<UNK>\"] = 2",
"_____no_output_____"
],
[
"id_to_word = {value:key for key, value in word_to_id.items()}\nprint(' '.join(id_to_word[id] for id in X_train[0]))",
"<START> this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK> father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for <UNK> and would recommend it to everyone to watch and the fly <UNK> was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also <UNK> to the two little <UNK> that played the <UNK> of norman and paul they were just brilliant children are often left out of the <UNK> list i think because the stars that play them all grown up are such a big <UNK> for the whole film but these children are amazing and should be <UNK> for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was <UNK> with us all\n"
],
[
"from keras.preprocessing import sequence\nmax_words = 500\nX_train = sequence.pad_sequences(X_train, maxlen=max_words)\nX_test = sequence.pad_sequences(X_test, maxlen=max_words)",
"_____no_output_____"
],
[
"from keras import Sequential\nfrom keras.layers import Embedding, SimpleRNN, Dense\nembedding_size = 32\nmodel = Sequential()\nmodel.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))\nmodel.add(SimpleRNN(100))\nmodel.add(Dense(1, activation='sigmoid'))",
"_____no_output_____"
],
[
"model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 500, 32) 160000 \n_________________________________________________________________\nsimple_rnn_1 (SimpleRNN) (None, 100) 13300 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 101 \n=================================================================\nTotal params: 173,401\nTrainable params: 173,401\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.layers",
"_____no_output_____"
],
[
"model.weights",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9b2b6c8cc143540c8c3b6e93a68f7a8e7fed62
| 627,023 |
ipynb
|
Jupyter Notebook
|
NeuralNetworks/DBN/DeepBelifNetworksIntro.ipynb
|
Harsh188/100_Days_of_ML
|
7f793813cc340ea6aeb4b028cb9be303afe25e36
|
[
"MIT"
] | 4 |
2021-05-25T21:59:54.000Z
|
2022-01-08T03:16:24.000Z
|
NeuralNetworks/DBN/DeepBelifNetworksIntro.ipynb
|
Harsh188/100_Days_of_ML
|
7f793813cc340ea6aeb4b028cb9be303afe25e36
|
[
"MIT"
] | null | null | null |
NeuralNetworks/DBN/DeepBelifNetworksIntro.ipynb
|
Harsh188/100_Days_of_ML
|
7f793813cc340ea6aeb4b028cb9be303afe25e36
|
[
"MIT"
] | 1 |
2021-03-07T07:11:31.000Z
|
2021-03-07T07:11:31.000Z
| 8,250.302632 | 625,484 | 0.966145 |
[
[
[
"# Deep Belief Networks",
"_____no_output_____"
],
[
"DBN is an unsupervised probabilistic deep learning algorithm.",
"_____no_output_____"
],
[
"## What are DBNs?\nDBNs are graphial models which learn to extract a deep hierarchial representation of the training data.",
"_____no_output_____"
],
[
"## Architecture\nIt is composed of multiple layers of latent variables with connections between the layers but not between units within each layer.\n",
"_____no_output_____"
],
[
"DBNs are composed of a stack of restricted boltzmann machine otherwise knwon as autoencoders.",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a9b2eedf6d45a6985aa8571704d13865f09df78
| 14,150 |
ipynb
|
Jupyter Notebook
|
Chapter01/03_Graphs_Benchmarks.ipynb
|
deusebio/Graph-Machine-Learning
|
548d031b29e090e8f9934836c17922c8a51fb459
|
[
"MIT"
] | null | null | null |
Chapter01/03_Graphs_Benchmarks.ipynb
|
deusebio/Graph-Machine-Learning
|
548d031b29e090e8f9934836c17922c8a51fb459
|
[
"MIT"
] | null | null | null |
Chapter01/03_Graphs_Benchmarks.ipynb
|
deusebio/Graph-Machine-Learning
|
548d031b29e090e8f9934836c17922c8a51fb459
|
[
"MIT"
] | null | null | null | 23.741611 | 270 | 0.543251 |
[
[
[
"# Benchmark and Repositories",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"import networkx as nx\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndefault_edge_color = 'gray'\ndefault_node_color = '#407cc9'\nenhanced_node_color = '#f5b042'\nenhanced_edge_color = '#cc2f04'",
"_____no_output_____"
],
[
"output_dir = \"./\"",
"_____no_output_____"
],
[
"import os",
"_____no_output_____"
],
[
"def draw_graph(G, node_names={}, filename=None, node_size=50, layout = None):\n pos_nodes = nx.spring_layout(G) if layout is None else layout(G)\n nx.draw(G, pos_nodes, with_labels=False, node_size=node_size, edge_color='gray')\n \n pos_attrs = {}\n for node, coords in pos_nodes.items():\n pos_attrs[node] = (coords[0], coords[1] + 0.08)\n \n nx.draw_networkx_labels(G, pos_attrs, labels=node_names, font_family='serif')\n \n plt.axis('off')\n axis = plt.gca()\n axis.set_xlim([1.2*x for x in axis.get_xlim()])\n axis.set_ylim([1.2*y for y in axis.get_ylim()])\n \n if filename:\n plt.savefig(os.path.join(output_dir, filename), format=\"png\")\n\n\n# draw enhanced path on the graph\ndef draw_enhanced_path(G, path_to_enhance, node_names={}, filename=None, layout=None):\n path_edges = list(zip(path,path[1:]))\n pos_nodes = nx.spring_layout(G) if layout is None else layout(G)\n \n plt.figure(figsize=(5,5),dpi=300)\n pos_nodes = nx.spring_layout(G)\n nx.draw(G, pos_nodes, with_labels=False, node_size=50, edge_color='gray')\n \n pos_attrs = {}\n for node, coords in pos_nodes.items():\n pos_attrs[node] = (coords[0], coords[1] + 0.08)\n \n nx.draw_networkx_labels(G, pos_attrs, labels=node_names, font_family='serif')\n nx.draw_networkx_edges(G,pos_nodes,edgelist=path_edges, edge_color='#cc2f04', style='dashed', width=2.0)\n \n plt.axis('off')\n axis = plt.gca()\n axis.set_xlim([1.2*x for x in axis.get_xlim()])\n axis.set_ylim([1.2*y for y in axis.get_ylim()])\n \n if filename:\n plt.savefig(os.path.join(output_dir, filename), format=\"png\")",
"_____no_output_____"
]
],
[
[
"### Simple Example of Graphs",
"_____no_output_____"
],
[
"We start with some simple graphs",
"_____no_output_____"
]
],
[
[
"complete = nx.complete_graph(n=7)\nlollipop = nx.lollipop_graph(m=7, n=3)\nbarbell = nx.barbell_graph(m1=7, m2=4)",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,6))\nplt.subplot(1,3,1)\ndraw_graph(complete)\nplt.title(\"Complete\")\nplt.subplot(1,3,2)\nplt.title(\"Lollipop\")\ndraw_graph(lollipop)\nplt.subplot(1,3,3)\nplt.title(\"Barbell\")\ndraw_graph(barbell)\nplt.savefig(os.path.join(output_dir, \"SimpleGraphs.png\"))",
"_____no_output_____"
],
[
"complete = nx.relabel_nodes(nx.complete_graph(n=7), lambda x: x + 0)\nlollipop = nx.relabel_nodes(nx.lollipop_graph(m=7, n=3), lambda x: x+100)\nbarbell = nx.relabel_nodes(nx.barbell_graph(m1=7, m2=4), lambda x: x+200)",
"_____no_output_____"
],
[
"def get_random_node(graph):\n return np.random.choice(graph.nodes)",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## We compose simple graphs into one",
"_____no_output_____"
]
],
[
[
"allGraphs = nx.compose_all([complete, barbell, lollipop])\nallGraphs.add_edge(get_random_node(lollipop), get_random_node(lollipop))\nallGraphs.add_edge(get_random_node(complete), get_random_node(barbell))",
"_____no_output_____"
],
[
"draw_graph(allGraphs, layout=nx.kamada_kawai_layout)",
"_____no_output_____"
]
],
[
[
"#### Model Barabasi Albert",
"_____no_output_____"
],
[
"In the following we create and analyse some simple graph generated by the Barabasi-Albert model ",
"_____no_output_____"
]
],
[
[
"BA_graph_small = nx.extended_barabasi_albert_graph(n=20,m=1,p=0,q=0)",
"_____no_output_____"
],
[
"draw_graph(BA_graph_small, layout=nx.circular_layout)",
"_____no_output_____"
]
],
[
[
"We analyse large Barabasi-Albert graphs to investigate their ability to generate power-law distribution for the degree of node",
"_____no_output_____"
]
],
[
[
"n = 1E5\nbag = nx.extended_barabasi_albert_graph(n,m=1,p=0,q=0)",
"_____no_output_____"
],
[
"degree = dict(nx.degree(bag)).values()",
"_____no_output_____"
],
[
"bins = np.round(np.logspace(np.log10(min(degree)), np.log10(max(degree)), 10))",
"_____no_output_____"
],
[
"from collections import Counter\ncnt = Counter(np.digitize(np.array(list(degree)), bins))",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,6))\nplt.subplot(1,2,1)\ndraw_graph(BA_graph_small, layout=nx.circular_layout)\nplt.subplot(1,2,2)\nx, y = list(zip(*[(bins[k-1], v/n) for k, v in cnt.items()]))\nplt.plot(x, y, 'o'); plt.xscale(\"log\"); plt.yscale(\"log\")\nplt.xlabel(\"Degree k\")\nplt.ylabel(\"P(k)\")\nplt.savefig(os.path.join(output_dir, \"Barabasi_Albert.png\"))",
"_____no_output_____"
],
[
"plt.figure(figsize=(15, 6))\n\nplt.hist(degree, bins=bins)\nplt.xscale(\"log\")\nplt.yscale(\"log\")",
"_____no_output_____"
]
],
[
[
"Other simple graph Benchmarks",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"graph = nx.florentine_families_graph()",
"_____no_output_____"
],
[
"nx.draw_kamada_kawai(graph, with_labels=True, node_size=20, font_size=14)\nplt.savefig(\"Florentine.png\")",
"_____no_output_____"
]
],
[
[
"### Benchmarks from the Network Data Repository",
"_____no_output_____"
],
[
"This dataset (and other) can be downloaded from http://networkrepository.com/. The datasets are generally in the MTX file format that has been described in the book. \n\nIn particular the dataset here presented is taken from the collaboration network of Arxiv Astro Physics, that can be downloaded from http://networkrepository.com/ca-AstroPh.php. ",
"_____no_output_____"
]
],
[
[
"from scipy.io import mmread",
"_____no_output_____"
],
[
"file = \"ca-AstroPh.mtx\"\nadj_matrix = mmread(file)",
"_____no_output_____"
],
[
"graph = nx.from_scipy_sparse_matrix(adj_matrix)",
"_____no_output_____"
],
[
"degrees = dict(nx.degree(graph))",
"_____no_output_____"
],
[
"ci = nx.clustering(graph)",
"_____no_output_____"
],
[
"centrality = nx.centrality.betweenness_centrality(graph)",
"_____no_output_____"
],
[
"stats = pd.DataFrame({\n \"centrality\": centrality, \n \"C_i\": ci, \n \"degree\": degrees\n})",
"_____no_output_____"
],
[
"stats.head()",
"_____no_output_____"
]
],
[
[
"Here we provide some simple analysis of the DataFrame we generated to see correlations between centrality, clustering coefficient and degree. ",
"_____no_output_____"
]
],
[
[
"plt.plot(stats[\"centrality\"], stats[\"degree\"], 'o')\nplt.xscale(\"log\")\nplt.yscale(\"log\")",
"_____no_output_____"
],
[
"plt.plot(stats[\"centrality\"], stats[\"C_i\"], 'o')\nplt.xscale(\"log\")\nplt.yscale(\"log\")",
"_____no_output_____"
]
],
[
[
"### Ego-network ",
"_____no_output_____"
],
[
"Here we plot the ego-network of the most-connected node, that has id 6933. However, even this network looks a bit messy since it has hundreds of nodes. We therefore sample randomly or based on centrality/clustering coefficient in order to plot a relevant subgraph.",
"_____no_output_____"
]
],
[
[
"neighbors = [n for n in nx.neighbors(graph, 6933)]",
"_____no_output_____"
],
[
"sampling = 0.1",
"_____no_output_____"
],
[
"nTop = round(len(neighbors)*sampling)",
"_____no_output_____"
],
[
"idx = {\n \"random\": stats.loc[neighbors].sort_index().index[:nTop], \n \"centrality\": stats.loc[neighbors].sort_values(\"centrality\", ascending=False).index[:nTop],\n \"C_i\": stats.loc[neighbors].sort_values(\"C_i\", ascending=False).index[:nTop]\n}",
"_____no_output_____"
],
[
"def plotSubgraph(graph, indices, center = 6933):\n draw_graph(\n nx.subgraph(graph, list(indices) + [center]),\n layout = nx.kamada_kawai_layout\n )",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,6))\nfor ith, title in enumerate([\"random\", \"centrality\", \"C_i\"]):\n plt.subplot(1,3,ith+1)\n plotSubgraph(graph, idx[title])\n plt.title(title)\nplt.savefig(os.path.join(output_dir, \"PhAstro\"))",
"_____no_output_____"
]
],
[
[
"### Data to Gephi",
"_____no_output_____"
],
[
"Otherwise, we could also export the data from networkx in order to plot it and analyse it using the Gephi software.",
"_____no_output_____"
]
],
[
[
"nx.write_gexf(graph, 'ca-AstroPh.gexf')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a9b397ca78d772fdb0df9601bcafc97a0147103
| 6,032 |
ipynb
|
Jupyter Notebook
|
pyrexMD/examples/4b.GDT_Analyses_RNA.ipynb
|
KIT-MBS/pyREX
|
cf3036400850f8b155399cd9444225352a34db08
|
[
"MIT"
] | 6 |
2021-12-08T04:18:36.000Z
|
2022-03-31T23:01:33.000Z
|
pyrexMD/examples/4b.GDT_Analyses_RNA.ipynb
|
KIT-MBS/pyREX
|
cf3036400850f8b155399cd9444225352a34db08
|
[
"MIT"
] | 1 |
2021-07-16T20:39:08.000Z
|
2021-07-17T14:55:54.000Z
|
pyrexMD/examples/4b.GDT_Analyses_RNA.ipynb
|
KIT-MBS/pyrexMD
|
cf3036400850f8b155399cd9444225352a34db08
|
[
"MIT"
] | null | null | null | 24.620408 | 129 | 0.552056 |
[
[
[
"# This jupyter notebook contains examples of\n- some basic functions related to Global Distance Test (GDT) analyses\n- local accuracy plot",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport MDAnalysis as mda\nimport pyrexMD.misc as misc\nimport pyrexMD.core as core\nimport pyrexMD.topology as top\nimport pyrexMD.analysis.analyze as ana\nimport pyrexMD.analysis.gdt as gdt",
"_____no_output_____"
]
],
[
[
"We define MDAnalysis universes to handle data. In this case we define:\n- ref: universe with reference structure\n- mobile: universe with trajectory",
"_____no_output_____"
]
],
[
[
"pdb = \"files/traj_rna/4tzx_ref.pdb\"\ntpr = \"files/traj_rna/traj_rna.tpr\"\ntraj = \"files/traj_rna/traj_rna_cat.xtc\"\n\nref = mda.Universe(pdb)\nmobile = mda.Universe(tpr, traj)\n\ntv = core.iPlayer(mobile)\ntv()",
"_____no_output_____"
]
],
[
[
"# Global Distance Test (GDT) Analysis\nfirst we norm and align the universes (shift res ids, atom ids) and run the Global Distance Test",
"_____no_output_____"
]
],
[
[
"# first norm and align universes\ntop.norm_and_align_universe(mobile, ref)\n\n# run GDT using selection idnex string for correct mapping\nGDT = gdt.GDT_rna(mobile, ref)\nGDT_percent, GDT_resids, GDT_cutoff, RMSD, FRAME = GDT",
"_____no_output_____"
]
],
[
[
"Now we can calculate individual GDT scores\n- TS: Total Score\n- HA: High Accuracy",
"_____no_output_____"
]
],
[
[
"GDT_TS = gdt.get_GDT_TS(GDT_percent)\nGDT_HA = gdt.get_GDT_HA(GDT_percent)",
"_____no_output_____"
]
],
[
[
"We can print the scores in a table to take a quick look on the content",
"_____no_output_____"
]
],
[
[
"frames = [i for i in range(len(GDT_TS))]\n\nmisc.cprint(\"GDT TS GDT HA frame\", \"blue\")\n_ = misc.print_table([GDT_TS, GDT_HA, frames], verbose_stop=10, spacing=10)",
"_____no_output_____"
]
],
[
[
"alternatively we can also first rank the scores and print the table sorted by rank",
"_____no_output_____"
]
],
[
[
"SCORES = gdt.GDT_rank_scores(GDT_percent, ranking_order=\"GDT_TS\", verbose=False)\nGDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked = SCORES\n\nmisc.cprint(\"GDT TS GDT HA frame\", \"blue\")\n_ = misc.print_table([GDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked], spacing=10, verbose_stop=10)",
"_____no_output_____"
]
],
[
[
"To plot the GDT_TS curve we can use a generalized PLOT function:",
"_____no_output_____"
]
],
[
[
"fig, ax = ana.PLOT(xdata=frames, ydata=GDT_TS, xlabel=\"Frame\", ylabel=\"GDT TS\")",
"_____no_output_____"
]
],
[
[
"Histrograms are often also important as they can be used to extract probabilities of protein conformations",
"_____no_output_____"
]
],
[
[
"hist = ana.plot_hist(GDT_TS, n_bins=20, xlabel=\"GDT TS\", ylabel=\"Counts\")",
"_____no_output_____"
]
],
[
[
"# Local Accuracy Plot\nFigure showing local accuracy of models at specified frames to identify which parts of a structure are good or bad refined.",
"_____no_output_____"
]
],
[
[
"# edit text box positions of labels \"Frame\", \"TS\", \"HA\"\ntext_pos_kws = {\"text_pos_Frame\": [-33.6, -0.3],\n \"text_pos_TS\": [-16.0, -0.3],\n \"text_pos_HA\": [-7.4, -0.3],\n \"font_scale\": 1.0,\n \"show_frames\": True,\n \"vmax\": 14} \n\n# plot\nA = gdt.plot_LA_rna(mobile, ref, GDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked, **text_pos_kws)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9b5248219dc89a9e1a7a687d100c2a4e16874f
| 16,397 |
ipynb
|
Jupyter Notebook
|
Data Analysis/DataAnalysisProcess/ipython_notebook_tutorial.ipynb
|
bhupendpatil/Practice
|
9663b3f41e359787cbbd04aedb3db3c605c6ec8e
|
[
"MIT"
] | 1 |
2020-12-23T06:22:29.000Z
|
2020-12-23T06:22:29.000Z
|
Data Analysis/DataAnalysisProcess/ipython_notebook_tutorial.ipynb
|
bhupendpatil/Practice
|
9663b3f41e359787cbbd04aedb3db3c605c6ec8e
|
[
"MIT"
] | 8 |
2020-06-18T19:32:39.000Z
|
2022-03-11T11:37:07.000Z
|
Data Analysis/DataAnalysisProcess/ipython_notebook_tutorial.ipynb
|
bhupendpatil/Practice
|
9663b3f41e359787cbbd04aedb3db3c605c6ec8e
|
[
"MIT"
] | 1 |
2021-01-19T00:16:34.000Z
|
2021-01-19T00:16:34.000Z
| 55.772109 | 7,660 | 0.74227 |
[
[
[
"# Text Using Markdown\n\n**If you double click on this cell**, you will see the text change so that all of the formatting is removed. This allows you to edit this block of text. This block of text is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to format text using headers, links, italics, and many other options. Hit _shift_ + _enter_ or _shift_ + _return_ on your keyboard to show the formatted text again. This is called \"running\" the cell, and you can also do it using the run button in the toolbar.",
"_____no_output_____"
],
[
"# Code cells\n\nOne great advantage of IPython notebooks is that you can show your Python code alongside the results, add comments to the code, or even add blocks of text using Markdown. These notebooks allow you to collaborate with others and share your work. The following cell is a code cell.",
"_____no_output_____"
]
],
[
[
"# Hit shift + enter or use the run button to run this cell and see the results\n\nprint('hello world')",
"hello world\n"
],
[
"# The last line of every code cell will be displayed by default, \n# even if you don't print it. Run this cell to see how this works.\n\n2 + 2 # The result of this line will not be displayed\n3 + 3 # The result of this line will be displayed, because it is the last line of the cell",
"_____no_output_____"
]
],
[
[
"# Nicely formatted results\n\nIPython notebooks allow you to display nicely formatted results, such as plots and tables, directly in\nthe notebook. You'll learn how to use the following libraries later on in this course, but for now here's a\npreview of what IPython notebook can do.",
"_____no_output_____"
]
],
[
[
"# If you run this cell, you should see the values displayed as a table.\n\n# Pandas is a software library for data manipulation and analysis. You'll learn to use it later in this course.\nimport pandas as pd\n\ndf = pd.DataFrame({'a': [2, 4, 6, 8], 'b': [1, 3, 5, 7]})\ndf",
"_____no_output_____"
],
[
"# If you run this cell, you should see a scatter plot of the function y = x^2\n\n%pylab inline\nimport matplotlib.pyplot as plt\n\nxs = range(-30, 31)\nys = [x ** 2 for x in xs]\n\nplt.scatter(xs, ys)",
"Populating the interactive namespace from numpy and matplotlib\n"
]
],
[
[
"# Creating cells \n \nTo create a new **code cell**, click \"Insert > Insert Cell [Above or Below]\". A code cell will automatically be created.\n\nTo create a new **markdown cell**, first follow the process above to create a code cell, then change the type from \"Code\" to \"Markdown\" using the dropdown next to the run, stop, and restart buttons.",
"_____no_output_____"
],
[
"# Re-running cells\n\nIf you find a bug in your code, you can always update the cell and re-run it. However, any cells that come afterward won't be automatically updated. Try it out below. First run each of the three cells. The first two don't have any output, but you will be able to tell they've run because a number will appear next to them, for example, \"In [5]\". The third cell should output the message \"Intro to Data Analysis is awesome!\"",
"_____no_output_____"
]
],
[
[
"class_name = \"Intro to Data Analysis\"",
"_____no_output_____"
],
[
"message = class_name + \" is awesome!\"",
"_____no_output_____"
],
[
"message",
"_____no_output_____"
]
],
[
[
"Once you've run all three cells, try modifying the first one to set `class_name` to your name, rather than \"Intro to Data Analysis\", so you can print that you are awesome. Then rerun the first and third cells without rerunning the second.\n\nYou should have seen that the third cell still printed \"Intro to Data Analysis is awesome!\" That's because you didn't rerun the second cell, so even though the `class_name` variable was updated, the `message` variable was not. Now try rerunning the second cell, and then the third.\n\nYou should have seen the output change to \"*your name* is awesome!\" Often, after changing a cell, you'll want to rerun all the cells below it. You can do that quickly by clicking \"Cell > Run All Below\".\n\nOne final thing to remember: if you shut down the kernel after saving your notebook, the cells' output will still show up as you left it at the end of your session when you start the notebook back up. However, the state of the kernel will be reset. If you are actively working on a notebook, remember to re-run your cells to set up your working environment to really pick up where you last left off.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a9b56ef24e6ee1988efedde88b937fb35277b7d
| 30,744 |
ipynb
|
Jupyter Notebook
|
Code & Dataset/3.Name Matching & Data Mapping/AmazonDataMapping.ipynb
|
TeamEpicProjects/Best-Gadget-Finder
|
3b12024551e2c9036aa02acb9af555c650abf0b2
|
[
"MIT"
] | null | null | null |
Code & Dataset/3.Name Matching & Data Mapping/AmazonDataMapping.ipynb
|
TeamEpicProjects/Best-Gadget-Finder
|
3b12024551e2c9036aa02acb9af555c650abf0b2
|
[
"MIT"
] | null | null | null |
Code & Dataset/3.Name Matching & Data Mapping/AmazonDataMapping.ipynb
|
TeamEpicProjects/Best-Gadget-Finder
|
3b12024551e2c9036aa02acb9af555c650abf0b2
|
[
"MIT"
] | 3 |
2021-06-16T14:26:31.000Z
|
2021-09-04T14:32:56.000Z
| 37.955556 | 120 | 0.450104 |
[
[
[
"# Amazon product data mapping",
"_____no_output_____"
]
],
[
[
"# importing libraries\nfrom fuzzywuzzy import fuzz\nimport pandas as pd \nimport re\nimport csv\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"# Amazon product dataset\namazon = pd.read_csv('AmazonProductdata.csv')\n\n# change columns name\namazon.rename(columns={'Name': 'Amazon_Name',\n 'Synonyms': 'Amazon_Synonyms',\n 'SalesPrice': 'Amazon_SalesPrice',\n 'OriginalPrice': 'Amazon_OriginalPrice',\n 'Rating': 'Amazon_Rating',\n 'ProductLink': 'Amazon_ProductLink',\n 'ImageLink': 'Amazon_ImageLink'}, inplace=True)",
"_____no_output_____"
],
[
"# Flipkart product dataset\nflipkart = pd.read_csv('FlipkartDataset.csv')\n\n# change columns name\nflipkart.rename(columns={'Name': 'Flipkart_Name',\n 'Synonyms': 'Flipkart_Synonyms',\n 'SalesPrice': 'Flipkart_SalesPrice',\n 'OriginalPrice': 'Flipkart_OriginalPrice',\n 'Rating': 'Flipkart_Rating',\n 'ProductLink': 'Flipkart_ProductLink',\n 'ImageLink': 'Flipkart_ImageLink'}, inplace=True)",
"_____no_output_____"
],
[
"# Snapdeal product dataset\nsnapdeal= pd.read_csv('SnapdealDataset.csv')\n\n# change columns name\nsnapdeal.rename(columns={'Name': 'Snapdeal_Name',\n 'Synonyms': 'Snapdeal_Synonyms',\n 'SalesPrice': 'Snapdeal_SalesPrice',\n 'OriginalPrice': 'Snapdeal_OriginalPrice',\n 'Rating': 'Snapdeal_Rating',\n 'ProductLink': 'Snapdeal_ProductLink',\n 'ImageLink': 'Snapdeal_ImageLink'}, inplace=True)",
"_____no_output_____"
],
[
"# Remove all the special characters\namazon_name = amazon['Amazon_Synonyms'].str.replace(r\"[^0-9,a-z,A-Z, ]\",'')\namazon_name = amazon_name.str.replace(r\"[,]\",'')\namazon_name_lst = amazon_name.to_list()\n\n# remove duplicate string\nnew_amazon_name_lst = []\nfor lst in amazon_name_lst:\n new_amazon_lst = (' '.join(dict.fromkeys(lst.split())))\n new_amazon_name_lst.append(new_amazon_lst)\n\n\namazon.insert(1, \"New_Amazon_Name\",new_amazon_name_lst)\namazon.head()",
"_____no_output_____"
],
[
"# Remove all the special characters\nflipkart_name = flipkart['Flipkart_Name'].str.replace(r\"[(),,]\",'')\nflipkart.insert(1, \"New_Flipkart_Name\",flipkart_name, True)\nflipkart.head()",
"_____no_output_____"
],
[
"# Remove all the special characters\nsnapdeal_name = snapdeal['Snapdeal_Name'].str.replace(r\"[(),,]\",'')\nsnapdeal.insert(1, \"New_Snapdeal_Name\",snapdeal_name, True)\nsnapdeal.head()",
"_____no_output_____"
],
[
"def get_match(amazon):\n \"\"\"Extract and Return matched value using fuzzywuzzy\"\"\"\n \n dictionary_name = {}\n try:\n # iterate flipkart product name\n for product_name in flipkart['New_Flipkart_Name']:\n\n # string matching using token_set_ratio function\n match = fuzz.token_set_ratio(amazon, product_name)\n \n if match >= 85:\n dictionary_name[product_name] = match\n Keymax = max(dictionary_name, key=dictionary_name.get)\n \n # return max matched value\n return Keymax\n except:\n # return NAN if not matched\n return ''\n\n \n# store matched values\nflipkart_result = []\n\n# Iterate amazon product ame\nfor item in amazon['New_Amazon_Name']:\n record = get_match(item)\n flipkart_result.append(record)\n\namazon.insert(2, \"New_Flipkart_Name\", flipkart_result)",
"_____no_output_____"
],
[
"def get_match(amazon):\n \"\"\"Extract and Return matched value using fuzzywuzzy\"\"\"\n \n dictionary_name = {}\n try :\n # iterate snapdeal product name\n for product_name in snapdeal['New_Snapdeal_Name']:\n \n # string matching using token_set_ratio function\n match = fuzz.token_set_ratio(amazon,product_name)\n \n if match >=85 :\n dictionary_name[product_name] = match\n Keymax = max(dictionary_name, key=dictionary_name.get) \n \n # return max matched value \n return Keymax\n except :\n # return NAN if not matched\n return ''\n\n# store matched values\nsnapdeal_result = []\n\n# Iterate amazon product name\nfor item in amazon['New_Amazon_Name']:\n record = get_match(item)\n snapdeal_result.append(record)\n \namazon.insert(3, \"New_Snapdeal_Name\",snapdeal_result)",
"_____no_output_____"
],
[
"# Merge snapdeal and flipkart data in amazon dataset\ndata = amazon.merge(flipkart,on='New_Flipkart_Name',how='left')\nnew_data = data.merge(snapdeal,on='New_Snapdeal_Name',how='left')",
"_____no_output_____"
],
[
"# drop unwanted columns\nnew_data = new_data.drop(['Id','ID_x','ID_y','New_Amazon_Name','New_Flipkart_Name','New_Snapdeal_Name'], axis = 1)",
"_____no_output_____"
],
[
"# rename columns\nnew_data.rename(columns={'Ratings': 'Amazon_Rating',\n 'Availability': 'Amazon_Availability',\n 'Availibility': 'Snapdeal_Availability',\n }, inplace=True)",
"_____no_output_____"
],
[
"# There are the columns \nnew_data.columns",
"_____no_output_____"
],
[
"# Save dataframe in csv file\nnew_data.to_csv('AmazonMappedData.csv',index=0)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9b7a089b258e28fa65883b148d145231eab6ab
| 55,080 |
ipynb
|
Jupyter Notebook
|
docsrc/R/ManageData.ipynb
|
wklchris/blog
|
6229eeef06e5e542736ac26722c1ac7a03829204
|
[
"MIT"
] | null | null | null |
docsrc/R/ManageData.ipynb
|
wklchris/blog
|
6229eeef06e5e542736ac26722c1ac7a03829204
|
[
"MIT"
] | null | null | null |
docsrc/R/ManageData.ipynb
|
wklchris/blog
|
6229eeef06e5e542736ac26722c1ac7a03829204
|
[
"MIT"
] | null | null | null | 23.578767 | 148 | 0.371133 |
[
[
[
"# 数据管理\n\n本节内容可应用在数据读取之后。包括基本的运算(包括统计函数)、数据重整(排序、合并、子集、随机抽样、整合、重塑等)、字符串处理、异常值(NA/Inf/NaN)处理等内容。也包括 apply() 这种函数式编程函数的使用。",
"_____no_output_____"
],
[
"## 数学函数\n\n数学运算符和一些统计学上需要的函数。\n\n### 数学运算符\n\n| 四则 | 幂运算 | 求余 | 整除 |\n| --- | --- | --- | --- |\n| +, -, \\*, / | ^ 或 \\*\\* | %% | %/% |\n\n例子:",
"_____no_output_____"
]
],
[
[
"a <- 2 ^ 3\nb <- 5 %% 2\nc <- 5 %/% 2\nprint(c(a, b, c))",
"[1] 8 1 2\n"
]
],
[
[
"### 基本数学函数\n\n- 绝对值:abs()\n- 平方根:sqrt()\n- 三角函数:sin(), cos(), tan(), acos(), asin(), atan()\n- 对数:\n - log(x, base=n) 以 n 为底 x 的对数\n - log10(x) 以 10 为底的对数\n- 指数:exp()\n- 取整:\n - 向上取整 ceiling()\n - 向下取整 floor()\n - 舍尾取整(绝对值减小) trunc()\n - 四舍五入到第 N 位 round(x, digits=N)\n - 四舍五入为有效数字共 N 位 singif(x, digits=N)\n\n### 统计、概率与随机数\n\n描述性统计等更多的统计内容,参考 [“描述性统计”一文](DescriptiveStatistics.ipynb)。\n\n#### 统计函数\n\n常用的统计函数:\n\n- 均值:mean()\n- 中位数:median()\n- 标准差:sd()\n- 方差:var()\n- 绝对中位差:mad(x, center=median(x), constant=1.4826, ...),计算式:\n\n$$ \\mathrm{mad}(x) = constant * \\mathrm{Median}(|x - center|)$$\n\n- 分位数:quantile(x, probs),例如 quantile(x, c(.3, 84%)) 返回 x 的 30% 和 84% 分位数。\n- 极值:min() & max()\n- 值域与极差:range(x),例如 range(c(1, 2, 3)) 结果为 c(1, 3)。极差用 diff(range(x))\n- 差分:diff(x, lag=1)。可以用 lag 指定滞后项的个数,默认 1\n- 标准化:scale(x, center=TRUE, scale=TRUE)。可以使用 scale(x) * SD + C 来获得标准差为 SD、均值为 C 的标准化结果。",
"_____no_output_____"
],
[
"#### 概率函数\n\n常用的概率分布函数:\n\n- 正态分布:norm\n- 泊松分布:pois\n- 均匀分布:unif\n- Beta 分布:beta\n- 二项分布:binom\n- 柯西分布:cauchy\n- 卡方分布:chisq\n- 指数分布:exp\n- F 分布:f\n- t 分布:t\n- Gamma 分布:gamma\n- 几何分布:geom\n- 超几何分布:hyper\n- 对数正态分布:lnorm\n- Logistic 分布:logis\n- 多项分布:multinom\n- 负二项分布:nbinom\n\n以上各概率函数的缩写记为 *abbr*, 那么对应的概率函数有:\n\n1. **密度函数**: d{abbr}(),例如对于正态就是 dnorm()\n2. **分布函数**:p{abbr}()\n3. **分位数函数**:q{abbr}()\n4. **生成随机数**:r{abbr}(),例如常用的 runif() 生成均匀分布\n\n#### 例子\n\n通过 runif() 产生 $[0, 1]$ 上的服从均匀分布的伪随机数列。通过 set.seed() 可以指定随机数种子,使得代码可以重现。不过**作用域只有跟随其后的那个随机数函数。**",
"_____no_output_____"
]
],
[
[
"set.seed(123)\nprint(runif(3))",
"[1] 0.2875775 0.7883051 0.4089769\n"
],
[
"# 位于 1.96 左侧的标准正态分布曲线下方的面积\npnorm(1.96)",
"_____no_output_____"
],
[
"# 均值为500,标准差为100 的正态分布的0.9 分位点\nqnorm(.9, mean=500, sd=100)",
"_____no_output_____"
],
[
"# 生成 3 个均值为50,标准差为10 的正态随机数\nset.seed(123)\nprint(rnorm(3, mean=50, sd=10))",
"[1] 44.39524 47.69823 65.58708\n"
]
],
[
[
"## 数据框操作\n\n数据框是最常使用的数据类型。下面给出数据框使用中一些实用的场景,以及解决方案。\n\n### 行、列操作\n\n#### 新建\n\n创建一个新的列(变量)是很常见的操作。比如我们现在有数据框 df ,想要在右侧新建一个列,使其等于左侧两列的和。",
"_____no_output_____"
]
],
[
[
"df = data.frame(x1=c(1, 3, 5), x2=c(2, 4, 6))\n# 直接用美元符声明一个新列\ndf$sumx <- df$x1 + df$x2\ndf",
"_____no_output_____"
],
[
"# 或者使用 transform 函数\ndf <- transform(df, sumx2=x1+x2)\ndf",
"_____no_output_____"
]
],
[
[
"#### 重命名",
"_____no_output_____"
]
],
[
[
"colnames(df)[4] <- \"SUM\"\nprint(colnames(df))",
"[1] \"x1\" \"x2\" \"sumx\" \"SUM\" \n"
]
],
[
[
"#### 选取/剔除: subset()",
"_____no_output_____"
]
],
[
[
"# 选取前两列\ndf[,1:2] # 或者 df[c(\"x1\", \"x2\")]",
"_____no_output_____"
],
[
"# 剔除列 sumx\ndf <- df[!names(df) == \"sumx\"]\ndf",
"_____no_output_____"
],
[
"# 剔除第三列\ndf <- df[-c(3)] # 或者 df[c(-3)]\ndf",
"_____no_output_____"
]
],
[
[
"至于选取行,与列的操作方式是类似的:",
"_____no_output_____"
]
],
[
[
"# 选取 x1>2 且 x2为偶数的观测(行)\ndf[df$x1 > 2 & df$x2 %% 2 ==0,]",
"_____no_output_____"
]
],
[
[
"再介绍一个 subset() 指令,非常简单粗暴。先来一个复杂点的数据集:",
"_____no_output_____"
]
],
[
[
"DF <- data.frame(age = c(22, 37, 28, 33, 43),\n gender = c(1, 2, 1, 2, 1),\n q1 = c(1, 5, 3, 3, 2),\n q2 = c(4, 4, 5, 3, 1),\n q3 = c(3, 2, 4, 3, 1))\nDF$gender <- factor(DF$gender, labels=c(\"Male\", \"Female\"))\n\nDF",
"_____no_output_____"
],
[
"# 选中年龄介于 25 与 40 之间的观测\n# 并只保留变量 age 到 q2\nsubset(DF, age > 25 & age < 40, select=age:q2)",
"_____no_output_____"
]
],
[
[
"#### 横向合并\n\n如果你有两个**行数相同**的数据框,你可以使用 merge() 将其进行内联合并(inner join),他们将通过一个或多个共有的变量进行合并。",
"_____no_output_____"
]
],
[
[
"df1 <- data.frame(ID=c(1, 2, 3), Sym=c(\"A\", \"B\", \"C\"), Oprtr=c(\"x\", \"y\", \"z\"))\ndf2 <- data.frame(ID=c(1, 3, 2), Oprtr=c(\"x\", \"y\", \"z\"))\n\n# 按 ID 列合并\nmerge(df1, df2, by=\"ID\")",
"_____no_output_____"
],
[
"# 由于 ID 与 Oprtr 一致的只有一行,因此其余的都舍弃\nmerge(df1, df2, by=c(\"ID\", \"Oprtr\"))",
"_____no_output_____"
]
],
[
[
"或者直接用 cbind() 函数组合。",
"_____no_output_____"
]
],
[
[
"# 直接组合。注意:列名相同的话,在按列名调用时右侧的会被忽略\ncbind(df1, df2)",
"_____no_output_____"
]
],
[
[
"#### 纵向合并\n\n相当于追加观测。两个数据框必须有**相同的变量**,尽管顺序可以不同。如果两个数据框变量不同请:\n\n- 删除多余变量;\n- 在缺少变量的数据框中,追加同名变量并将其设为缺失值 NA。",
"_____no_output_____"
]
],
[
[
"df1 <- data.frame(ID=c(1, 2, 3), Sym=c(\"A\", \"B\", \"C\"), Oprtr=c(\"x\", \"y\", \"z\"))\ndf2 <- data.frame(ID=c(1, 3, 2), Oprtr=c(\"x\", \"y\", \"z\"))\ndf2$Sym <- NA\n\nrbind(df1, df2)",
"_____no_output_____"
]
],
[
[
"### 逻辑型筛选\n\n通过逻辑判断来过滤数据,或者选取数据子集,或者将子集作统一更改。在前面的一些例子中已经使用到了。",
"_____no_output_____"
]
],
[
[
"df$x3 <- c(7, 8, 9)\n# 把列 x3 中的奇数换成 NA\ndf$x3[df$x3 %% 2 == 1] <- NA\ndf",
"_____no_output_____"
],
[
"df$y <- c(7, 12, 27)\n# 把所有小于 3 的标记为 NaN\n# 把所有大于 10 的数按奇偶标记为正负Inf\n\ndf[df < 3] <- NaN\ndf[df > 10 & df %% 2 == 1] <- Inf\ndf[df > 10 & df %% 2 == 0] <- -Inf\ndf",
"_____no_output_____"
]
],
[
[
"### 排序\n\n排序使用 order() 命令。",
"_____no_output_____"
]
],
[
[
"df <- data.frame(age =c(22, 37, 28, 33, 43),\n gender=c(1, 2, 1, 2, 1))\ndf$gender <- factor(df$gender, labels=c(\"Male\", \"Female\"))\n\n# 按gender升序排序,各gender内按age降序排序\ndf[order(df$gender, -df$age),]",
"_____no_output_____"
]
],
[
[
"### 随机抽样\n\n从已有的数据集中随机抽选样本是常见的做法。例如,其中一份用于构建预测模型,另一份用于验证模型。\n\n```r\n# 无放回地从 df 的所有观测中,抽取一个大小为 3 的样本\ndf[sample(1:nrow(df), 3, replace=F)]\n```\n\n随机抽样的 R 包有 sampling 与 survey,如果可能我会在本系列下另建文章介绍。\n\n### SQL语句\n\n在 R 中,借助 sqldf 包可以直接用 SQL 语句操作数据框(data.frame)。一个来自书中的例子:\n\n```r\nnewdf <- sqldf(\"select * from mtcars where carb=1 order by mpg\", row.names=TRUE)\n```\n\n这里就不过多涉及了。",
"_____no_output_____"
],
[
"## 字符串处理\n\nR 中的字符串处理函数有以下几种:\n\n### 通用函数\n\n| 函数 | 含义 |\n| --- | --- |\n| nchar(x) | 计算字符串的长度 |\n| substr(x, start, stop) | 提取子字符串 |\n| grep(pattern, x, ignore.case=FALSE, fixed=FALSE) | 正则搜索,返回为匹配的下标。如果 fixed=T,则按字符串而不是正则搜索。 |\n| grepl() | 类似 grep(),只不过返回值是逻辑值向量。 |\n| sub(pattern, replacement, x, ignore.base=FALSE, fixed=FALSE) | 在 x 中搜索正则式,并以 replacement 将其替换。如果 fixed=T,则按字符串而不是正则搜索 |\n| strsplit(x, split, fixed=FALSE) | 在 split 处分割字符向量 x 中的元素,返回一个列表。 |\n| paste(x1, x2, ..., sep=\"\") | 连接字符串,连接符为 sep。也可以连接重复字串:`paste(\"x\", 1:3, sep=\"\")` |\n| toupper(x) | 转换字符串为全大写 |\n| tolower(x) | 转换字符串为全小写 |\n\n一些例子。首先是正则表达式的使用:",
"_____no_output_____"
]
],
[
[
"streg <- c(\"abc\", \"abcc\", \"abccc\", \"abc5\")\nre1 <- grep(\"abc*\", streg)\nre2 <- grep(\"abc\\\\d\", streg) # 注意反斜杠要双写来在 R 中转义\nre3 <- sub(\"[a-z]*\", \"Hey\", streg)\nre4 <- sub(\"[a-z]*\\\\d\", \"NEW\", streg)\n\nprint(list(re1, re2, re3, re4))",
"[[1]]\n[1] 1 2 3 4\n\n[[2]]\n[1] 4\n\n[[3]]\n[1] \"Hey\" \"Hey\" \"Hey\" \"Hey5\"\n\n[[4]]\n[1] \"abc\" \"abcc\" \"abccc\" \"NEW\" \n\n"
]
],
[
[
"然后是字符串分割与连接。注意这里的 paste() 有非常巧妙的用法:",
"_____no_output_____"
]
],
[
[
"splt <- strsplit(streg, \"c\") # 结果中不含分隔符 \"c\"\ncat1 <- paste(\"a\", \"b\", \"c\", sep=\"-\")\ncat2 <- paste(\"x\", 1:3, sep=\"\") # 生成列名时非常有用\n\nprint(list(splt, cat1, cat2))",
"[[1]]\n[[1]][[1]]\n[1] \"ab\"\n\n[[1]][[2]]\n[1] \"ab\" \"\" \n\n[[1]][[3]]\n[1] \"ab\" \"\" \"\" \n\n[[1]][[4]]\n[1] \"ab\" \"5\" \n\n\n[[2]]\n[1] \"a-b-c\"\n\n[[3]]\n[1] \"x1\" \"x2\" \"x3\"\n\n"
]
],
[
[
"### 日期型字符串\n\n与其他类型相似,日期型字符串能够通过 as.Date() 函数处理。各格式字符的含义如下:\n\n| 符号 | 含义 | 通用示例 | 中文示例 |\n| --- | --- | --- | --- |\n| %d | 日(1~31) | 22 | 22 |\n| %a | 缩写星期 | Mon | 周一 |\n| %A | 全写星期 | Monday | 星期一 |\n| %m | 月(1~12) | 10 | 10 |\n| %b | 缩写月 | Jan | 1月 |\n| %B | 全写月 | January | 一月 |\n| %y | 两位年 | 17 | 17 |\n| %Y | 四位年 | 2017 | 2017 |",
"_____no_output_____"
]
],
[
[
"# 对字符串数据 x,用法:as.Date(x, format=, ...)\ndates <- as.Date(\"01-28-2017\", format=\"%m-%d-%Y\")\nprint(dates)",
"[1] \"2017-01-28\"\n"
]
],
[
[
"要想获得当前的日期或时间,有两种格式可以参考,并可以用 format() 函数辅助输出。",
"_____no_output_____"
]
],
[
[
"# Sys.Date() 返回一个精确到日的标准日期格式\ndates1 <- Sys.Date()\nformat(dates1, format=\"%A\") # 可以指定输出格式",
"_____no_output_____"
],
[
"# date() 返回一个精确到秒的详细的字串\ndates2 <- date()\ndates2",
"_____no_output_____"
]
],
[
[
"函数 difftime() 提供了计算时间差的方式。其中计量单位可以是以下之一:\"auto\", \"secs\", \"mins\", \"hours\", \"days\", \"weeks\"。\n\n截至本文最后更新,我有 1100+ 周大。唔……这好像听起来没什么感觉",
"_____no_output_____"
]
],
[
[
"dates1 <- as.Date(\"1994-11-23\")\ndates2 <- Sys.Date()\ndifftime(dates2, dates1, units=\"weeks\")",
"_____no_output_____"
]
],
[
[
"## 异常值处理\n\n异常值包括三类:\n\n- NA:缺失值。\n- Inf:正无穷。用 -Inf 表示负无穷。**无穷与数可以比较大小,**比如 -Inf < 3 为真。\n- NaN:非可能值。比如 0/0。\n\n使用 is.na() 函数判断数据集中是否存在 NA 或者 NaN,并返回矩阵。注意 NaN 会被判断为缺失值。",
"_____no_output_____"
]
],
[
[
"is.na(df)",
"_____no_output_____"
]
],
[
[
"另外也有类似的函数来判断 Inf 与 NaN,但只能对一维数据集使用:",
"_____no_output_____"
]
],
[
[
"print(c(is.infinite(c(Inf, -Inf)), is.nan(NA)))",
"[1] TRUE TRUE FALSE\n"
]
],
[
[
"在进行数据处理之前,处理 NA 缺失值是必须的步骤。如果某些数值过于离群,你也可能需要将其标记为 NA 。行移除是最简单粗暴的处理方法。",
"_____no_output_____"
]
],
[
[
"# NA 行移除\ndf <- na.omit(df)\ndf",
"_____no_output_____"
]
],
[
[
"## 整合与重构\n\n### 转置\n\n常见的转置方法是 t() 函数:",
"_____no_output_____"
]
],
[
[
"df = matrix(1:6, nrow=2, ncol=3)\nt(df)",
"_____no_output_____"
]
],
[
[
"### 整合:aggregate()\n\n这个函数是非常强大的。语法:\n\n aggregate(x, by=list(), FUN)\n \n其中 x 是待整合的数据对象,by 是分类依据的列,FUN 是待应用的标量函数。",
"_____no_output_____"
]
],
[
[
"# 这个例子改编自 R 的官方帮助 aggregate()\ndf <- data.frame(v1 = c(1,3,5,7,8,3,5,NA,4,6,7,9),\n v2 = c(11,33,55,77,88,33,55,NA,44,55,77,99) )\nby1 <- c(\"red\", \"blue\", 1, 2, NA, \"big\", 1, 2, \"red\", 1, NA, 12)\nby2 <- c(\"wet\", \"dry\", 99, 95, NA, \"damp\", 95, 99, \"red\", 99, NA, NA)\n\n# 按照 by1 & by2 整合原数据 testDF\n# 注意(by1, by2)=(1, 99) 对应 (v1, v2)=(5, 55) 与 (6,55) 两条数据\n# 因此第三行的 v1 = mean(c(5, 6)) = 5.5\naggregate(x = df, by = list(b1=by1, b2=by2), FUN = \"mean\")",
"_____no_output_____"
],
[
"# 用公式筛选原数据的列,仅整合这些列\n# 注意:v1中的一个含 NA 的观测被移除\naggregate(cbind(df$v1) ~ by1+by2, FUN = \"mean\")",
"_____no_output_____"
]
],
[
[
"还有一个强大的整合包 reshape2,这里就不多介绍了。",
"_____no_output_____"
],
[
"## 函数式编程\n\n函数式编程是每个科学计算语言中的重要内容;操作实现的优先级依次是**矢量运算(例如 df+1)、函数式书写,最后才是循环语句**。在 R 中,函数式编程主要是由 apply 函数族承担。R 中的 apply 函数族包括:\n\n- apply():指定轴向。传入 data.frame,返回 vector.\n- tapply():\n- vapply():\n- lapply():\n- sapply():\n- mapply():\n- rapply():\n- eapply():\n\n下面依次介绍。",
"_____no_output_____"
],
[
"### apply():指定多维对象的轴\n\n在 R 中,通过 apply() 可以将函数运用于多维对象。基本语法是:\n\n apply(d, N, FUN, ...)\n\n其中,N 用于指定将函数 FUN 应用于数据 d 的第几维(1为行,2为列)。省略号中可以传入 function 的参数。",
"_____no_output_____"
]
],
[
[
"df <- data.frame(x=c(1, 2, 3), y=c(5, 4, 2), z=c(8, 6, 9), s=c(3, 7, 4))\ndf",
"_____no_output_____"
],
[
"# 计算 df 各列的中位数\ncolmean <- apply(df, 2, median)\n# 计算 df 各行的 25 分位数\nrowquan <- apply(df, 1, quantile, probs=.25)\n\nprint(list(colmean, rowquan))",
"[[1]]\nx y z s \n2 4 8 4 \n\n[[2]]\n[1] 2.50 3.50 2.75\n\n"
]
],
[
[
"### lapply():列表式应用\n\nlapply 函数的本意是对 list 对象进行操作。返回值是 list 类型。",
"_____no_output_____"
]
],
[
[
"lst <- list(a=c(0,1), b=c(1,2), c=c(3,4))\nlapply(lst, function(x) {sum(x^2)})",
"_____no_output_____"
]
],
[
[
"但同样可以作用于 DataFrame 对象的各个列(因为 DataFrame 对象是类似于各列组成的 list):",
"_____no_output_____"
]
],
[
[
"lapply(df, sum)",
"_____no_output_____"
]
],
[
[
"### sapply()/vapply():变种 lapply()\n\nsapply() 实质上是一种异化的 lapply(),返回值可以转变为 vector 而不是 list 类型。 ",
"_____no_output_____"
]
],
[
[
"class(sapply(lst, function(x) {sum(x^2)}))\nclass(lapply(lst, function(x) {sum(x^2)}))",
"_____no_output_____"
],
[
"print(sapply(df, sum))",
" x y z s \n 6 11 23 14 \n"
]
],
[
[
"参数 simplify=TRUE 是默认值,表示返回 vector 而不是 list。如果改为 FALSE,就退化为 lapply() 函数。",
"_____no_output_____"
]
],
[
[
"sapply(df, sum, simplify=FALSE)",
"_____no_output_____"
]
],
[
[
"vapply() 函数可以通过 FUN.VALUE 参数传入行名称,但这一步往往可以借助 lapply()/sapply() 加上外部的 row.names() 函数完成。",
"_____no_output_____"
],
[
"### mapply():多输入值的应用\n\nmapply() 函数支持多个输入值:\n\n mapply(FUN, [input1, input2, ...], MoreArgs=NULL)\n \n其中各 input 的**长度应该相等或互为整数倍数**。该函数的用处在于避免了事先将数据合并。",
"_____no_output_____"
]
],
[
[
"print(mapply(min, seq(0, 2, by=0.5), -2:7))",
" [1] -2.0 -1.0 0.0 1.0 2.0 0.0 0.5 1.0 1.5 2.0\n"
]
],
[
[
"### tapply():分组应用\n\ntapply() 函数可以借助 factor 的各水平进行分组,然后进行计算。类似于 group by 操作:\n\n tapply(X, idx, FUN)\n\n其中 X 是数据,idx 是分组依据。",
"_____no_output_____"
]
],
[
[
"df <- data.frame(x=1:6, groups=rep(c(\"a\", \"b\"), 3))\nprint(tapply(df$x, df$groups, cumsum))",
"$a\n[1] 1 4 9\n\n$b\n[1] 2 6 12\n\n"
]
],
[
[
"其他的 apply() 函数很少用到,在此就不介绍了。",
"_____no_output_____"
],
[
"## 其他实用函数\n\n在本系列的 [“数据读写操作”一文](ReadData.ipynb) 中,也介绍了一些实用的函数,可以参考。\n\n此外还有:\n\n| 函数 | 含义 |\n| --- | --- |\n| seq(from=N, to=N, by=N, [length.out=N, along.with=obj]) | 生成数列。参数分别是起、止、步长、数列长、指定数列长度与某对象等长。 |\n| rep(x, N) | 重复组合。比如 rep(1:2, 2) 会生成一个向量 c(1, 2, 1, 2) |\n| cut(x, N, [ordered_result=F]) | 分割为因子。 将连续变量 x 分割为有 N 个水平的因子,可以指定是否有序。 | \n| pretty(x, N) | 美观分割。将连续变量 x 分割为 N 个区间(N+1 个端点),并使端点为取整值。 绘图中使用。|\n| cat(obj1, obj2, ..., [file=, append=]) | 连接多个对象,并输出到屏幕或文件。 |",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a9b7aa91fe6fa91f708229a941f1b2441fc6373
| 18,068 |
ipynb
|
Jupyter Notebook
|
PythonNotebooks/indexFileNavigation/.ipynb_checkpoints/index_file_navigation_platformCategory-checkpoint.ipynb
|
CopernicusMarineInsitu/INSTACTraining
|
6ba829db4e380e4895cae7ced2f9c774768a41b2
|
[
"MIT"
] | 9 |
2018-07-11T12:58:51.000Z
|
2020-12-30T11:15:47.000Z
|
PythonNotebooks/indexFileNavigation/.ipynb_checkpoints/index_file_navigation_platformCategory-checkpoint.ipynb
|
CopernicusMarineInsitu/INSTACTraining
|
6ba829db4e380e4895cae7ced2f9c774768a41b2
|
[
"MIT"
] | 1 |
2018-10-12T18:18:52.000Z
|
2018-10-15T13:44:40.000Z
|
PythonNotebooks/indexFileNavigation/.ipynb_checkpoints/index_file_navigation_platformCategory-checkpoint.ipynb
|
CopernicusMarineInsitu/INSTACTraining
|
6ba829db4e380e4895cae7ced2f9c774768a41b2
|
[
"MIT"
] | 4 |
2017-09-28T07:27:24.000Z
|
2020-12-30T11:16:21.000Z
| 33.583643 | 514 | 0.418862 |
[
[
[
"<h3> ABSTRACT </h3>",
"_____no_output_____"
],
[
"All CMEMS in situ data products can be found and downloaded after [registration](http://marine.copernicus.eu/services-portfolio/register-now/) via [CMEMS catalogue] (http://marine.copernicus.eu/services-portfolio/access-to-products/).\n\nSuch channel is advisable just for sporadic netCDF donwloading because when operational, interaction with the web user interface is not practical. In this context though, the use of scripts for ftp file transference is is a much more advisable approach.\n\nAs long as every line of such files contains information about the netCDFs contained within the different directories [see at tips why](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md), it is posible for users to loop over its lines to download only those that matches a number of specifications such as spatial coverage, time coverage, provider, data_mode, parameters or file_name related (region, data type, TS or PF, platform code, or/and platform category, timestamp).",
"_____no_output_____"
],
[
"<h3>PREREQUISITES</h3>",
"_____no_output_____"
],
[
"- [credentias](http://marine.copernicus.eu/services-portfolio/register-now/)\n- aimed [in situ product name](http://cmems-resources.cls.fr/documents/PUM/CMEMS-INS-PUM-013.pdf)\n- aimed [hosting distribution unit](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md)\n- aimed [index file](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md)\n\ni.e:",
"_____no_output_____"
]
],
[
[
"user = '' #type CMEMS user name within colons\npassword = '' #type CMEMS password within colons\nproduct_name = 'INSITU_BAL_NRT_OBSERVATIONS_013_032' #type aimed CMEMS in situ product \ndistribution_unit = 'cmems.smhi.se' #type aimed hosting institution\nindex_file = 'index_history.txt' #type aimed index file name\n\n#remember! platform category only for history and monthly directories",
"_____no_output_____"
]
],
[
[
"<h3>DOWNLOAD</h3>",
"_____no_output_____"
],
[
"1. Index file download",
"_____no_output_____"
]
],
[
[
"import ftplib ",
"_____no_output_____"
],
[
"ftp=ftplib.FTP(distribution_unit,user,password) \nftp.cwd(\"Core\")\nftp.cwd(product_name) \nlocal_file = open(index_file, 'wb')\nftp.retrbinary('RETR ' + index_file, local_file.write)\nlocal_file.close()\nftp.quit()\n#ready when 221 Goodbye.!",
"_____no_output_____"
]
],
[
[
"<h3>QUICK VIEW</h3>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom random import randint",
"_____no_output_____"
],
[
"index = np.genfromtxt(index_file, skip_header=6, unpack=False, delimiter=',', dtype=None,\n names=['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',\n 'geospatial_lon_min', 'geospatial_lon_max',\n 'time_coverage_start', 'time_coverage_end', \n 'provider', 'date_update', 'data_mode', 'parameters'])",
"_____no_output_____"
],
[
"dataset = randint(0,len(index)) #ramdom line of the index file",
"_____no_output_____"
],
[
"values = [index[dataset]['catalog_id'], '<a href='+index[dataset]['file_name']+'>'+index[dataset]['file_name']+'</a>', index[dataset]['geospatial_lat_min'], index[dataset]['geospatial_lat_max'],\n index[dataset]['geospatial_lon_min'], index[dataset]['geospatial_lon_max'], index[dataset]['time_coverage_start'],\n index[dataset]['time_coverage_end'], index[dataset]['provider'], index[dataset]['date_update'], index[dataset]['data_mode'],\n index[dataset]['parameters']]\nheaders = ['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',\n 'geospatial_lon_min', 'geospatial_lon_max',\n 'time_coverage_start', 'time_coverage_end', \n 'provider', 'date_update', 'data_mode', 'parameters']\ndf = pd.DataFrame(values, index=headers, columns=[dataset])\ndf.style",
"_____no_output_____"
]
],
[
[
"<h3>FILTERING CRITERIA</h3>",
"_____no_output_____"
],
[
"Regarding the above glimpse, it is posible to filter by 12 criteria. As example we will setup next a filter to only download those files that contains data within a defined boundingbox.",
"_____no_output_____"
],
[
" 1. Aimed category ",
"_____no_output_____"
]
],
[
[
"targeted_category = 'drifter'",
"_____no_output_____"
]
],
[
[
" 2. netCDF filtering/selection",
"_____no_output_____"
]
],
[
[
"selected_netCDFs = [];\n\nfor netCDF in index: \n file_name = netCDF['file_name']\n \n folders = file_name.split('/')[3:len(file_name.split('/'))-1]\n category = file_name.split('/')[3:len(file_name.split('/'))-1][len(file_name.split('/')[3:len(file_name.split('/'))-1])-1]\n \n if (category == targeted_category):\n selected_netCDFs.append(file_name)\n \nprint(\"total: \" +str(len(selected_netCDFs)))",
"total: 11\n"
]
],
[
[
"<h3> SELECTION DOWNLOAD </h3>",
"_____no_output_____"
]
],
[
[
"for nc in selected_netCDFs:\n\n last_idx_slash = nc.rfind('/')\n ncdf_file_name = nc[last_idx_slash+1:]\n folders = nc.split('/')[3:len(nc.split('/'))-1]\n host = nc.split('/')[2] #or distribution unit\n \n ftp=ftplib.FTP(host,user,password) \n for folder in folders:\n ftp.cwd(folder)\n \n local_file = open(ncdf_file_name, 'wb')\n ftp.retrbinary('RETR '+ncdf_file_name, local_file.write)\n local_file.close() \n\n ftp.quit()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9b95afa9f0832a178be55ae542516a832f6697
| 26,695 |
ipynb
|
Jupyter Notebook
|
metinisleme.ipynb
|
ozturkonur98/tensorflow
|
482ffac821d53d970f4f1ede762792efc0cdb7fa
|
[
"MIT"
] | null | null | null |
metinisleme.ipynb
|
ozturkonur98/tensorflow
|
482ffac821d53d970f4f1ede762792efc0cdb7fa
|
[
"MIT"
] | null | null | null |
metinisleme.ipynb
|
ozturkonur98/tensorflow
|
482ffac821d53d970f4f1ede762792efc0cdb7fa
|
[
"MIT"
] | null | null | null | 117.083333 | 10,550 | 0.812324 |
[
[
[
"from google.colab import files\nimport string\n\nyuklenenDosya = files.upload()\n\narticle = open(\"veri.txt\", \"rt\")\nreadarticle = article.read()\nreadarticle = readarticle.lower()\narticle.close()\nprint(readarticle)\n\n\nkeywords = [\n \"bilgi\",\n \"belge\", \n \"açık\",\n \"erişim\", \n \"bilim\", \n \"büyük\",\n \"veri\", \n \"semantik\", \n \"teknoloji\", \n \"makine\",\n \"öğrenmesi\", \n \"yapay\",\n \"zeka\"\n]\nprint(keywords)\n\nfor keyword in keywords:\n result = readarticle.count(keyword)\n print(keyword+\": \" + str(result))\n\n print(\"✔ Hesaplama İşlemi Tarih\")\nimport datetime\n\nTarih = datetime.datetime.now()\n\nprint(Tarih)\n\n\nkeywords=[ 17,0,34,19,15,7,63,0,9,23,2,5,4]\ntoplam=0\nortalama=0\nprint(\"✔Toplam İlişkili Anahtar Kelime Sayısı\")\nfor i in range(0,len(keywords)):\n toplam+=keywords[i]\n \nortalama=toplam/len(keywords)\n\n#Toplam İlişkili Anahtar Kelime Sayısı \nprint('Toplam : ',toplam)\n#Toplam İlişkili Anahtar Kelime ortalaması\nprint(\"✔Anahtar Kelimelerin ortalaması\")\nprint('Ortalama : ',ortalama)\n#Toplam İlişkili Anahtar Kelime yüzdesi\nprint(\"✔Anahtar Kelime İlişkisinin Yüzdesi\")\nprint('Yüzde : ',toplam*ortalama/100)\n\nif toplam*ortalama/100 > 25:\n print(\"✔Paragrafla Anahtar Kelimelerin Bir İlişkisi vardır\")\n\nelse:\n print(\"✔Paragrafla Anahtar Kelimelerin Bir İlişkisi yoktur.\")\n\nprint(\"✔ Paragrafla Anahtar Kelimelerin Bir İlişkisi olup olmadığını gösteren grafik\")\n\n\n\n\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nx = [ 17,0,34,19,15,7,63,0,9,23,2,5,4]\ny = [ 17,0,34,19,15,7,63,0,9,23,2,5,4]\n\n\nfor i in range(0,len(x)):\n toplam+=x[i]\n \nortalama=toplam/len(x)\n\n\n\nslope, intercept, r, p, std_err = stats.linregress(x, y)\n\ndef myfunc(x):\n return slope * x + intercept\n\nmymodel = list(map(myfunc, x))\n\nplt.scatter(x,y)\nplt.plot(x, mymodel)\nplt.show()\n\n\n\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a9b9bd37327c2487d49da61868c931dfe12fbe8
| 146,356 |
ipynb
|
Jupyter Notebook
|
notebooks/Data Setup/data setup scikit-learn gaussians Tribuo Unit Tests.ipynb
|
geoffreydstewart/TribuoHdbscan
|
0ee465eab4ff5bc01d5f0e262bc1b0570939208f
|
[
"Apache-2.0"
] | 1 |
2022-03-15T05:43:04.000Z
|
2022-03-15T05:43:04.000Z
|
notebooks/Data Setup/data setup scikit-learn gaussians Tribuo Unit Tests.ipynb
|
geoffreydstewart/TribuoHdbscan
|
0ee465eab4ff5bc01d5f0e262bc1b0570939208f
|
[
"Apache-2.0"
] | null | null | null |
notebooks/Data Setup/data setup scikit-learn gaussians Tribuo Unit Tests.ipynb
|
geoffreydstewart/TribuoHdbscan
|
0ee465eab4ff5bc01d5f0e262bc1b0570939208f
|
[
"Apache-2.0"
] | null | null | null | 457.3625 | 132,844 | 0.915501 |
[
[
[
"## This notebook prepares an dataset for unit testing the Tribuo HDBSCAN* implementation. It is also used to compare HDBSCAN* implementations.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.datasets import make_blobs",
"_____no_output_____"
],
[
"X, y = make_blobs(n_samples=2000, centers=4, n_features=3, random_state=0)",
"_____no_output_____"
],
[
"print(X.shape)",
"(2000, 3)\n"
],
[
"print(X[0])",
"[-1.23310899 8.97710796 10.54741024]\n"
],
[
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(8, 8))\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X[:, 0], X[:, 1], X[:, 2],\n linewidths=1, alpha=.7,\n edgecolor='k',\n s = 2)\nplt.show()",
"_____no_output_____"
],
[
"df = pd.DataFrame({'Feature1': X[:, 0], 'Feature2': X[:, 1], 'Feature3': X[:, 2]})\n",
"_____no_output_____"
],
[
"df.head(10)",
"_____no_output_____"
],
[
"# the same data is saved to 2 different files, the Tribuo and Python loader uses the header\ndf.to_csv('/Users/gstewart/temp/development/mscs/cpsc69700/RefImpl/HDBSCAN_Star/basic-gaussians.csv', index=False, header=False)\ndf.to_csv('../../data/basic-gaussians.csv', index=False, header=True)\n\n# the data is also used for comparing prediction results\ndf[:1980].to_csv('../../data/basic-gaussians-train.csv', index=False, header=True)\ndf[1980:].to_csv('../../data/basic-gaussians-predict.csv', index=False, header=True)\n",
"_____no_output_____"
],
[
"print(y.tolist())",
"[2, 3, 1, 3, 0, 2, 1, 1, 0, 2, 1, 0, 3, 0, 2, 0, 1, 3, 3, 3, 3, 1, 0, 3, 3, 3, 3, 3, 1, 0, 2, 2, 1, 3, 2, 3, 0, 2, 0, 1, 2, 0, 0, 3, 2, 2, 2, 2, 2, 2, 3, 0, 1, 3, 1, 3, 2, 3, 1, 3, 1, 1, 1, 1, 3, 3, 1, 3, 1, 1, 0, 3, 2, 2, 1, 0, 2, 0, 3, 2, 0, 0, 3, 1, 2, 1, 0, 0, 2, 2, 1, 3, 1, 1, 0, 3, 2, 2, 3, 3, 1, 1, 0, 1, 3, 3, 1, 3, 2, 2, 1, 2, 0, 2, 2, 1, 1, 2, 3, 1, 3, 2, 2, 0, 0, 2, 1, 1, 2, 0, 0, 0, 2, 0, 1, 2, 0, 0, 2, 1, 1, 3, 1, 1, 0, 2, 0, 2, 1, 3, 2, 0, 3, 2, 3, 0, 2, 1, 1, 1, 2, 2, 1, 1, 3, 0, 0, 3, 2, 0, 2, 3, 0, 2, 0, 2, 0, 3, 0, 2, 0, 0, 0, 0, 1, 1, 1, 0, 3, 1, 0, 3, 3, 2, 3, 0, 2, 1, 1, 1, 0, 1, 2, 3, 0, 0, 1, 0, 1, 3, 0, 2, 0, 1, 3, 3, 0, 1, 2, 2, 2, 0, 0, 3, 3, 2, 0, 1, 3, 1, 2, 0, 1, 0, 3, 0, 2, 2, 3, 2, 2, 1, 2, 0, 3, 1, 1, 3, 3, 0, 3, 3, 3, 2, 2, 0, 1, 2, 0, 2, 3, 1, 1, 3, 3, 3, 1, 3, 1, 3, 3, 3, 2, 3, 0, 1, 1, 1, 1, 1, 1, 3, 3, 2, 3, 0, 2, 3, 1, 3, 3, 0, 1, 1, 1, 2, 1, 2, 3, 2, 3, 2, 1, 1, 1, 2, 1, 0, 1, 3, 3, 1, 2, 1, 1, 1, 2, 0, 2, 0, 1, 3, 3, 2, 0, 3, 2, 2, 2, 1, 1, 2, 1, 3, 1, 3, 2, 3, 1, 2, 0, 2, 0, 2, 0, 1, 1, 3, 0, 1, 2, 0, 1, 1, 3, 1, 1, 2, 1, 2, 2, 3, 3, 3, 0, 2, 0, 2, 1, 1, 1, 1, 2, 1, 2, 0, 2, 0, 2, 2, 3, 2, 3, 0, 3, 2, 1, 3, 2, 3, 1, 3, 3, 3, 3, 0, 1, 3, 2, 1, 2, 2, 3, 0, 3, 1, 0, 1, 0, 3, 0, 0, 1, 0, 0, 0, 1, 0, 2, 0, 2, 3, 1, 2, 2, 3, 2, 1, 3, 0, 3, 3, 2, 1, 2, 2, 1, 3, 3, 0, 2, 0, 0, 3, 3, 2, 3, 3, 2, 0, 0, 0, 0, 3, 1, 1, 0, 1, 3, 0, 3, 3, 3, 2, 2, 2, 3, 2, 0, 0, 2, 3, 3, 1, 3, 3, 0, 3, 3, 3, 0, 2, 3, 3, 0, 1, 2, 3, 1, 1, 1, 3, 3, 0, 1, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 3, 3, 2, 1, 1, 3, 3, 2, 0, 2, 0, 1, 0, 1, 2, 2, 3, 2, 0, 2, 2, 2, 1, 2, 1, 0, 3, 3, 1, 1, 1, 0, 0, 1, 2, 1, 3, 1, 0, 2, 0, 0, 0, 1, 0, 1, 0, 3, 0, 1, 1, 3, 2, 1, 2, 0, 0, 0, 2, 0, 3, 2, 3, 2, 3, 1, 1, 1, 0, 0, 3, 1, 2, 0, 0, 2, 2, 3, 2, 3, 1, 2, 1, 3, 3, 3, 0, 0, 3, 2, 1, 1, 1, 0, 0, 3, 2, 0, 0, 3, 2, 2, 2, 3, 2, 0, 0, 1, 1, 1, 1, 3, 0, 3, 3, 3, 1, 2, 3, 2, 0, 0, 2, 3, 0, 0, 2, 3, 3, 1, 1, 1, 3, 2, 3, 3, 2, 2, 3, 3, 2, 2, 3, 0, 1, 2, 2, 3, 3, 0, 0, 2, 3, 3, 3, 1, 0, 1, 3, 3, 
2, 2, 1, 3, 2, 1, 0, 3, 0, 0, 0, 3, 3, 3, 3, 2, 0, 1, 0, 1, 3, 0, 0, 0, 1, 3, 1, 1, 2, 3, 2, 1, 2, 2, 3, 2, 2, 0, 1, 0, 0, 0, 0, 1, 3, 2, 2, 1, 1, 2, 3, 1, 2, 2, 1, 0, 0, 1, 2, 1, 3, 1, 0, 3, 3, 1, 3, 2, 2, 2, 1, 1, 1, 2, 1, 0, 3, 0, 2, 0, 2, 0, 0, 1, 2, 2, 0, 1, 0, 2, 2, 0, 1, 2, 2, 0, 1, 3, 0, 1, 0, 1, 0, 2, 2, 3, 3, 0, 1, 0, 2, 1, 2, 0, 1, 3, 0, 0, 0, 0, 0, 0, 1, 1, 3, 2, 0, 3, 1, 1, 1, 3, 0, 0, 2, 0, 3, 3, 1, 3, 1, 3, 0, 0, 0, 0, 3, 3, 0, 2, 2, 3, 2, 1, 3, 2, 1, 1, 0, 2, 3, 3, 2, 0, 2, 3, 3, 0, 0, 2, 1, 2, 2, 3, 2, 1, 1, 2, 3, 1, 3, 0, 1, 1, 2, 1, 3, 3, 1, 0, 2, 0, 2, 1, 2, 0, 0, 1, 1, 1, 0, 1, 3, 1, 2, 0, 2, 1, 1, 1, 0, 3, 0, 2, 1, 2, 0, 3, 2, 2, 2, 1, 0, 3, 0, 2, 1, 0, 0, 1, 3, 3, 2, 3, 2, 3, 2, 0, 1, 3, 3, 1, 0, 3, 3, 0, 3, 1, 1, 1, 2, 1, 0, 1, 0, 1, 3, 1, 1, 1, 2, 1, 3, 1, 0, 2, 0, 3, 2, 1, 0, 0, 3, 0, 1, 0, 2, 2, 2, 2, 0, 1, 0, 0, 0, 2, 0, 2, 0, 0, 3, 2, 1, 0, 3, 3, 0, 0, 1, 0, 3, 0, 2, 1, 0, 2, 2, 3, 3, 2, 0, 0, 1, 2, 2, 2, 2, 1, 2, 1, 3, 2, 1, 0, 0, 1, 0, 2, 2, 2, 0, 3, 2, 1, 2, 0, 1, 0, 3, 3, 2, 3, 2, 0, 3, 3, 3, 0, 3, 1, 2, 0, 2, 2, 1, 3, 3, 1, 1, 2, 3, 1, 0, 1, 3, 0, 0, 3, 3, 2, 2, 0, 0, 2, 0, 1, 3, 0, 3, 3, 2, 0, 0, 2, 0, 1, 1, 0, 2, 3, 1, 2, 1, 0, 2, 3, 0, 1, 2, 3, 2, 2, 0, 3, 0, 2, 2, 2, 0, 3, 0, 2, 1, 0, 0, 0, 0, 3, 0, 3, 1, 3, 2, 3, 1, 2, 2, 2, 0, 3, 3, 3, 1, 3, 1, 3, 1, 0, 1, 2, 0, 3, 3, 3, 0, 1, 0, 0, 1, 2, 0, 2, 1, 3, 2, 3, 3, 3, 2, 3, 3, 0, 1, 0, 3, 2, 2, 0, 3, 3, 0, 1, 3, 1, 3, 2, 1, 3, 1, 0, 0, 0, 3, 1, 3, 2, 2, 1, 0, 2, 3, 0, 0, 2, 1, 2, 1, 2, 1, 1, 0, 2, 3, 0, 3, 2, 1, 2, 0, 0, 1, 0, 0, 1, 3, 2, 0, 2, 0, 3, 2, 0, 2, 1, 0, 3, 2, 0, 1, 0, 0, 0, 1, 1, 0, 3, 3, 2, 1, 2, 3, 1, 1, 3, 0, 3, 2, 2, 3, 1, 3, 0, 2, 2, 3, 1, 0, 1, 1, 1, 2, 2, 2, 3, 2, 2, 2, 3, 2, 0, 3, 0, 1, 3, 3, 2, 1, 2, 2, 2, 3, 2, 0, 1, 1, 3, 2, 0, 3, 2, 2, 3, 0, 1, 3, 2, 1, 1, 3, 3, 1, 1, 0, 2, 1, 1, 3, 0, 0, 3, 2, 3, 2, 2, 3, 3, 2, 3, 3, 2, 2, 2, 3, 3, 1, 2, 3, 1, 1, 2, 0, 3, 2, 3, 1, 1, 1, 2, 1, 3, 0, 3, 3, 2, 3, 2, 0, 2, 3, 1, 3, 2, 1, 2, 3, 0, 0, 0, 1, 2, 0, 3, 2, 0, 3, 1, 
1, 3, 2, 0, 2, 0, 1, 2, 0, 0, 3, 3, 1, 1, 2, 0, 1, 3, 0, 3, 3, 0, 3, 2, 3, 1, 0, 0, 1, 0, 2, 1, 1, 3, 2, 1, 0, 0, 0, 2, 1, 1, 1, 0, 0, 1, 0, 2, 2, 1, 0, 2, 3, 3, 1, 0, 3, 3, 1, 3, 3, 2, 1, 0, 3, 1, 1, 1, 2, 0, 0, 0, 1, 1, 0, 3, 1, 3, 0, 1, 2, 1, 2, 3, 0, 2, 3, 1, 3, 2, 2, 0, 1, 2, 0, 0, 0, 1, 3, 2, 3, 3, 0, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 3, 1, 0, 3, 2, 0, 3, 3, 3, 3, 0, 3, 0, 1, 2, 0, 2, 1, 3, 3, 0, 0, 0, 3, 2, 3, 3, 2, 3, 0, 3, 0, 1, 2, 2, 3, 1, 2, 2, 2, 3, 0, 3, 3, 1, 2, 2, 0, 3, 3, 2, 0, 2, 2, 1, 0, 3, 0, 1, 3, 0, 0, 3, 0, 0, 0, 3, 1, 0, 2, 0, 3, 1, 3, 0, 1, 2, 3, 0, 0, 0, 3, 1, 3, 0, 3, 2, 0, 2, 2, 0, 3, 1, 0, 0, 0, 0, 1, 3, 1, 3, 3, 1, 3, 0, 0, 1, 0, 3, 1, 1, 0, 3, 3, 3, 1, 0, 0, 0, 2, 0, 0, 3, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 2, 2, 2, 1, 2, 3, 1, 2, 0, 3, 2, 2, 0, 2, 1, 2, 0, 0, 0, 0, 2, 3, 3, 2, 3, 1, 2, 1, 0, 1, 1, 2, 3, 1, 1, 2, 0, 1, 0, 3, 3, 1, 2, 1, 3, 3, 1, 2, 2, 3, 0, 3, 0, 3, 0, 2, 0, 2, 0, 3, 1, 1, 3, 3, 2, 0, 3, 3, 0, 0, 3, 2, 0, 2, 0, 3, 2, 2, 0, 3, 3, 3, 2, 2, 1, 2, 1, 3, 0, 2, 3, 1, 0, 1, 2, 2, 1, 0, 0, 2, 0, 1, 2, 0, 2, 2, 1, 0, 0, 0, 3, 1, 1, 0, 2, 2, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 1, 1, 0, 0, 0, 2, 3, 0, 2, 0, 1, 1, 2, 1, 1, 1, 0, 1, 3, 2, 0, 3, 1, 3, 3, 2, 0, 1, 2, 0, 0, 0, 1, 1, 3, 0, 3, 3, 2, 3, 1, 1, 0, 1, 3, 0, 0, 2, 1, 2, 3, 3, 1, 0, 1, 1, 3, 1, 1, 1, 1, 1, 2, 0, 2, 3, 1, 3, 1, 1, 2, 2, 2, 0, 3, 3, 0, 3, 2, 3, 2, 0, 1, 3, 1, 1, 1, 2, 2, 2, 2, 3, 3, 0, 1, 2, 3, 2, 3, 2, 1, 0, 3, 0, 1, 3, 2, 2, 2, 1, 2, 2, 0, 1, 3, 2, 3, 2, 3, 3, 2, 0, 2, 3, 2, 1, 1, 1, 2, 3, 3, 0, 2, 1, 1, 2, 0, 2, 1, 1, 1, 1, 0, 2, 3, 3, 1, 3, 3, 0, 3, 2, 0, 0, 0, 1, 2, 1, 1, 0, 1, 1, 2, 3, 3, 0, 3, 2, 3, 3, 0, 1, 1, 2, 2, 2, 3, 1, 2, 1, 1, 1, 0, 0, 3, 0, 2, 2, 1, 0, 1, 3, 2, 2, 0, 1, 3, 0, 2, 0, 1, 0, 0, 0, 3, 1, 1, 0, 0, 3, 0, 0, 3, 2, 1, 3, 1, 1, 3, 2, 1, 0, 0, 0, 2, 2, 2, 1, 3, 2, 2, 3, 2, 2, 0, 3, 2, 3, 0, 0, 1, 3, 1, 2, 1, 2, 3, 2, 0, 1, 1, 2, 0, 1, 1, 1, 2, 3, 3, 2, 1, 1, 2, 2, 1, 0, 1, 2, 2, 2, 1, 0, 2, 2, 1, 1, 3, 2, 0, 0, 2, 3, 1, 1, 0, 2, 2, 2, 2, 1, 1, 3, 1, 0, 
3, 1]\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9b9c479b17ddc94c53dd4d971beb833455388d
| 34,546 |
ipynb
|
Jupyter Notebook
|
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__ETKThorn__ID_converter_ILGRMHD.ipynb
|
kazewong/nrpytutorial
|
cc511325f37f01284b2b83584beb2a452556b3fb
|
[
"BSD-2-Clause"
] | null | null | null |
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__ETKThorn__ID_converter_ILGRMHD.ipynb
|
kazewong/nrpytutorial
|
cc511325f37f01284b2b83584beb2a452556b3fb
|
[
"BSD-2-Clause"
] | null | null | null |
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__ETKThorn__ID_converter_ILGRMHD.ipynb
|
kazewong/nrpytutorial
|
cc511325f37f01284b2b83584beb2a452556b3fb
|
[
"BSD-2-Clause"
] | null | null | null | 43.673831 | 354 | 0.554275 |
[
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Tutorial-IllinoisGRMHD: ID_converter_ILGRMHD ETKThorn\n\n## Authors: Leo Werneck & Zach Etienne\n\n<font color='red'>**This module is currently under development**</font>\n\n## In this tutorial module we generate the ID_converter_ILGRMHD ETK thorn files, compatible with our latest implementation of IllinoisGRMHD\n\n### Required and recommended citations:\n\n* **(Required)** Etienne, Z. B., Paschalidis, V., Haas R., Mösta P., and Shapiro, S. L. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)).\n* **(Required)** Noble, S. C., Gammie, C. F., McKinney, J. C., Del Zanna, L. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)).\n* **(Recommended)** Del Zanna, L., Bucciantini N., Londrillo, P. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)).",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis module is organized as follows\n\n0. [Step 0](#src_dir): **Source directory creation**\n1. [Step 1](#introduction): **Introduction**\n1. [Step 2](#convert_to_hydrobase__src): **`set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables.C`**\n1. [Step 3](#convert_to_hydrobase__param): **`param.ccl`**\n1. [Step 4](#convert_to_hydrobase__interface): **`interface.ccl`**\n1. [Step 5](#convert_to_hydrobase__schedule): **`schedule.ccl`**\n1. [Step 6](#convert_to_hydrobase__make): **`make.code.defn`**\n1. [Step n-1](#code_validation): **Code validation**\n1. [Step n](#latex_pdf_output): **Output this notebook to $\\LaTeX$-formatted PDF file**",
"_____no_output_____"
],
[
"<a id='src_dir'></a>\n\n# Step 0: Source directory creation \\[Back to [top](#toc)\\]\n$$\\label{src_dir}$$\n\nWe will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet.",
"_____no_output_____"
]
],
[
[
"# Step 0: Creation of the IllinoisGRMHD source directory\n# Step 0a: Load up cmdline_helper and create the directory\nimport os,sys\nnrpy_dir_path = os.path.join(\"..\",\"..\")\nif nrpy_dir_path not in sys.path:\n sys.path.append(nrpy_dir_path)\n\nimport cmdline_helper as cmd\nIDcIGM_dir_path = os.path.join(\"..\",\"ID_converter_ILGRMHD\")\ncmd.mkdir(IDcIGM_dir_path)\nIDcIGM_src_dir_path = os.path.join(IDcIGM_dir_path,\"src\")\ncmd.mkdir(IDcIGM_src_dir_path)\n\n# Step 0b: Create the output file path \noutfile_path__ID_converter_ILGRMHD__source = os.path.join(IDcIGM_src_dir_path,\"set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables.C\")\noutfile_path__ID_converter_ILGRMHD__make = os.path.join(IDcIGM_src_dir_path,\"make.code.defn\")\noutfile_path__ID_converter_ILGRMHD__param = os.path.join(IDcIGM_dir_path,\"param.ccl\")\noutfile_path__ID_converter_ILGRMHD__interface = os.path.join(IDcIGM_dir_path,\"interface.ccl\")\noutfile_path__ID_converter_ILGRMHD__schedule = os.path.join(IDcIGM_dir_path,\"schedule.ccl\")",
"_____no_output_____"
]
],
[
[
"<a id='introduction'></a>\n\n# Step 1: Introduction \\[Back to [top](#toc)\\]\n$$\\label{introduction}$$",
"_____no_output_____"
],
[
"<a id='convert_to_hydrobase__src'></a>\n\n# Step 2: `set_IllinoisGRMHD_metric_GRMHD_variables _based_on_HydroBase_and_ADMBase_variables.C` \\[Back to [top](#toc)\\]\n$$\\label{convert_to_hydrobase__src}$$",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__ID_converter_ILGRMHD__source\n/********************************\n * CONVERT ET ID TO IllinoisGRMHD\n * \n * Written in 2014 by Zachariah B. Etienne\n *\n * Sets metric & MHD variables needed \n * by IllinoisGRMHD, converting from\n * HydroBase and ADMBase.\n ********************************/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n#include <sys/time.h>\n#include \"cctk.h\"\n#include \"cctk_Parameters.h\"\n#include \"cctk_Arguments.h\"\n#include \"IllinoisGRMHD_headers.h\"\n\nextern \"C\" void set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables(CCTK_ARGUMENTS) {\n\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n\n if(rho_b_atm > 1e199) {\n CCTK_VError(VERR_DEF_PARAMS, \"You MUST set rho_b_atm to some reasonable value in your param.ccl file.\\n\");\n }\n\n // Convert ADM variables (from ADMBase) to the BSSN-based variables expected by this routine.\n IllinoisGRMHD_convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij(cctkGH,cctk_lsh, gxx,gxy,gxz,gyy,gyz,gzz,alp,\n gtxx,gtxy,gtxz,gtyy,gtyz,gtzz,\n gtupxx,gtupxy,gtupxz,gtupyy,gtupyz,gtupzz,\n phi_bssn,psi_bssn,lapm1);\n\n /***************\n * PPEOS Patch *\n ***************/\n eos_struct eos;\n initialize_EOS_struct_from_input(eos);\n \n if(pure_hydro_run) {\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++) for(int j=0;j<cctk_lsh[1];j++) for(int i=0;i<cctk_lsh[0];i++) {\n int index=CCTK_GFINDEX3D(cctkGH,i,j,k);\n Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,0)]=0;\n Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,1)]=0;\n Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,2)]=0;\n Aphi[index]=0;\n }\n }\n\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++) for(int j=0;j<cctk_lsh[1];j++) for(int i=0;i<cctk_lsh[0];i++) {\n int index=CCTK_GFINDEX3D(cctkGH,i,j,k);\n\n rho_b[index] = rho[index];\n P[index] = press[index];\n\n /***************\n * PPEOS Patch *\n ***************\n * We now verify that the initial data\n * provided by the user is indeed 
\"cold\",\n * i.e. it contains no Thermal part and\n * P = P_cold.\n */\n /* Compute P_cold */\n const int polytropic_index = find_polytropic_K_and_Gamma_index(eos, rho_b[index]);\n const double K_poly = eos.K_ppoly_tab[polytropic_index];\n const double Gamma_poly = eos.Gamma_ppoly_tab[polytropic_index];\n const double P_cold = K_poly*pow(rho_b[index],Gamma_poly);\n\n /* Compare P and P_cold */\n double P_rel_error = fabs(P[index] - P_cold)/P[index];\n if( rho_b[index] > rho_b_atm && P_rel_error > 1e-2 ) {\n const double Gamma_poly_local = log(P[index]/K_poly) / log(rho_b[index]);\n /* Determine the value of Gamma_poly_local associated with P[index] */\n CCTK_VWarn(CCTK_WARN_ALERT, __LINE__, __FILE__, CCTK_THORNSTRING,\n\"Expected a PP EOS with local Gamma_poly = %.15e, but found a point such that Gamma_poly_local = %.15e.\\n\",\n Gamma_poly, Gamma_poly_local);\n CCTK_VWarn(CCTK_WARN_ALERT, __LINE__, __FILE__, CCTK_THORNSTRING,\n\"{rho_b; rho_b_atm; P; P_cold; P_rel_Error} = %.10e %e %.10e %.10e %e\\n\",\n rho_b[index], rho_b_atm, P[index],P_cold,P_rel_error);\n }\n\n Ax[index] = Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,0)];\n Ay[index] = Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,1)];\n Az[index] = Avec[CCTK_GFINDEX4D(cctkGH,i,j,k,2)];\n psi6phi[index] = Aphi[index];\n\t\n double ETvx = vel[CCTK_GFINDEX4D(cctkGH,i,j,k,0)];\n double ETvy = vel[CCTK_GFINDEX4D(cctkGH,i,j,k,1)];\n double ETvz = vel[CCTK_GFINDEX4D(cctkGH,i,j,k,2)];\n\n // IllinoisGRMHD defines v^i = u^i/u^0.\n\t\n // Meanwhile, the ET/HydroBase formalism, called the Valencia \n // formalism, splits the 4 velocity into a purely spatial part\n // and a part that is normal to the spatial hypersurface:\n // u^a = G (n^a + U^a), (Eq. 
14 of arXiv:1304.5544; G=W, U^a=v^a)\n // where n^a is the unit normal vector to the spatial hypersurface,\n // n_a = {-\\alpha,0,0,0}, and U^a is the purely spatial part, which\n // is defined in HydroBase as the vel[] vector gridfunction.\n // Then u^a n_a = - \\alpha u^0 = G n^a n_a = -G, and\n // of course \\alpha u^0 = 1/sqrt(1+γ^ij u_i u_j) = \\Gamma,\n // the standard Lorentz factor.\n\n // Note that n^i = - \\beta^i / \\alpha, so \n // u^a = \\Gamma (n^a + U^a) \n // -> u^i = \\Gamma ( U^i - \\beta^i / \\alpha )\n // which implies\n // v^i = u^i/u^0\n // = \\Gamma/u^0 ( U^i - \\beta^i / \\alpha ) <- \\Gamma = \\alpha u^0\n // = \\alpha ( U^i - \\beta^i / \\alpha )\n // = \\alpha U^i - \\beta^i\n\n vx[index] = alp[index]*ETvx - betax[index];\n vy[index] = alp[index]*ETvy - betay[index];\n vz[index] = alp[index]*ETvz - betaz[index];\n\n }\n\n // Neat feature for debugging: Add a roundoff-error perturbation\n // to the initial data.\n // Set random_pert variable to ~1e-14 for a random 15th digit\n // perturbation.\n srand(random_seed); // Use srand() as rand() is thread-safe.\n for(int k=0;k<cctk_lsh[2];k++)\n for(int j=0;j<cctk_lsh[1];j++)\n for(int i=0;i<cctk_lsh[0];i++) {\n int index=CCTK_GFINDEX3D(cctkGH,i,j,k);\n double pert = (random_pert*(double)rand() / RAND_MAX);\n double one_plus_pert=(1.0+pert);\n rho[index]*=one_plus_pert;\n vx[index]*=one_plus_pert;\n vy[index]*=one_plus_pert;\n vz[index]*=one_plus_pert;\n\n psi6phi[index]*=one_plus_pert;\n Ax[index]*=one_plus_pert;\n Ay[index]*=one_plus_pert;\n Az[index]*=one_plus_pert;\n }\n\n // Next compute B & B_stagger from A_i. 
Note that this routine also depends on\n // the psi_bssn[] gridfunction being set to exp(phi).\n\n double dxi = 1.0/CCTK_DELTA_SPACE(0);\n double dyi = 1.0/CCTK_DELTA_SPACE(1);\n double dzi = 1.0/CCTK_DELTA_SPACE(2); \n\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++)\n for(int j=0;j<cctk_lsh[1];j++)\n for(int i=0;i<cctk_lsh[0];i++) {\n // Look Mom, no if() statements!\n int shiftedim1 = (i-1)*(i!=0); // This way, i=0 yields shiftedim1=0 and shiftedi=1, used below for our COPY boundary condition.\n int shiftedi = shiftedim1+1;\n\n int shiftedjm1 = (j-1)*(j!=0);\n int shiftedj = shiftedjm1+1;\n\n int shiftedkm1 = (k-1)*(k!=0);\n int shiftedk = shiftedkm1+1;\n\n int index,indexim1,indexjm1,indexkm1;\n\n int actual_index = CCTK_GFINDEX3D(cctkGH,i,j,k);\n\n double Psi = psi_bssn[actual_index];\n double Psim3 = 1.0/(Psi*Psi*Psi);\n\n // For the lower boundaries, the following applies a \"copy\" \n // boundary condition on Bi_stagger where needed.\n // E.g., Bx_stagger(i,jmin,k) = Bx_stagger(i,jmin+1,k)\n // We find the copy BC works better than extrapolation.\n // For the upper boundaries, we do the following copy:\n // E.g., Psi(imax+1,j,k)=Psi(imax,j,k)\n /**************/\n /* Bx_stagger */\n /**************/\n\n index = CCTK_GFINDEX3D(cctkGH,i,shiftedj,shiftedk);\n indexjm1 = CCTK_GFINDEX3D(cctkGH,i,shiftedjm1,shiftedk);\n indexkm1 = CCTK_GFINDEX3D(cctkGH,i,shiftedj,shiftedkm1);\n // Set Bx_stagger = \\partial_y A_z - partial_z A_y\n // \"Grid\" Ax(i,j,k) is actually Ax(i,j+1/2,k+1/2)\n // \"Grid\" Ay(i,j,k) is actually Ay(i+1/2,j,k+1/2)\n // \"Grid\" Az(i,j,k) is actually Ay(i+1/2,j+1/2,k)\n // Therefore, the 2nd order derivative \\partial_z A_y at (i+1/2,j,k) is:\n // [\"Grid\" Ay(i,j,k) - \"Grid\" Ay(i,j,k-1)]/dZ\n Bx_stagger[actual_index] = (Az[index]-Az[indexjm1])*dyi - (Ay[index]-Ay[indexkm1])*dzi;\n\n // Now multiply Bx and Bx_stagger by 1/sqrt(gamma(i+1/2,j,k)]) = 1/sqrt(1/2 [gamma + gamma_ip1]) = exp(-6 x 1/2 [phi + phi_ip1] )\n int imax_minus_i = 
(cctk_lsh[0]-1)-i;\n int indexip1jk = CCTK_GFINDEX3D(cctkGH,i + ( (imax_minus_i > 0) - (0 > imax_minus_i) ),j,k);\n double Psi_ip1 = psi_bssn[indexip1jk];\n Bx_stagger[actual_index] *= Psim3/(Psi_ip1*Psi_ip1*Psi_ip1);\n\n /**************/\n /* By_stagger */\n /**************/\n\n index = CCTK_GFINDEX3D(cctkGH,shiftedi,j,shiftedk);\n indexim1 = CCTK_GFINDEX3D(cctkGH,shiftedim1,j,shiftedk);\n indexkm1 = CCTK_GFINDEX3D(cctkGH,shiftedi,j,shiftedkm1);\n // Set By_stagger = \\partial_z A_x - \\partial_x A_z\n By_stagger[actual_index] = (Ax[index]-Ax[indexkm1])*dzi - (Az[index]-Az[indexim1])*dxi;\n\n // Now multiply By and By_stagger by 1/sqrt(gamma(i,j+1/2,k)]) = 1/sqrt(1/2 [gamma + gamma_jp1]) = exp(-6 x 1/2 [phi + phi_jp1] )\n int jmax_minus_j = (cctk_lsh[1]-1)-j;\n int indexijp1k = CCTK_GFINDEX3D(cctkGH,i,j + ( (jmax_minus_j > 0) - (0 > jmax_minus_j) ),k);\n double Psi_jp1 = psi_bssn[indexijp1k];\n By_stagger[actual_index] *= Psim3/(Psi_jp1*Psi_jp1*Psi_jp1);\n\n\n /**************/\n /* Bz_stagger */\n /**************/\n\n index = CCTK_GFINDEX3D(cctkGH,shiftedi,shiftedj,k);\n indexim1 = CCTK_GFINDEX3D(cctkGH,shiftedim1,shiftedj,k);\n indexjm1 = CCTK_GFINDEX3D(cctkGH,shiftedi,shiftedjm1,k);\n // Set Bz_stagger = \\partial_x A_y - \\partial_y A_x\n Bz_stagger[actual_index] = (Ay[index]-Ay[indexim1])*dxi - (Ax[index]-Ax[indexjm1])*dyi;\n\n // Now multiply Bz_stagger by 1/sqrt(gamma(i,j,k+1/2)]) = 1/sqrt(1/2 [gamma + gamma_kp1]) = exp(-6 x 1/2 [phi + phi_kp1] )\n int kmax_minus_k = (cctk_lsh[2]-1)-k;\n int indexijkp1 = CCTK_GFINDEX3D(cctkGH,i,j,k + ( (kmax_minus_k > 0) - (0 > kmax_minus_k) ));\n double Psi_kp1 = psi_bssn[indexijkp1];\n Bz_stagger[actual_index] *= Psim3/(Psi_kp1*Psi_kp1*Psi_kp1);\n\n }\n\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++)\n for(int j=0;j<cctk_lsh[1];j++)\n for(int i=0;i<cctk_lsh[0];i++) {\n // Look Mom, no if() statements!\n int shiftedim1 = (i-1)*(i!=0); // This way, i=0 yields shiftedim1=0 and shiftedi=1, used below for our COPY 
boundary condition.\n int shiftedi = shiftedim1+1;\n\n int shiftedjm1 = (j-1)*(j!=0);\n int shiftedj = shiftedjm1+1;\n\n int shiftedkm1 = (k-1)*(k!=0);\n int shiftedk = shiftedkm1+1;\n\n int index,indexim1,indexjm1,indexkm1;\n\n int actual_index = CCTK_GFINDEX3D(cctkGH,i,j,k);\n\n // For the lower boundaries, the following applies a \"copy\" \n // boundary condition on Bi and Bi_stagger where needed.\n // E.g., Bx(imin,j,k) = Bx(imin+1,j,k)\n // We find the copy BC works better than extrapolation.\n /******/\n /* Bx */\n /******/\n index = CCTK_GFINDEX3D(cctkGH,shiftedi,j,k);\n indexim1 = CCTK_GFINDEX3D(cctkGH,shiftedim1,j,k);\n // Set Bx = 0.5 ( Bx_stagger + Bx_stagger_im1 )\n // \"Grid\" Bx_stagger(i,j,k) is actually Bx_stagger(i+1/2,j,k)\n Bx[actual_index] = 0.5 * ( Bx_stagger[index] + Bx_stagger[indexim1] );\n\n /******/\n /* By */\n /******/\n index = CCTK_GFINDEX3D(cctkGH,i,shiftedj,k);\n indexjm1 = CCTK_GFINDEX3D(cctkGH,i,shiftedjm1,k);\n // Set By = 0.5 ( By_stagger + By_stagger_im1 )\n // \"Grid\" By_stagger(i,j,k) is actually By_stagger(i,j+1/2,k)\n By[actual_index] = 0.5 * ( By_stagger[index] + By_stagger[indexjm1] );\n\n /******/\n /* Bz */\n /******/\n index = CCTK_GFINDEX3D(cctkGH,i,j,shiftedk);\n indexkm1 = CCTK_GFINDEX3D(cctkGH,i,j,shiftedkm1);\n // Set Bz = 0.5 ( Bz_stagger + Bz_stagger_im1 )\n // \"Grid\" Bz_stagger(i,j,k) is actually Bz_stagger(i,j+1/2,k)\n Bz[actual_index] = 0.5 * ( Bz_stagger[index] + Bz_stagger[indexkm1] );\n }\n\n // Finally, enforce limits on primitives & compute conservative variables.\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++)\n for(int j=0;j<cctk_lsh[1];j++)\n for(int i=0;i<cctk_lsh[0];i++) {\n static const int zero_int=0;\n int index = CCTK_GFINDEX3D(cctkGH,i,j,k);\n\n int ww;\n\n double PRIMS[MAXNUMVARS];\n ww=0;\n PRIMS[ww] = rho_b[index]; ww++;\n PRIMS[ww] = P[index]; ww++;\n PRIMS[ww] = vx[index]; ww++;\n PRIMS[ww] = vy[index]; ww++;\n PRIMS[ww] = vz[index]; ww++;\n PRIMS[ww] = Bx[index]; ww++;\n 
PRIMS[ww] = By[index]; ww++;\n PRIMS[ww] = Bz[index]; ww++;\n\n double METRIC[NUMVARS_FOR_METRIC],dummy=0;\n ww=0;\n // FIXME: NECESSARY?\n //psi_bssn[index] = exp(phi[index]);\n METRIC[ww] = phi_bssn[index];ww++;\n METRIC[ww] = dummy; ww++; // Don't need to set psi.\n METRIC[ww] = gtxx[index]; ww++;\n METRIC[ww] = gtxy[index]; ww++;\n METRIC[ww] = gtxz[index]; ww++;\n METRIC[ww] = gtyy[index]; ww++;\n METRIC[ww] = gtyz[index]; ww++;\n METRIC[ww] = gtzz[index]; ww++;\n METRIC[ww] = lapm1[index]; ww++;\n METRIC[ww] = betax[index]; ww++;\n METRIC[ww] = betay[index]; ww++;\n METRIC[ww] = betaz[index]; ww++;\n METRIC[ww] = gtupxx[index]; ww++;\n METRIC[ww] = gtupyy[index]; ww++;\n METRIC[ww] = gtupzz[index]; ww++;\n METRIC[ww] = gtupxy[index]; ww++;\n METRIC[ww] = gtupxz[index]; ww++;\n METRIC[ww] = gtupyz[index]; ww++;\n\n double CONSERVS[NUM_CONSERVS] = {0,0,0,0,0};\n double g4dn[4][4];\n double g4up[4][4];\n double TUPMUNU[10],TDNMUNU[10];\n\n struct output_stats stats; stats.failure_checker=0;\n IllinoisGRMHD_enforce_limits_on_primitives_and_recompute_conservs(zero_int,PRIMS,stats,eos,\n METRIC,g4dn,g4up,TUPMUNU,TDNMUNU,CONSERVS);\n rho_b[index] = PRIMS[RHOB];\n P[index] = PRIMS[PRESSURE];\n vx[index] = PRIMS[VX];\n vy[index] = PRIMS[VY];\n vz[index] = PRIMS[VZ];\n\n rho_star[index] = CONSERVS[RHOSTAR];\n mhd_st_x[index] = CONSERVS[STILDEX];\n mhd_st_y[index] = CONSERVS[STILDEY];\n mhd_st_z[index] = CONSERVS[STILDEZ];\n tau[index] = CONSERVS[TAUENERGY];\n\n if(update_Tmunu) {\n ww=0;\n eTtt[index] = TDNMUNU[ww]; ww++;\n eTtx[index] = TDNMUNU[ww]; ww++;\n eTty[index] = TDNMUNU[ww]; ww++;\n eTtz[index] = TDNMUNU[ww]; ww++;\n eTxx[index] = TDNMUNU[ww]; ww++;\n eTxy[index] = TDNMUNU[ww]; ww++;\n eTxz[index] = TDNMUNU[ww]; ww++;\n eTyy[index] = TDNMUNU[ww]; ww++;\n eTyz[index] = TDNMUNU[ww]; ww++;\n eTzz[index] = TDNMUNU[ww];\n }\n }\n}\n\n",
"Overwriting ../ID_converter_ILGRMHD/src/set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables.C\n"
]
],
[
[
"<a id='convert_to_hydrobase__param'></a>\n\n# Step 3: `param.ccl` \\[Back to [top](#toc)\\]\n$$\\label{convert_to_hydrobase__param}$$",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__ID_converter_ILGRMHD__param\n# Parameter definitions for thorn ID_converter_ILGRMHD\n\nshares: IllinoisGRMHD\nUSES KEYWORD rho_b_max\nUSES KEYWORD rho_b_atm\nUSES KEYWORD tau_atm\nUSES KEYWORD neos\nUSES KEYWORD K_ppoly_tab0\nUSES KEYWORD rho_ppoly_tab_in[10]\nUSES KEYWORD Gamma_ppoly_tab_in[10]\nUSES KEYWORD Sym_Bz\nUSES KEYWORD GAMMA_SPEED_LIMIT\nUSES KEYWORD Psi6threshold\nUSES KEYWORD update_Tmunu\n\nprivate:\n\nINT random_seed \"Random seed for random, generally roundoff-level perturbation on initial data. Seeds srand(), and rand() is used for the RNG.\"\n{\n 0:99999999 :: \"Anything unsigned goes.\"\n} 0\n\nREAL random_pert \"Random perturbation atop data\"\n{\n *:* :: \"Anything goes.\"\n} 0\n\nBOOLEAN pure_hydro_run \"Set the vector potential and corresponding EM gauge quantity to zero\"\n{\n} \"no\"\n\n",
"Overwriting ../ID_converter_ILGRMHD/param.ccl\n"
]
],
[
[
"<a id='convert_to_hydrobase__interface'></a>\n\n# Step 4: `interface.ccl` \\[Back to [top](#toc)\\]\n$$\\label{convert_to_hydrobase__interface}$$",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__ID_converter_ILGRMHD__interface\n# Interface definition for thorn ID_converter_ILGRMHD\n\nimplements: ID_converter_ILGRMHD\n\ninherits: ADMBase, Boundary, SpaceMask, Tmunubase, HydroBase, grid, IllinoisGRMHD\n\nuses include header: IllinoisGRMHD_headers.h\nUSES INCLUDE: Symmetry.h\n\n",
"Overwriting ../ID_converter_ILGRMHD/interface.ccl\n"
]
],
[
[
"<a id='convert_to_hydrobase__schedule'></a>\n\n# Step 5: `schedule.ccl` \\[Back to [top](#toc)\\]\n$$\\label{convert_to_hydrobase__schedule}$$",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__ID_converter_ILGRMHD__schedule\n# Schedule definitions for thorn ID_converter_ILGRMHD\n\nschedule group IllinoisGRMHD_ID_Converter at CCTK_INITIAL after HydroBase_Initial before Convert_to_HydroBase\n{\n} \"Translate ET-generated, HydroBase-compatible initial data and convert into variables used by IllinoisGRMHD\"\n\nschedule set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables IN IllinoisGRMHD_ID_Converter as first_initialdata before TOV_Initial_Data\n{\n LANG: C\n OPTIONS: LOCAL\n # What the heck, let's synchronize everything!\n SYNC: IllinoisGRMHD::grmhd_primitives_Bi, IllinoisGRMHD::grmhd_primitives_Bi_stagger, IllinoisGRMHD::grmhd_primitives_allbutBi, IllinoisGRMHD::em_Ax,IllinoisGRMHD::em_Ay,IllinoisGRMHD::em_Az,IllinoisGRMHD::em_psi6phi,IllinoisGRMHD::grmhd_conservatives,IllinoisGRMHD::BSSN_quantities,ADMBase::metric,ADMBase::lapse,ADMBase::shift,ADMBase::curv\n} \"Convert HydroBase initial data (ID) to ID that IllinoisGRMHD can read.\"\n\nschedule IllinoisGRMHD_InitSymBound IN IllinoisGRMHD_ID_Converter as third_initialdata after second_initialdata\n{\n SYNC: IllinoisGRMHD::grmhd_conservatives,IllinoisGRMHD::em_Ax,IllinoisGRMHD::em_Ay,IllinoisGRMHD::em_Az,IllinoisGRMHD::em_psi6phi\n LANG: C\n} \"Schedule symmetries -- Actually just a placeholder function to ensure prolongation / processor syncs are done BEFORE the primitives solver.\"\n\nschedule IllinoisGRMHD_compute_B_and_Bstagger_from_A IN IllinoisGRMHD_ID_Converter as fourth_initialdata after third_initialdata\n{\n SYNC: IllinoisGRMHD::grmhd_primitives_Bi, IllinoisGRMHD::grmhd_primitives_Bi_stagger\n LANG: C\n} \"Compute B and B_stagger from A\"\n\nschedule IllinoisGRMHD_conserv_to_prims IN IllinoisGRMHD_ID_Converter as fifth_initialdata after fourth_initialdata\n{\n LANG: C\n} \"Compute primitive variables from conservatives. This is non-trivial, requiring a Newton-Raphson root-finder.\"\n\n",
"Overwriting ../ID_converter_ILGRMHD/schedule.ccl\n"
]
],
[
[
"<a id='convert_to_hydrobase__make'></a>\n\n# Step 6: `make.code.defn` \\[Back to [top](#toc)\\]\n$$\\label{convert_to_hydrobase__make}$$",
"_____no_output_____"
]
],
[
[
"%%writefile $outfile_path__ID_converter_ILGRMHD__make\n# Main make.code.defn file for thorn ID_converter_ILGRMHD\n\n# Source files in this directory\nSRCS = set_IllinoisGRMHD_metric_GRMHD_variables_based_on_HydroBase_and_ADMBase_variables.C\n\n",
"Overwriting ../ID_converter_ILGRMHD/src/make.code.defn\n"
]
],
[
[
"<a id='code_validation'></a>\n\n# Step n-1: Code validation \\[Back to [top](#toc)\\]\n$$\\label{code_validation}$$\n\nFirst we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook.",
"_____no_output_____"
]
],
[
[
"# # Verify if the code generated by this tutorial module\n# # matches the original IllinoisGRMHD source code\n\n# # First download the original IllinoisGRMHD source code\n# import urllib\n# from os import path\n\n# original_IGM_file_url = \"https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/A_i_rhs_no_gauge_terms.C\"\n# original_IGM_file_name = \"A_i_rhs_no_gauge_terms-original.C\"\n# original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name)\n\n# # Then download the original IllinoisGRMHD source code\n# # We try it here in a couple of ways in an attempt to keep\n# # the code more portable\n# try:\n# original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode(\"utf-8\")\n# # Write down the file the original IllinoisGRMHD source code\n# with open(original_IGM_file_path,\"w\") as file:\n# file.write(original_IGM_file_code)\n# except:\n# try:\n# original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode(\"utf-8\")\n# # Write down the file the original IllinoisGRMHD source code\n# with open(original_IGM_file_path,\"w\") as file:\n# file.write(original_IGM_file_code)\n# except:\n# # If all else fails, hope wget does the job\n# !wget -O $original_IGM_file_path $original_IGM_file_url\n\n# # Perform validation\n# Validation__A_i_rhs_no_gauge_terms__C = !diff $original_IGM_file_path $outfile_path__A_i_rhs_no_gauge_terms__C\n\n# if Validation__A_i_rhs_no_gauge_terms__C == []:\n# # If the validation passes, we do not need to store the original IGM source code file\n# !rm $original_IGM_file_path\n# print(\"Validation test for A_i_rhs_no_gauge_terms.C: PASSED!\")\n# else:\n# # If the validation fails, we keep the original IGM source code file\n# print(\"Validation test for A_i_rhs_no_gauge_terms.C: FAILED!\")\n# # We also print out the difference between the code generated\n# # in this tutorial module and the original IGM source code\n# print(\"Diff:\")\n# 
for diff_line in Validation__A_i_rhs_no_gauge_terms__C:\n# print(diff_line)",
"_____no_output_____"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step n: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.pdf](Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means).",
"_____no_output_____"
]
],
[
[
"latex_nrpy_style_path = os.path.join(nrpy_dir_path,\"latex_nrpy_style.tplx\")\n#!jupyter nbconvert --to latex --template $latex_nrpy_style_path Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.ipynb\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.tex\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.tex\n#!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__A_i_rhs_no_gauge_terms.tex\n!rm -f Tut*.out Tut*.aux Tut*.log",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9ba39f3ef1452da2a8591a4fec61a79a69e82b
| 1,128 |
ipynb
|
Jupyter Notebook
|
docs/intro/test.ipynb
|
fedem-p/my_documentation_template
|
2be50dd3e0cb6bfe84f2b3119119782050296489
|
[
"MIT"
] | 1 |
2021-03-03T23:16:47.000Z
|
2021-03-03T23:16:47.000Z
|
docs/intro/test.ipynb
|
fedem-p/my_documentation_template
|
2be50dd3e0cb6bfe84f2b3119119782050296489
|
[
"MIT"
] | 1 |
2021-03-10T16:38:06.000Z
|
2021-03-11T12:50:17.000Z
|
docs/intro/test.ipynb
|
fedem-p/my_documentation_template
|
2be50dd3e0cb6bfe84f2b3119119782050296489
|
[
"MIT"
] | null | null | null | 16.347826 | 34 | 0.485816 |
[
[
[
"# Test Notebook",
"_____no_output_____"
],
[
"a simple test",
"_____no_output_____"
]
],
[
[
"#with some code\nprint(\"ciao\")",
"ciao\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a9baa724b4df4982bc392fd6c96cc2af791e5f1
| 82,778 |
ipynb
|
Jupyter Notebook
|
thermodynamic_addressability/test.ipynb
|
j3ny/Analysis
|
16579af7d5763dc28055bafe484bd9cffd0505c5
|
[
"MIT"
] | null | null | null |
thermodynamic_addressability/test.ipynb
|
j3ny/Analysis
|
16579af7d5763dc28055bafe484bd9cffd0505c5
|
[
"MIT"
] | null | null | null |
thermodynamic_addressability/test.ipynb
|
j3ny/Analysis
|
16579af7d5763dc28055bafe484bd9cffd0505c5
|
[
"MIT"
] | null | null | null | 231.871148 | 33,724 | 0.893776 |
[
[
[
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_style(\"whitegrid\", {\"font.family\": \"DejaVu Sans\"})\nsns.set(palette=\"pastel\", color_codes=True)\nsns.set_context(\"poster\")\n\n%matplotlib inline\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)",
"_____no_output_____"
],
[
"path = 'data/'\nfilename_DB = 'DeBruijn_alpha.json'\nfilename_pUC19 = 'pUC19_alpha.json'\nfilename_M13 = 'M13_square.json'\nfilename_DB7k = 'DB_7k_square.json'\n\nDB_small = seaborn.ree\n\n#ids, sequences, energies\n#_, _, energies_DB = read_data(path + filename_DB)\n#_, _, energies_pUC19 = read_data(path + filename_pUC19)\n#_, _, energies_M13 = read_data(path + filename_M13)\n\n_, _, energies_DB_short = read_data(path + filename_DB, short=True)\n_, _, energies_pUC19_short = read_data(path + filename_pUC19, short=True)\n_, _, energies_M13_short = read_data(path + filename_M13, short=True)\n_, _, energies_DB7k_short = read_data(path + filename_DB7k, short=True)\n\n#DB_dist_2 = get_boltzmann_distribution(d[:2] for d in energies_DB_short)\n#pUC19_dist_2 = get_boltzmann_distribution(d[:2] for d in energies_pUC19_short)\n#M13_dist_2 = get_boltzmann_distribution(d[:2] for d in energies_M13_short)\n\n#DB_dist_10 = get_boltzmann_distribution(d[:10] for d in energies_DB_short)\n#pUC19_dist_10 = get_boltzmann_distribution(d[:10] for d in energies_pUC19_short)\n#M13_dist_10 = get_boltzmann_distribution(d[:10] for d in energies_M13_short)\n\n#DB_dist_100 = get_boltzmann_distribution(d[:100] for d in energies_DB_short)\n#pUC19_dist_100 = get_boltzmann_distribution(d[:100] for d in energies_pUC19_short)\n#M13_dist_100 = get_boltzmann_distribution(d[:100] for d in energies_M13_short)\n\nDB_dist_all = get_boltzmann_distribution(d for d in energies_DB_short)\npUC19_dist_all = get_boltzmann_distribution(d for d in energies_pUC19_short)\nM13_dist_all = get_boltzmann_distribution(d for d in energies_M13_short)\nDB7k_dist_all = get_boltzmann_distribution(d for d in energies_DB7k_short)\n\n#DB_dist = get_boltzmann_distribution(d[:100] for d in energies_DB_short)\n#pUC19_dist = get_boltzmann_distribution(d[:100] for d in energies_pUC19_short)\n#M13_dist = get_boltzmann_distribution(d[:100] for d in energies_M13_short)\n\n#DB_dist = 
get_boltzmann_distribution(energies_DB_short)\n#pUC19_dist = get_boltzmann_distribution(energies_pUC19_short)\n\n#dist = [d[0] for d in DB_dist]",
"_____no_output_____"
],
[
"def example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)",
"_____no_output_____"
],
[
"def distribution_plot(ax, data_label, data, xlabel, ylabel, fontsize=15):\n bins = 20\n x = numpy.zeros(bins)\n for dist in data:\n i = int(dist[0]*bins)\n i = 0 if i < 0 else i\n i = bins-1 if i > bins-1 else i\n x[i] += 1\n for i in range(len(x)):\n x[i] = 1.0 * x[i] / len(data)\n index = numpy.arange(0, bins)\n ax.bar(index, x, bar_width, linewidth=0)\n ax.set_xticks(numpy.arange(0, bins+1))\n ax.set_xticklabels([('$' + str(i*0.05) + '$') if i % 2 == 0 else \"\" for i in range(0, bins+1)])\n #ax.tick_params(axis='both', which='major')\n \n ylimit = 0.2 if ('6.9' in data_label or 'M13' in data_label) else 0.3\n ax.set_xlim(0, bins)\n ax.set_ylim(0, ylimit)\n \n ax.set_xlabel(xlabel, fontsize=20)\n ax.set_ylabel(ylabel, fontsize=20)\n ax.set_title(data_label, fontsize=20)\n ax.legend()\n ",
"_____no_output_____"
],
[
"plt.close('all')\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\n\ndata_set = OrderedDict()\ndata_set['pUC19 (all)'] = pUC19_dist_all\ndata_set['DB (all)'] = DB_dist_all\ndata_set['M13 (all)'] = M13_dist_all\ndata_set['DB7k (all)'] = DB7k_dist_all\n\nxlabel = r'Specific binding probability'\nylabel = r'Fraction of staples'\n\ndistribution_plot(ax1, r'pUC19', pUC19_dist_all, '', ylabel)\ndistribution_plot(ax2, r'DB (2.4 knt)', DB_dist_all, '', '')\ndistribution_plot(ax3, r'M13', M13_dist_all, xlabel, ylabel)\ndistribution_plot(ax4, r'DBS (6.9 knt)', DB7k_dist_all, xlabel, '')\n\n#%matplotlib inline\n\nfig.set_size_inches(10, 10)\n\nplt.tight_layout()\nplt.savefig(\"/home/j3ny/repos/analysis/Analysis/thermodynamic_addressability/output/addressability_comparison.pdf\",format='pdf',dpi=600)\n#plt.savefig(\"/home/j3ny/repos/analysis/Analysis/thermodynamic_addressability/output/addressability_comparison_long.pdf\",format='pdf',dpi=600)\n",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=2, ncols=2)\n\nbar_width = 1.0\n\ndata_set = OrderedDict()\ndata_set['pUC19'] = pUC19_dist_all\ndata_set['DBS (2.4 knt)'] = DB_dist_all\ndata_set['M13'] = M13_dist_all\ndata_set['DBS (6.9 knt)'] = DB7k_dist_all\n\nplt.close('all')\nfig = plt.figure()\n\nfrom mpl_toolkits.axes_grid1 import Grid\ngrid = Grid(fig, rect=111, nrows_ncols=(2,2),\n axes_pad=0.4, label_mode='O',\n add_all = True,\n )\n\nfor ax, (data_label, data) in zip(grid, data_set.items()):\n xlabel = 'Specific binding probability' if ('6.9' in data_label or 'M13' in data_label) else ''\n ylabel = 'Fraction of staples'if ('pUC' in data_label or 'M13' in data_label) else ''\n distribution_plot(ax, data_label, data, xlabel, ylabel)\n\n#axes[0,0].set_title('pUC19')\n \n#grid[0].set_title('pUC19')\n#grid[0].set_ylabel('Fraction of staples', fontsize=15)\n#grid[1].set_title('DBS (2.4 knt)')\n\n#grid[2].set_title('M13')\n#grid[2].set_xlabel('Specific binding probability', fontsize=15)\n#grid[2].set_ylabel('Fraction of staples', fontsize=15)\n\n#grid[3].set_title('DBS (6.9 knt)')\n\n#axes[1].set_title('M13')\n#axes[2].set_title(r'$\\lambda$-phage')\n\n#fig.text(0.16, 0.92, 'pUC19', fontsize=15)\n#fig.text(0.6, 0.92, 'DBS (2.4 knt)', fontsize=15)\n\n#fig.text(0.16, 0.46, 'M13mp18', fontsize=15)\n#fig.text(0.6, 0.46, 'DBS (6.9 knt)', fontsize=15)\n\nfig.set_size_inches(6, 6)\n\nplt.tight_layout()\nplt.savefig(\"/home/j3ny/repos/analysis/Analysis/thermodynamic_addressability/output/addressability_comparison.pdf\",format='pdf',dpi=600)",
"_____no_output_____"
],
[
"#######################\n## OBSOLETE ##\n#######################\n\n#%matplotlib inline\n\nfig, axes = plt.subplots(nrows=2, ncols=2)\n\nbar_width = 1.0\n\ndata_set = OrderedDict()\ndata_set['pUC19 (all)'] = pUC19_dist_all\ndata_set['DB (all)'] = DB_dist_all\ndata_set['M13 (all)'] = M13_dist_all\ndata_set['DB7k (all)'] = DB7k_dist_all\n\nfor ax0, (data_label, data) in zip(axes.flat, data_set.items()):\n distribution_plot(ax0)\n\n#fig.text(0.19, 0.96, 'De Bruijn', ha='center')\nfig.text(0.3, 1, 'pUC19 (2.6 knt)', ha='center')\nfig.text(0.7, 1, 'DBS (2.4 knt)', ha='center')\n\nfig.text(0.5, 0.008, 'Specific binding probability', ha='center')\nfig.text(0.001, 0.5, 'Fraction of staples', va='center', rotation='vertical')\n\nfig.set_size_inches(7, 7)\n\nplt.tight_layout()\nplt.savefig(\"/home/j3ny/repos/analysis/Analysis/thermodynamic_addressability/output/addressability_comparison.pdf\",format='pdf',dpi=600)",
"_____no_output_____"
],
[
"## CONVERT DATA\n\npath = 'data/'\nfilename_DB = 'DeBruijn_alpha.json'\nfilename_pUC19 = 'pUC19_alpha.json'\nfilename_M13 = 'M13_square.json'\nfilename_DB7k = 'DB_7k_square.json'\n\nids, sequences, energies = read_data(path + filename_DB7k, short=True)\ndist_all = get_boltzmann_distribution(d for d in energies)\n\nwith open('data/DB_medium.csv', 'w') as out:\n for i in range(len(ids)):\n out.write(ids[i] + ',' + sequences[i] + ',')\n out.write('%.3f' % dist_all[i][0])\n out.write('\\n')\n #print idsi], sequences[i], energies_DB_short[i], DB_dist_all[i]",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9bb7e60eccb45fdebedae32e9ccd005eb7cfac
| 3,259 |
ipynb
|
Jupyter Notebook
|
TensorflowStudy/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
nutboy-cn/JupyterBook
|
839915c991e4cd3d56dee3987a7cf249e4db3d35
|
[
"Apache-2.0"
] | null | null | null |
TensorflowStudy/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
nutboy-cn/JupyterBook
|
839915c991e4cd3d56dee3987a7cf249e4db3d35
|
[
"Apache-2.0"
] | null | null | null |
TensorflowStudy/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
nutboy-cn/JupyterBook
|
839915c991e4cd3d56dee3987a7cf249e4db3d35
|
[
"Apache-2.0"
] | null | null | null | 24.503759 | 102 | 0.481743 |
[
[
[
"print (\"helloworld\")",
"helloworld\n"
],
[
"print(\"你好\")",
"你好\n"
],
[
"import tensorflow as tf\nmnist=tf.keras.datasets.mnist",
"_____no_output_____"
],
[
"(x_train,y_train),(x_test,y_test)=mnist.load_data()\nx_train,x_test=x_train/255.0,x_test/255.0\nmodel=tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28,28)),\n tf.keras.layers.Dense(128,activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10,activation='softmax')\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(x_train,y_train,epochs=5)\nmodel.evaluate(x_test,y_test)",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 3s 0us/step\nEpoch 1/5\n1875/1875 [==============================] - 2s 1ms/step - loss: 0.2979 - accuracy: 0.9132\nEpoch 2/5\n1875/1875 [==============================] - 2s 1ms/step - loss: 0.1447 - accuracy: 0.9577\nEpoch 3/5\n1875/1875 [==============================] - 2s 1ms/step - loss: 0.1078 - accuracy: 0.9672\nEpoch 4/5\n1875/1875 [==============================] - 2s 1ms/step - loss: 0.0875 - accuracy: 0.9731\nEpoch 5/5\n1875/1875 [==============================] - 2s 1ms/step - loss: 0.0769 - accuracy: 0.9754\n313/313 [==============================] - 0s 905us/step - loss: 0.0757 - accuracy: 0.9775\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a9bba9f0e0053eb46966c9d43eea83e9b8ceb8e
| 375,789 |
ipynb
|
Jupyter Notebook
|
Case_01/.ipynb_checkpoints/EDA-checkpoint.ipynb
|
filipeaguiarrod/Case-Gol-Linhas-A-reas
|
a441251d09889a23ce1abb98c5294537552eea2c
|
[
"MIT"
] | null | null | null |
Case_01/.ipynb_checkpoints/EDA-checkpoint.ipynb
|
filipeaguiarrod/Case-Gol-Linhas-A-reas
|
a441251d09889a23ce1abb98c5294537552eea2c
|
[
"MIT"
] | null | null | null |
Case_01/.ipynb_checkpoints/EDA-checkpoint.ipynb
|
filipeaguiarrod/Case-Gol-Linhas-A-reas
|
a441251d09889a23ce1abb98c5294537552eea2c
|
[
"MIT"
] | null | null | null | 293.127145 | 163,540 | 0.909117 |
[
[
[
"### Bibliotecas Utilizadas:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
" - Faça um ranking para o número total de PAX por dia da semana. - - Power BI\n - Qual a correlação de sábado e domingo somados com o total de RPK? - ????\n - Qual a média de ‘Monetário’ por mês por Canal? E a mediana? - Power BI\n - Crie um forecast de PAX por ‘Local de Venda’ para os próximos 15 dias a contar da última data de venda. (Aqui a técnica é livre) - Série Temporal\n - Supondo que você precisa gerar um estudo para a área responsável, com base em qualquer modelo ou premissa, qual ‘Local de Venda’ você considera mais crítico. Por quê? - Power BI\n - Criar modelo relacionando o comporatamento de venda com variaveis não apresentada nos dados (Ex : PIB, Dolar, e etc) - Regressão Múltipla",
"_____no_output_____"
],
[
" Observações: \n \n - PAX é o total de passageiros. RPK é um indicador diretamente relacionada com o número de PAX.\n - Não se atenha às grandezas. Os dados são fictícios. 😉\n - Envie todo o material que produzir (códigos, tabelas e outros arquivos) com o detalhamento de cada um. Se possível, comente nos códigos.\n - Para a apresentação, use PowerPoint ou qualquer outra ferramenta de DataViz que julgar pertinente.\n",
"_____no_output_____"
],
[
"Pelo próprio excel o xlsx foi modificado apenas para estar com a aba dados e salvo em csv, UTF-8 delimitado por ;",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('data.csv',sep=';')",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2432 entries, 0 to 2431\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Data Venda 2432 non-null object\n 1 Canal de Venda 2432 non-null object\n 2 Local de Venda 2432 non-null object\n 3 PAX 2432 non-null int64 \n 4 Monetário Vendido 2432 non-null object\n 5 RPK 2432 non-null int64 \ndtypes: int64(2), object(4)\nmemory usage: 114.1+ KB\n"
],
[
"#Observando qualidade de preenchimento do dataset;\n\ndf.isnull().sum()",
"_____no_output_____"
]
],
[
[
"Organizando as variáveis e atribuindo seu tipo correto.",
"_____no_output_____"
]
],
[
[
"df[\"Data Venda\"] = pd.to_datetime(df[\"Data Venda\"])\n\ndf[['Canal de Venda','Local de Venda']] = df[['Canal de Venda','Local de Venda']].astype('category')\n\ndf[['PAX','RPK']] = df[['PAX','RPK']].astype('float')\n",
"_____no_output_____"
],
[
"df[\"Monetário Vendido\"] = df[\"Monetário Vendido\"].str.replace(\",\", \".\")\ndf[\"Monetário Vendido\"] = df[\"Monetário Vendido\"].astype('float')",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2432 entries, 0 to 2431\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Data Venda 2432 non-null datetime64[ns]\n 1 Canal de Venda 2432 non-null category \n 2 Local de Venda 2432 non-null category \n 3 PAX 2432 non-null float64 \n 4 Monetário Vendido 2432 non-null float64 \n 5 RPK 2432 non-null float64 \ndtypes: category(2), datetime64[ns](1), float64(3)\nmemory usage: 81.2 KB\n"
],
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"#### Criando uma coluna que contenha os dia da semana;\n\n - Nova coluna contendo: Conversão de dia para dia da semana;",
"_____no_output_____"
]
],
[
[
"df['Dia Semana'] = df['Data Venda'].dt.dayofweek\ndf['Dia Semana'].sample(5)",
"_____no_output_____"
]
],
[
[
"De acordo com a documentação do dayofweek: \n \nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.dayofweek.html\n\n\"... Monday, which is denoted by 0 and ends on Sunday which is denoted by 6...\" ",
"_____no_output_____"
]
],
[
[
"df['Dia Semana Nome'] = df['Data Venda'].dt.day_name()\ndf['Dia Semana Nome'].sample(5)",
"_____no_output_____"
],
[
"df.groupby(['Dia Semana Nome'])['PAX'].agg('sum').sort_values(ascending=False)",
"_____no_output_____"
]
],
[
[
"## Repostas",
"_____no_output_____"
],
[
" - Faça um ranking para o número total de PAX por dia da semana.\n - Qual a correlação de sábado e domingo somados com o total de RPK?\n - Qual a média de ‘Monetário’ por mês por Canal? E a mediana?\n - Crie um forecast de PAX por ‘Local de Venda’ para os próximos 15 dias a contar da última data de venda. (Aqui a técnica é livre) \n - Supondo que você precisa gerar um estudo para a área responsável, com base em qualquer modelo ou premissa, qual ‘Local de Venda’ você considera mais crítico. Por quê?\n - Criar modelo relacionando o comporatamento de venda com variaveis não apresentada nos dados (Ex : PIB, Dolar, e etc) ",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
],
[
"df.sample(15)",
"_____no_output_____"
]
],
[
[
"#### Análise Exploratória dos Dados",
"_____no_output_____"
]
],
[
[
"sns.pairplot(df)",
"_____no_output_____"
]
],
[
[
"#### Modelo relacionando o comportamento de venda com variaveis não apresentada nos dados (Ex : PIB, Dolar, e etc) ",
"_____no_output_____"
],
[
"Criar modelo relacionando o comporatamento de venda com variaveis não apresentada nos dados (Ex : PIB, Dolar, e etc)\n\nBrainstorm de possíveis variáveis a avaliar:\n\nPIB,\nAumento população,\nDolar,\nEuro,\nAções,\nCondições Climáticas, ( Estações do Ano )\nDesemprego,\nIPCA,\nSelic,\nCDI",
"_____no_output_____"
],
[
"Para coleta de algumas variáveis é necessário supor uma vez que os dados são fictícios! \n\nEstações do Ano, Jet fuel, Dolar price. \ntive a liberardade de considerar que estamos falando de Brasil uma vez que Monetário Vendido estava inicialmente em R$",
"_____no_output_____"
]
],
[
[
"df=pd.read_csv('data_modified.csv',sep=';') # Foram adicionadas duas colunas com Jet fuel price e Taxa de Desemprego no Brasil",
"_____no_output_____"
]
],
[
[
"fonte - Desemprego: https://www.ibge.gov.br/estatisticas/sociais/trabalho/9173-pesquisa-nacional-por-amostra-de-domicilios-continua-trimestral.html?=&t=series-historicas&utm_source=landing&utm_medium=explica&utm_campaign=desemprego\n\nfonte - Fuel Jet Price: https://www.indexmundi.com/pt/pre%C3%A7os-de-mercado/?mercadoria=combust%c3%advel-de-jato&meses=60",
"_____no_output_____"
],
[
"Pensei em pegar os valores de dólar porém obtive muitos dados que seriam necessários grande tempo para sincronizar as datas, desta forma resolvi seguir assim mesmo",
"_____no_output_____"
]
],
[
[
"df.columns",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2432 entries, 0 to 2431\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Data Venda 2432 non-null object \n 1 Canal de Venda 2432 non-null object \n 2 Local de Venda 2432 non-null object \n 3 PAX 2432 non-null int64 \n 4 Monetário Vendido 2432 non-null object \n 5 RPK 2432 non-null int64 \n 6 Preço Jet Fuel 2432 non-null object \n 7 Taxa de Desemprego Brasil 2432 non-null float64\ndtypes: float64(1), int64(2), object(5)\nmemory usage: 152.1+ KB\n"
],
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"#Mudando os Dtypes das variáveis:\n\ndf[\"Data Venda\"] = pd.to_datetime(df[\"Data Venda\"])\n\ndf[['Canal de Venda','Local de Venda']] = df[['Canal de Venda','Local de Venda']].astype('category')\n\ndf[['PAX','RPK']] = df[['PAX','RPK']].astype('float')",
"_____no_output_____"
],
[
"df[\"Monetário Vendido\"] = df[\"Monetário Vendido\"].str.replace(\",\", \".\")\ndf[\"Monetário Vendido\"] = df[\"Monetário Vendido\"].astype('float')",
"_____no_output_____"
],
[
"df['Preço Jet Fuel'] = df['Preço Jet Fuel'].str.replace(\",\", \".\")\ndf['Preço Jet Fuel'] = df['Preço Jet Fuel'].astype('float')",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2432 entries, 0 to 2431\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Data Venda 2432 non-null datetime64[ns]\n 1 Canal de Venda 2432 non-null category \n 2 Local de Venda 2432 non-null category \n 3 PAX 2432 non-null float64 \n 4 Monetário Vendido 2432 non-null float64 \n 5 RPK 2432 non-null float64 \n 6 Preço Jet Fuel 2432 non-null float64 \n 7 Taxa de Desemprego Brasil 2432 non-null float64 \ndtypes: category(2), datetime64[ns](1), float64(5)\nmemory usage: 119.2 KB\n"
],
[
"sns.pairplot(df)",
"_____no_output_____"
],
[
"sns.heatmap(df[['PAX',\n 'Monetário Vendido', 'RPK', 'Preço Jet Fuel',\n 'Taxa de Desemprego Brasil']].corr(),vmax=1,vmin=-1,annot=True)",
"_____no_output_____"
]
],
[
[
"### Regressão Linear com PAX x Monetário Vendido\n\n\n- RPK altamente relacionada com PAX, vai sair para que não haja problemas com a multicolineariedade;\n- Taxa Desmprego do Brasil e Prejo do Jet Fuel não influenciaram como pensei, \no jet fuel pensei que aumentaria o preço e teria um impacto negativo no monetário, porém é muito baixa a correlação\n- Seguindo com regressão linear simples PAX x Monetário Vendido\n",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12.5,12.5))\nsns.scatterplot(data=df,x='PAX',y='Monetário Vendido')\n",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"# Criação do modelo de regressão linear pelo statsmodels\n\nimport statsmodels.api as sm",
"_____no_output_____"
],
[
"X=df['PAX']\ny=df['Monetário Vendido']",
"_____no_output_____"
],
[
"spector_data = sm.datasets.spector.load(as_pandas=False)\n\nspector_data.exog = sm.add_constant(spector_data.exog, prepend=False)\n\nmod = sm.OLS(X, y)\n\nres = mod.fit()\n\nprint(res.summary())",
" OLS Regression Results \n=======================================================================================\nDep. Variable: PAX R-squared (uncentered): 0.916\nModel: OLS Adj. R-squared (uncentered): 0.916\nMethod: Least Squares F-statistic: 2.660e+04\nDate: Wed, 06 Jan 2021 Prob (F-statistic): 0.00\nTime: 23:14:10 Log-Likelihood: -17310.\nNo. Observations: 2432 AIC: 3.462e+04\nDf Residuals: 2431 BIC: 3.463e+04\nDf Model: 1 \nCovariance Type: nonrobust \n=====================================================================================\n coef std err t P>|t| [0.025 0.975]\n-------------------------------------------------------------------------------------\nMonetário Vendido 0.0296 0.000 163.104 0.000 0.029 0.030\n==============================================================================\nOmnibus: 1291.117 Durbin-Watson: 1.588\nProb(Omnibus): 0.000 Jarque-Bera (JB): 17779.233\nSkew: 2.190 Prob(JB): 0.00\nKurtosis: 15.501 Cond. No. 1.00\n==============================================================================\n\nNotes:\n[1] R² is computed without centering (uncentered) since the model does not contain a constant.\n[2] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n"
]
],
[
[
"Gostaria de exploar muito mais o modelo e procurar novas variáveis, porém decidi parar pelo tempo!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a9bbc11daea2bf76e6d043ce08c22a0f7ecfc60
| 35,606 |
ipynb
|
Jupyter Notebook
|
pyspark-advanced/jupyter-errors/PySpark Stacktraces - Full.ipynb
|
kryvokhyzha/examples-and-courses
|
477e82ee24e6abba8a6b6d92555f2ed549ca682c
|
[
"MIT"
] | 1 |
2021-12-13T15:41:48.000Z
|
2021-12-13T15:41:48.000Z
|
pyspark-advanced/jupyter-errors/PySpark Stacktraces - Full.ipynb
|
kryvokhyzha/examples-and-courses
|
477e82ee24e6abba8a6b6d92555f2ed549ca682c
|
[
"MIT"
] | 15 |
2021-09-12T15:06:13.000Z
|
2022-03-31T19:02:08.000Z
|
pyspark-advanced/jupyter-errors/PySpark Stacktraces - Full.ipynb
|
kryvokhyzha/examples-and-courses
|
477e82ee24e6abba8a6b6d92555f2ed549ca682c
|
[
"MIT"
] | 1 |
2022-01-29T00:37:52.000Z
|
2022-01-29T00:37:52.000Z
| 187.4 | 13,404 | 0.754451 |
[
[
[
"from pyspark.sql import SparkSession\nimport pyspark.sql.functions as f\n\nif not 'spark' in locals():\n spark = SparkSession.builder \\\n .master(\"local[*]\") \\\n .config(\"spark.driver.memory\",\"24G\") \\\n .getOrCreate()\n\nspark",
"_____no_output_____"
],
[
"import pandas as pd\n\nreplication_df = spark.createDataFrame(pd.DataFrame(list(range(1,1000)),columns=['replication_id'])).repartition(1000, 'replication_id')",
"_____no_output_____"
],
[
"from pyspark.sql.functions import *\nfrom pyspark.sql.types import *\n\n\noutSchema = StructType([StructField('replication_id', IntegerType(), True),\n StructField('sil_score', DoubleType(), True),\n StructField('num_clusters', IntegerType(), True),\n StructField('min_samples', IntegerType(), True),\n StructField('min_cluster_size', IntegerType(), True)])\n\n\n@pandas_udf(outSchema, PandasUDFType.GROUPED_MAP)\ndef run_model(df_pandas):\n\n\n # Return result as a pandas data frame\n result = pd.DataFrame({'replication_id': replication_id, 'sil_score': 2,\n 'num_clusters': 3, 'min_samples': 4,\n 'min_cluster_size': 5}, index=[0])\n\n return result\n\n\n\nresults = replication_df.groupby(\"replication_id\").apply(run_model)",
"/opt/anaconda3/lib/python3.7/site-packages/pyspark/sql/pandas/group_ops.py:76: UserWarning: It is preferred to use 'applyInPandas' over this API. This API will be deprecated in the future releases. See SPARK-28264 for more details.\n \"more details.\", UserWarning)\n"
],
[
"spark.conf.set(\"spark.sql.pyspark.jvmStacktrace.enabled\",True)\nspark.conf.set(\"spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled\",False)\n\nresults.count()",
"_____no_output_____"
],
[
"spark.conf.set(\"spark.sql.pyspark.jvmStacktrace.enabled\",False)\nspark.conf.set(\"spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled\",False)\n\nresults.count()",
"_____no_output_____"
],
[
"spark.conf.set(\"spark.sql.pyspark.jvmStacktrace.enabled\",False)\nspark.conf.set(\"spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled\",True)\n\nresults.count()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9bc0f718c25397b0d9cf8835bfb69e61211a9e
| 22,865 |
ipynb
|
Jupyter Notebook
|
codes/.ipynb_checkpoints/Toy Examples Exercise 2-checkpoint.ipynb
|
makni-mehdi/federated-swag
|
a0175ed267e07959687c4290aa3a2a8e1899faa5
|
[
"BSD-2-Clause"
] | null | null | null |
codes/.ipynb_checkpoints/Toy Examples Exercise 2-checkpoint.ipynb
|
makni-mehdi/federated-swag
|
a0175ed267e07959687c4290aa3a2a8e1899faa5
|
[
"BSD-2-Clause"
] | null | null | null |
codes/.ipynb_checkpoints/Toy Examples Exercise 2-checkpoint.ipynb
|
makni-mehdi/federated-swag
|
a0175ed267e07959687c4290aa3a2a8e1899faa5
|
[
"BSD-2-Clause"
] | null | null | null | 64.957386 | 15,672 | 0.81229 |
[
[
[
"import numpy as np\nimport numpy.random\nfrom numpy.random import multivariate_normal, uniform\nfrom operator import mul\nfrom functools import reduce\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Exercise 1\n### Initialization",
"_____no_output_____"
]
],
[
[
"N = 10 #Nb clients\nm = 100 #Nb data points\nmala_samples = 10**6\nnb_thetas = 5 * 10**5\neta = 0.1\nseed = 42\nmean = np.array([0, 0])\nI_2 = np.array([[1, 0], [0, 1]])\nnumpy.random.seed(seed=seed)",
"_____no_output_____"
]
],
[
[
"### Data Generation",
"_____no_output_____"
]
],
[
[
"mu = np.array([multivariate_normal(mean, I_2) for i in range(N)])\nX = np.array([[multivariate_normal(2 * mu[i], I_2 / 3) for j in range(m)] for i in range(N)])",
"_____no_output_____"
],
[
"print(mu.shape)\nprint(X.shape)",
"(10, 2)\n(10, 100, 2)\n"
],
[
"X.mean(axis=(0, 1))",
"_____no_output_____"
]
],
[
[
"### Parameters sampling",
"_____no_output_____"
]
],
[
[
"theta = np.array([reduce(mul, [multivariate_normal(X[i, j], I_2) for i, j in zip(range(N), range(m))], 1) for _ in range(nb_thetas)])",
"_____no_output_____"
]
],
[
[
"### Computing Expectiation using MALA\n<!-- #### In what follows we use the fact that $f(\\theta) = ||\\theta||$ -->",
"_____no_output_____"
]
],
[
[
"def f(theta):\n return np.linalg.norm(theta)\n\ndef grad_f(theta):\n return theta / np.linalg.norm(theta)",
"_____no_output_____"
],
[
"def q(x, y, eta):\n return - f(y) - np.linalg.norm(x - y + eta * grad_f(y))**2 / (4 * eta)\ndef mala_sample(current, eta):\n return current - eta * grad_f(current) + np.sqrt(2 * eta) * multivariate_normal(mean, I_2) #2 * current is grad f(current)",
"_____no_output_____"
],
[
"def mala_algo(theta_star, mala_samples, eta):\n current = theta_star\n accepted = []\n count = 0\n for _ in range(mala_samples):\n new = mala_sample(current, eta)\n alpha = min(1, np.exp(q(current, new, eta) - q(new, current, eta)))\n u = uniform(0, 1)\n if u <= alpha:\n accepted.append(new)\n current = new\n count += 1\n else:\n accepted.append(current)\n print(f\"Acceptance rate: {count / mala_samples}\")\n return accepted",
"_____no_output_____"
],
[
"theta_star = X.mean(axis=(0, 1))",
"_____no_output_____"
],
[
"accepted = np.array(mala_algo(theta_star, mala_samples, eta))",
"_____no_output_____"
],
[
"accepted.mean(axis=0)",
"_____no_output_____"
],
[
"average = np.linalg.norm(accepted[200000:], axis=1).mean()",
"_____no_output_____"
],
[
"average",
"_____no_output_____"
]
],
[
[
"### MSE loss plot",
"_____no_output_____"
]
],
[
[
"rolling_average = f(theta[0])\nloss = [(average - rolling_average)**2]\n\n# for n_iter in range(1, nb_thetas):\nfor n_iter in range(1, 10**4):\n rolling_average = (rolling_average * n_iter + f(theta[n_iter])) / (n_iter + 1)\n loss.append((average - rolling_average)**2)\n \n# print(loss)\n ",
"_____no_output_____"
],
[
"plt.plot(loss[10:])",
"_____no_output_____"
],
[
"X.sum()",
"_____no_output_____"
]
],
[
[
"# Exercise 2",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a9bc603ce26c24996773bcffa67cadb718107b3
| 5,589 |
ipynb
|
Jupyter Notebook
|
Hubspot/Hubspot_update_contact.ipynb
|
vivard/awesome-notebooks
|
899558bcc2165bb2155f5ab69ac922c6458e1799
|
[
"BSD-3-Clause"
] | null | null | null |
Hubspot/Hubspot_update_contact.ipynb
|
vivard/awesome-notebooks
|
899558bcc2165bb2155f5ab69ac922c6458e1799
|
[
"BSD-3-Clause"
] | null | null | null |
Hubspot/Hubspot_update_contact.ipynb
|
vivard/awesome-notebooks
|
899558bcc2165bb2155f5ab69ac922c6458e1799
|
[
"BSD-3-Clause"
] | null | null | null | 20.547794 | 284 | 0.459295 |
[
[
[
"<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>",
"_____no_output_____"
],
[
"# Hubspot - Update contact\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hubspot/Hubspot_update_contact.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>",
"_____no_output_____"
],
[
"**Tags:** #hubspot #crm #sales #contact #naas_drivers",
"_____no_output_____"
],
[
"## Input",
"_____no_output_____"
],
[
"### Import library",
"_____no_output_____"
]
],
[
[
"from naas_drivers import hubspot",
"_____no_output_____"
]
],
[
[
"### Enter your Hubspot api key",
"_____no_output_____"
]
],
[
[
"auth_token = \"YOUR_HUBSPOT_API_KEY\"",
"_____no_output_____"
]
],
[
[
"### Connect to Hubspot",
"_____no_output_____"
]
],
[
[
"hs = hubspot.connect(auth_token)",
"_____no_output_____"
]
],
[
[
"## Model",
"_____no_output_____"
],
[
"### Enter contact parameters",
"_____no_output_____"
]
],
[
[
"contact_id = \"280751\"\nemail = \"[email protected]\"\nfirstname = \"Jean test\"\nlastname ='CASHSTOrY'\nphone = \"+336.00.00.00.00\"\njobtitle = \"Consultant\"\nwebsite = \"www.cashstory.com\"\ncompany = 'CASHSTORY'\nhubspot_owner_id = None",
"_____no_output_____"
]
],
[
[
"## Output",
"_____no_output_____"
],
[
"### Using patch method",
"_____no_output_____"
]
],
[
[
"update_contact = {\"properties\": \n {\n \"email\": email,\n \"firstname\": firstname,\n \"lastname\": lastname,\n \"phone\": phone,\n \"jobtitle\": jobtitle,\n \"website\": website,\n \"company\": company,\n \"url\": \"test3\",\n \"hubspot_owner_id\": hubspot_owner_id,\n }\n }\n\nhs.contacts.patch(contact_id, update_contact)",
"_____no_output_____"
]
],
[
[
"### Using update method",
"_____no_output_____"
]
],
[
[
"hs.contacts.update(contact_id,\n email,\n firstname,\n lastname,\n phone,\n jobtitle,\n website,\n company,\n hubspot_owner_id)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9be4c2b72343416cd3298c5076d19d6885346c
| 14,408 |
ipynb
|
Jupyter Notebook
|
sagemaker-python-sdk/tensorflow_script_mode_training_and_serving/tensorflow_script_mode_training_and_serving.ipynb
|
P15241328/amazon-sagemaker-examples
|
00cba545be0822474f070321a62d22865187e09b
|
[
"Apache-2.0"
] | 2 |
2021-03-31T21:10:44.000Z
|
2021-04-03T04:27:26.000Z
|
sagemaker-python-sdk/tensorflow_script_mode_training_and_serving/tensorflow_script_mode_training_and_serving.ipynb
|
P15241328/amazon-sagemaker-examples
|
00cba545be0822474f070321a62d22865187e09b
|
[
"Apache-2.0"
] | 1 |
2021-03-25T18:31:29.000Z
|
2021-03-25T18:31:29.000Z
|
sagemaker-python-sdk/tensorflow_script_mode_training_and_serving/tensorflow_script_mode_training_and_serving.ipynb
|
P15241328/amazon-sagemaker-examples
|
00cba545be0822474f070321a62d22865187e09b
|
[
"Apache-2.0"
] | 2 |
2021-09-25T08:40:23.000Z
|
2021-11-08T03:01:52.000Z
| 39.911357 | 835 | 0.645336 |
[
[
[
"# TensorFlow script mode training and serving\n\nScript mode is a training script format for TensorFlow that lets you execute any TensorFlow training script in SageMaker with minimal modification. The [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) handles transferring your script to a SageMaker training instance. On the training instance, SageMaker's native TensorFlow support sets up training-related environment variables and executes your training script. In this tutorial, we use the SageMaker Python SDK to launch a training job and deploy the trained model.\n\nScript mode supports training with a Python script, a Python module, or a shell script. In this example, we use a Python script to train a classification model on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). In this example, we will show how easily you can train a SageMaker using TensorFlow 1.x and TensorFlow 2.0 scripts with SageMaker Python SDK. In addition, this notebook demonstrates how to perform real time inference with the [SageMaker TensorFlow Serving container](https://github.com/aws/sagemaker-tensorflow-serving-container). The TensorFlow Serving container is the default inference method for script mode. For full documentation on the TensorFlow Serving container, please visit [here](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/deploying_tensorflow_serving.rst).\n",
"_____no_output_____"
],
[
"# Set up the environment\n\nLet's start by setting up the environment:",
"_____no_output_____"
]
],
[
[
"import os\nimport sagemaker\nfrom sagemaker import get_execution_role\n\nsagemaker_session = sagemaker.Session()\n\nrole = get_execution_role()\nregion = sagemaker_session.boto_session.region_name",
"_____no_output_____"
]
],
[
[
"## Training Data\n\nThe MNIST dataset has been loaded to the public S3 buckets ``sagemaker-sample-data-<REGION>`` under the prefix ``tensorflow/mnist``. There are four ``.npy`` file under this prefix:\n* ``train_data.npy``\n* ``eval_data.npy``\n* ``train_labels.npy``\n* ``eval_labels.npy``",
"_____no_output_____"
]
],
[
[
"training_data_uri = 's3://sagemaker-sample-data-{}/tensorflow/mnist'.format(region)",
"_____no_output_____"
]
],
[
[
"# Construct a script for distributed training\n\nThis tutorial's training script was adapted from TensorFlow's official [CNN MNIST example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/layers/cnn_mnist.py). We have modified it to handle the ``model_dir`` parameter passed in by SageMaker. This is an S3 path which can be used for data sharing during distributed training and checkpointing and/or model persistence. We have also added an argument-parsing function to handle processing training-related variables.\n\nAt the end of the training job we have added a step to export the trained model to the path stored in the environment variable ``SM_MODEL_DIR``, which always points to ``/opt/ml/model``. This is critical because SageMaker uploads all the model artifacts in this folder to S3 at end of training.\n\nHere is the entire script:",
"_____no_output_____"
]
],
[
[
"!pygmentize 'mnist.py'\n\n# TensorFlow 2.1 script\n!pygmentize 'mnist-2.py'",
"_____no_output_____"
]
],
[
[
"# Create a training job using the `TensorFlow` estimator\n\nThe `sagemaker.tensorflow.TensorFlow` estimator handles locating the script mode container, uploading your script to a S3 location and creating a SageMaker training job. Let's call out a couple important parameters here:\n\n* `py_version` is set to `'py3'` to indicate that we are using script mode since legacy mode supports only Python 2. Though Python 2 will be deprecated soon, you can use script mode with Python 2 by setting `py_version` to `'py2'` and `script_mode` to `True`.\n\n* `distributions` is used to configure the distributed training setup. It's required only if you are doing distributed training either across a cluster of instances or across multiple GPUs. Here we are using parameter servers as the distributed training schema. SageMaker training jobs run on homogeneous clusters. To make parameter server more performant in the SageMaker setup, we run a parameter server on every instance in the cluster, so there is no need to specify the number of parameter servers to launch. Script mode also supports distributed training with [Horovod](https://github.com/horovod/horovod). You can find the full documentation on how to configure `distributions` [here](https://github.com/aws/sagemaker-python-sdk/tree/master/src/sagemaker/tensorflow#distributed-training). \n\n",
"_____no_output_____"
]
],
[
[
"from sagemaker.tensorflow import TensorFlow\n\n\nmnist_estimator = TensorFlow(entry_point='mnist.py',\n role=role,\n instance_count=2,\n instance_type='ml.p3.2xlarge',\n framework_version='1.15.2',\n py_version='py3',\n distribution={'parameter_server': {'enabled': True}})",
"_____no_output_____"
]
],
[
[
"You can also initiate an estimator to train with TensorFlow 2.1 script. The only things that you will need to change are the script name and ``framewotk_version``",
"_____no_output_____"
]
],
[
[
"mnist_estimator2 = TensorFlow(entry_point='mnist-2.py',\n role=role,\n instance_count=2,\n instance_type='ml.p3.2xlarge',\n framework_version='2.1.0',\n py_version='py3',\n distribution={'parameter_server': {'enabled': True}})",
"_____no_output_____"
]
],
[
[
"## Calling ``fit``\n\nTo start a training job, we call `estimator.fit(training_data_uri)`.\n\nAn S3 location is used here as the input. `fit` creates a default channel named `'training'`, which points to this S3 location. In the training script we can then access the training data from the location stored in `SM_CHANNEL_TRAINING`. `fit` accepts a couple other types of input as well. See the API doc [here](https://sagemaker.readthedocs.io/en/stable/estimators.html#sagemaker.estimator.EstimatorBase.fit) for details.\n\nWhen training starts, the TensorFlow container executes mnist.py, passing `hyperparameters` and `model_dir` from the estimator as script arguments. Because we didn't define either in this example, no hyperparameters are passed, and `model_dir` defaults to `s3://<DEFAULT_BUCKET>/<TRAINING_JOB_NAME>`, so the script execution is as follows:\n```bash\npython mnist.py --model_dir s3://<DEFAULT_BUCKET>/<TRAINING_JOB_NAME>\n```\nWhen training is complete, the training job will upload the saved model for TensorFlow serving.",
"_____no_output_____"
]
],
[
[
"mnist_estimator.fit(training_data_uri)",
"_____no_output_____"
]
],
[
[
"Calling fit to train a model with TensorFlow 2.1 script.",
"_____no_output_____"
]
],
[
[
"mnist_estimator2.fit(training_data_uri)",
"_____no_output_____"
]
],
[
[
"# Deploy the trained model to an endpoint\n\nThe `deploy()` method creates a SageMaker model, which is then deployed to an endpoint to serve prediction requests in real time. We will use the TensorFlow Serving container for the endpoint, because we trained with script mode. This serving container runs an implementation of a web server that is compatible with SageMaker hosting protocol. The [Using your own inference code]() document explains how SageMaker runs inference containers.",
"_____no_output_____"
]
],
[
[
"predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')",
"_____no_output_____"
]
],
[
[
"Deployed the trained TensorFlow 2.1 model to an endpoint.",
"_____no_output_____"
]
],
[
[
"predictor2 = mnist_estimator2.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')",
"_____no_output_____"
]
],
[
[
"# Invoke the endpoint\n\nLet's download the training data and use that as input for inference.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n!aws --region {region} s3 cp s3://sagemaker-sample-data-{region}/tensorflow/mnist/train_data.npy train_data.npy\n!aws --region {region} s3 cp s3://sagemaker-sample-data-{region}/tensorflow/mnist/train_labels.npy train_labels.npy\n\ntrain_data = np.load('train_data.npy')\ntrain_labels = np.load('train_labels.npy')",
"_____no_output_____"
]
],
[
[
"The formats of the input and the output data correspond directly to the request and response formats of the `Predict` method in the [TensorFlow Serving REST API](https://www.tensorflow.org/serving/api_rest). SageMaker's TensforFlow Serving endpoints can also accept additional input formats that are not part of the TensorFlow REST API, including the simplified JSON format, line-delimited JSON objects (\"jsons\" or \"jsonlines\"), and CSV data.\n\nIn this example we are using a `numpy` array as input, which will be serialized into the simplified JSON format. In addtion, TensorFlow serving can also process multiple items at once as you can see in the following code. You can find the complete documentation on how to make predictions against a TensorFlow serving SageMaker endpoint [here](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/deploying_tensorflow_serving.rst#making-predictions-against-a-sagemaker-endpoint).",
"_____no_output_____"
]
],
[
[
"predictions = predictor.predict(train_data[:50])\nfor i in range(0, 50):\n prediction = predictions['predictions'][i]['classes']\n label = train_labels[i]\n print('prediction is {}, label is {}, matched: {}'.format(prediction, label, prediction == label))",
"_____no_output_____"
]
],
[
[
"Examine the prediction result from the TensorFlow 2.1 model.",
"_____no_output_____"
]
],
[
[
"predictions2 = predictor2.predict(train_data[:50])\nfor i in range(0, 50):\n prediction = np.argmax(predictions2['predictions'][i])\n label = train_labels[i]\n print('prediction is {}, label is {}, matched: {}'.format(prediction, label, prediction == label))",
"_____no_output_____"
]
],
[
[
"# Delete the endpoint\n\nLet's delete the endpoint we just created to prevent incurring any extra costs.",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
],
[
[
"Delete the TensorFlow 2.1 endpoint as well.",
"_____no_output_____"
]
],
[
[
"predictor2.delete_endpoint()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9be52092747c4e794306b4448ebf3f1cd4677b
| 2,288 |
ipynb
|
Jupyter Notebook
|
week6-Binary Search Tree/Binary Search Tree.ipynb
|
yongjin120/DSA
|
72781b419f26b7f02a9dd1e7ce5c5bc3be748d33
|
[
"MIT"
] | null | null | null |
week6-Binary Search Tree/Binary Search Tree.ipynb
|
yongjin120/DSA
|
72781b419f26b7f02a9dd1e7ce5c5bc3be748d33
|
[
"MIT"
] | null | null | null |
week6-Binary Search Tree/Binary Search Tree.ipynb
|
yongjin120/DSA
|
72781b419f26b7f02a9dd1e7ce5c5bc3be748d33
|
[
"MIT"
] | null | null | null | 23.587629 | 96 | 0.475087 |
[
[
[
"# Implementation of tree node\n\n> Has three references\n - Left hand side (LHS)\n - Right hand side (RHS)\n - Its own value\n - Its parent node\n\n ",
"_____no_output_____"
]
],
[
[
"class TreeNode:\n #Four references\n nodeLHS = ''\n nodeRHS = ''\n nodeParent = ''\n value = ''\n \n def __init__(self, value, nodeParent):\n self.value = value\n self.nodeParent = nodeParent\n \n def getLHS(self):\n return self.nodeLHS\n \n def getRHS(self):\n return self.nodeRHS\n \n def getValue(self):\n return self.value\n \n def getParent(self):\n return self.nodeParent\n \n def setLHS(self, LHS):\n self.nodeLHS = LHS\n \n def setRHS(self, RHS):\n self.nodeRHS= RHS\n \n def setValue(self):\n self.value=value\n \n def setParent(self, nodeParent):\n self.nodeParent=nodeParent",
"_____no_output_____"
]
],
[
[
"# Implementation of BST\n\n> - BST handles the data stored through its root\n - Root has its own value\n - Tree instance access to the root\n - Only through the root, the tree instances access to the descendant nodes of the root\n \n ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a9bf24fd48946d5b44cf31df5b7998c18dc91a1
| 57,619 |
ipynb
|
Jupyter Notebook
|
040 RNNs.ipynb
|
hoelzl/advanced-ml-course
|
250b303e21281b790f69fb385b9c812064b546eb
|
[
"Apache-2.0"
] | 1 |
2022-03-02T15:59:11.000Z
|
2022-03-02T15:59:11.000Z
|
040 RNNs.ipynb
|
hoelzl/advanced-ml-course
|
250b303e21281b790f69fb385b9c812064b546eb
|
[
"Apache-2.0"
] | null | null | null |
040 RNNs.ipynb
|
hoelzl/advanced-ml-course
|
250b303e21281b790f69fb385b9c812064b546eb
|
[
"Apache-2.0"
] | null | null | null | 35.589253 | 926 | 0.5506 |
[
[
[
"# Rekurrente Netze (RNNs)\n",
"_____no_output_____"
],
[
"## Sequentialle Daten\n\n<img src=\"img/ag/Figure-22-001.png\" style=\"width: 10%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Floating Window\n\n<img src=\"img/ag/Figure-22-002.png\" style=\"width: 20%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Verarbeitung mit MLP\n\n<img src=\"img/ag/Figure-22-002.png\" style=\"width: 20%; margin-left: 10%; margin-right: auto; float: left;\"/>\n<img src=\"img/ag/Figure-22-003.png\" style=\"width: 35%; margin-left: 10%; margin-right: auto; float: right;\"/>",
"_____no_output_____"
],
[
"## MLP berücksichtigt die Reihenfolge nicht!\n\n<img src=\"img/ag/Figure-22-004.png\" style=\"width: 25%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## RNNs: Netzwerke mit Speicher\n\n<img src=\"img/ag/Figure-22-005.png\" style=\"width: 15%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Zustand: Reperatur-Roboter\n\n<img src=\"img/ag/Figure-22-006.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Zustand: Reperatur-Roboter\n\n<img src=\"img/ag/Figure-22-007.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Zustand: Reperatur-Roboter\n\n<img src=\"img/ag/Figure-22-008.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Zustand: Reperatur-Roboter\n\n<img src=\"img/ag/Figure-22-009.png\" style=\"width: 85%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"# Arbeitsweise RNN\n\n<img src=\"img/ag/Figure-22-010.png\" style=\"width: 85%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## State wird nach der Verarbeitung geschrieben\n\n<img src=\"img/ag/Figure-22-011.png\" style=\"width: 35%; margin-left: 10%; margin-right: auto; float: left;\"/>\n<img src=\"img/ag/Figure-22-012.png\" style=\"width: 15%; margin-left: auto; margin-right: 10%; float: right;\"/>",
"_____no_output_____"
],
[
"## Netzwerkstruktur (einzelner Wert)\n\nWelche Operation ist sinnvoll?\n\n<img src=\"img/ag/Figure-22-013.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Netzwerkstruktur (einzelner Wert)\n\n<img src=\"img/ag/Figure-22-014.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Repräsentation in Diagrammen\n\n<img src=\"img/ag/Figure-22-015.png\" style=\"width: 10%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Entfaltete Darstellung\n\n<img src=\"img/ag/Figure-22-016.png\" style=\"width: 45%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Netzwerkstruktur für mehrere Werte\n\n<img src=\"img/ag/Figure-22-018.png\" style=\"width: 35%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Darstellung der Daten\n\n<img src=\"img/ag/Figure-22-019.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"# Darstellung der Daten\n\n<img src=\"img/ag/Figure-22-020.png\" style=\"width: 45%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"# Darstellung der Daten\n\n<img src=\"img/ag/Figure-22-021.png\" style=\"width: 45%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Arbeitsweise\n\n<img src=\"img/ag/Figure-22-022.png\" style=\"width: 55%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Probleme\n<div style=\"margin-top: 20pt; float:left;\">\n <ul>\n <li>Verlust der Gradienten</li>\n <li>Explosion der Gradienten</li>\n <li>Vergessen</li>\n </ul>\n</div>\n\n<img src=\"img/ag/Figure-22-023.png\" style=\"width: 55%; margin-left: auto; margin-right: 5%; float: right;\"/>",
"_____no_output_____"
],
[
"\n## LSTM\n\n<img src=\"img/ag/Figure-22-029.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Gates\n\n<img src=\"img/ag/Figure-22-024.png\" style=\"width: 55%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Gates\n\n<img src=\"img/ag/Figure-22-025.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Forget-Gate\n\n<img src=\"img/ag/Figure-22-026.png\" style=\"width: 30%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Remember Gate\n\n<img src=\"img/ag/Figure-22-027.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Output Gate\n\n<img src=\"img/ag/Figure-22-028.png\" style=\"width: 55%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## LSTM\n\n<img src=\"img/ag/Figure-22-029.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## LSTM Funktionsweise\n\n<img src=\"img/ag/Figure-22-030.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## LSTM Funktionsweise\n\n<img src=\"img/ag/Figure-22-031.png\" style=\"width: 45%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## LSTM Funktionsweise\n\n<img src=\"img/ag/Figure-22-032.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## LSTM Funktionsweise\n\n<img src=\"img/ag/Figure-22-033.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Verwendung von LSTMs\n\n<img src=\"img/ag/Figure-22-034.png\" style=\"width: 75%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Darstellung von LSTM Layern\n\n<img src=\"img/ag/Figure-22-035.png\" style=\"width: 25%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Conv/LSTM (Conv/RNN) Architektur\n\n<img src=\"img/ag/Figure-22-036.png\" style=\"width: 15%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Tiefe RNN Netze\n\n<img src=\"img/ag/Figure-22-037.png\" style=\"width: 55%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Bidirektionale RNNs\n\n<img src=\"img/ag/Figure-22-038.png\" style=\"width: 65%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Tiefe Bidirektionale Netze\n\n<img src=\"img/ag/Figure-22-039.png\" style=\"width: 45%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"# Anwendung: Generierung von Text\n\n<img src=\"img/ag/Figure-22-040.png\" style=\"width: 15%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"## Trainieren mittels Sliding Window\n\n<img src=\"img/ag/Figure-22-042.png\" style=\"width: 25%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
],
[
"# Vortrainierte LSTM-Modelle",
"_____no_output_____"
]
],
[
[
"from fastai.text.all import *",
"_____no_output_____"
],
[
"path = untar_data(URLs.IMDB)\npath.ls()",
"_____no_output_____"
],
[
"(path/'train').ls()",
"_____no_output_____"
],
[
"dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')",
"_____no_output_____"
],
[
"dls.show_batch()",
"_____no_output_____"
],
[
"learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)",
"_____no_output_____"
],
[
"# learn.fine_tune(4, 1e-2)",
"_____no_output_____"
],
[
"learn.fine_tune(4, 1e-2)",
"_____no_output_____"
],
[
"learn.show_results()",
"_____no_output_____"
],
[
"learn.predict(\"I really liked that movie!\")",
"_____no_output_____"
]
],
[
[
"# ULMFiT\n\nProblem: Wir trainieren die oberen Layer des Classifiers auf unser Problem, aber das Language-Model bleibt auf Wikipedia spezialisiert!\n\nLösung: Fine-Tuning des Language-Models bevor wir den Classifier trainieren.\n\n<img src=\"img/ulmfit.png\" style=\"width: 75%; margin-left: auto; margin-right: auto;\"/>",
"_____no_output_____"
]
],
[
[
"dls_lm = TextDataLoaders.from_folder(path, is_lm=True, valid_pct=0.1)",
"_____no_output_____"
],
[
"dls_lm.show_batch(max_n=5)",
"_____no_output_____"
],
[
"learn = language_model_learner(dls_lm, AWD_LSTM, metrics=[accuracy, Perplexity()], path=path, wd=0.1).to_fp16()",
"_____no_output_____"
],
[
"learn.fit_one_cycle(1, 1e-2)",
"_____no_output_____"
],
[
"learn.save('epoch-1')",
"_____no_output_____"
],
[
"learn = learn.load('epoch-1')",
"_____no_output_____"
],
[
"learn.unfreeze()\nlearn.fit_one_cycle(10, 1e-3)",
"_____no_output_____"
],
[
"learn.save_encoder('finetuned')",
"_____no_output_____"
],
[
"TEXT = \"I liked this movie because\"\nN_WORDS = 40\nN_SENTENCES = 2\npreds = [learn.predict(TEXT, N_WORDS, temperature=0.75) \n for _ in range(N_SENTENCES)]",
"_____no_output_____"
],
[
"print(\"\\n\".join(preds))",
"i liked this movie because i watched it in the Middle East when it was shown in 2005 . i like Sylvain Brawn . He is great in this film . i love him and i loved the characters\ni liked this movie because the cast and crew were great . The plot is believable , the acting was simple and the story line was very well developed . It does n't take place in the Japanese , and the lines\n"
],
[
"dls_clas = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test', text_vocab=dls_lm.vocab)",
"_____no_output_____"
],
[
"learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)",
"_____no_output_____"
],
[
"learn = learn.load_encoder('finetuned')",
"_____no_output_____"
],
[
"learn.fit_one_cycle(1, 2e-2)",
"_____no_output_____"
],
[
"learn.freeze_to(-2)\nlearn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2))",
"_____no_output_____"
],
[
"learn.freeze_to(-3)\nlearn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3))",
"_____no_output_____"
],
[
"learn.unfreeze()\nlearn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c0d02726a177e7911338903d97faa11755550
| 2,692 |
ipynb
|
Jupyter Notebook
|
error_and_fp/Floating Point vs Program Logic.ipynb
|
JiaheXu/MATH
|
9cb2b412ba019794702cacf213471742745d17a6
|
[
"MIT"
] | null | null | null |
error_and_fp/Floating Point vs Program Logic.ipynb
|
JiaheXu/MATH
|
9cb2b412ba019794702cacf213471742745d17a6
|
[
"MIT"
] | null | null | null |
error_and_fp/Floating Point vs Program Logic.ipynb
|
JiaheXu/MATH
|
9cb2b412ba019794702cacf213471742745d17a6
|
[
"MIT"
] | null | null | null | 28.946237 | 561 | 0.453938 |
[
[
[
"# Floating point vs Program Logic",
"_____no_output_____"
],
[
"What will the following code snippet do?",
"_____no_output_____"
]
],
[
[
"from time import sleep\n\nx = 0.0\n\nwhile x != 1.0:\n x += 0.1\n print(repr(x))\n \n sleep(0.1)",
"0.1\n0.2\n0.30000000000000004\n0.4\n0.5\n0.6\n0.7\n0.7999999999999999\n0.8999999999999999\n0.9999999999999999\n"
]
],
[
[
"How do you prevent that from happening?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a9c21f28ab4af7f487db198a98b82a0a7e79a12
| 174,643 |
ipynb
|
Jupyter Notebook
|
visualize_decision_tree.ipynb
|
amcodec1/Simple-Decision-Tree-Demo
|
fb7cfdc443999d1d862483213cd38287b4fe9176
|
[
"MIT"
] | null | null | null |
visualize_decision_tree.ipynb
|
amcodec1/Simple-Decision-Tree-Demo
|
fb7cfdc443999d1d862483213cd38287b4fe9176
|
[
"MIT"
] | null | null | null |
visualize_decision_tree.ipynb
|
amcodec1/Simple-Decision-Tree-Demo
|
fb7cfdc443999d1d862483213cd38287b4fe9176
|
[
"MIT"
] | null | null | null | 221.067089 | 152,070 | 0.868005 |
[
[
[
"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom IPython.display import Image \nfrom StringIO import StringIO\nimport pydotplus\nfrom sklearn import preprocessing\nfrom sklearn import tree\n%matplotlib inline",
"_____no_output_____"
],
[
"def convert_yes_no(txt):\n if 'yes' in txt:\n return 1\n else:\n return 0",
"_____no_output_____"
],
[
"def plot_decision_tree(clf,feature_name,target_name):\n dot_data = StringIO() \n tree.export_graphviz(clf, out_file=dot_data, \n feature_names=feature_name, \n class_names=target_name, \n filled=True, rounded=True, \n special_characters=True) \n graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) \n return Image(graph.create_png())",
"_____no_output_____"
],
[
"df = pd.read_csv('tennis.csv')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.windy = df.windy.astype(int)\ndf.play = df.play.apply(convert_yes_no)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df = pd.get_dummies(df)\ndf",
"_____no_output_____"
],
[
"X_train = df.loc[:, df.columns != 'play']",
"_____no_output_____"
],
[
"Y_train = df.play",
"_____no_output_____"
],
[
"clf = tree.DecisionTreeClassifier(criterion='entropy')",
"_____no_output_____"
],
[
"clf = clf.fit(X_train,Y_train)",
"_____no_output_____"
],
[
"plot_decision_tree(clf, X_train.columns,df.columns[1])",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c2bfa1f332d122b088b93351c0bc35311d773
| 532,218 |
ipynb
|
Jupyter Notebook
|
ml-foundation/deeplearning/DeepFeaturesForImageClassification.ipynb
|
okkhoy/pyDataAnalysis
|
b28558239a2a4a15865d65b29a0d45a6ca0093d1
|
[
"MIT"
] | null | null | null |
ml-foundation/deeplearning/DeepFeaturesForImageClassification.ipynb
|
okkhoy/pyDataAnalysis
|
b28558239a2a4a15865d65b29a0d45a6ca0093d1
|
[
"MIT"
] | null | null | null |
ml-foundation/deeplearning/DeepFeaturesForImageClassification.ipynb
|
okkhoy/pyDataAnalysis
|
b28558239a2a4a15865d65b29a0d45a6ca0093d1
|
[
"MIT"
] | null | null | null | 701.209486 | 284,930 | 0.545568 |
[
[
[
"import graphlab",
"_____no_output_____"
]
],
[
[
"### Load train and test images",
"_____no_output_____"
]
],
[
[
"image_train = graphlab.SFrame('image_train_data/')\nimage_test = graphlab.SFrame('image_test_data/')",
"[INFO] This non-commercial license of GraphLab Create is assigned to [email protected] and will expire on September 26, 2016. For commercial licensing options, visit https://dato.com/buy/.\n\n[INFO] Start server at: ipc:///tmp/graphlab_server-7036 - Server binary: /usr/local/lib/python2.7/dist-packages/graphlab/unity_server - Server log: /tmp/graphlab_server_1449375968.log\n[INFO] GraphLab Server Version: 1.6.1\n"
],
[
"graphlab.canvas.set_target('ipynb')",
"_____no_output_____"
],
[
"image_train['image'].show()",
"_____no_output_____"
],
[
"image_train.show()",
"_____no_output_____"
]
],
[
[
"### Train a classifier on the raw image pixels",
"_____no_output_____"
]
],
[
[
"raw_pixel_model = graphlab.logistic_classifier.create(image_train, \n target='label',\n features=['image_array'])",
"PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n You can set ``validation_set=None`` to disable validation tracking.\n\nPROGRESS: Logistic regression:\nPROGRESS: --------------------------------------------------------\nPROGRESS: Number of examples : 1908\nPROGRESS: Number of classes : 4\nPROGRESS: Number of feature columns : 1\nPROGRESS: Number of unpacked features : 3072\nPROGRESS: Number of coefficients : 9219\nPROGRESS: Starting L-BFGS\nPROGRESS: --------------------------------------------------------\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | Iteration | Passes | Step size | Elapsed Time | Training-accuracy | Validation-accuracy |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | 1 | 6 | 0.000006 | 3.002678 | 0.261530 | 0.175258 |\nPROGRESS: | 2 | 9 | 5.000000 | 4.100743 | 0.408281 | 0.360825 |\nPROGRESS: | 3 | 10 | 5.000000 | 4.590109 | 0.419287 | 0.360825 |\nPROGRESS: | 4 | 12 | 1.000000 | 5.418787 | 0.445493 | 0.443299 |\nPROGRESS: | 5 | 13 | 1.000000 | 5.881215 | 0.459644 | 0.453608 |\nPROGRESS: | 6 | 14 | 1.000000 | 6.361428 | 0.384172 | 0.329897 |\nPROGRESS: | 10 | 18 | 1.000000 | 8.157102 | 0.508910 | 0.463918 |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\n"
]
],
[
[
"### Making prediction with the simple raw pixel model",
"_____no_output_____"
]
],
[
[
"image_test[0:3]['image'].show()",
"_____no_output_____"
],
[
"image_test[0:3]['label']",
"_____no_output_____"
],
[
"raw_pixel_model.predict(image_test[0:3])",
"_____no_output_____"
]
],
[
[
"#### Evaluating raw pixel model on test data",
"_____no_output_____"
]
],
[
[
"raw_pixel_model.evaluate(image_test)",
"_____no_output_____"
]
],
[
[
"### Improving the model with deep features",
"_____no_output_____"
]
],
[
[
"len(image_train)",
"_____no_output_____"
],
[
"len(image_test)",
"_____no_output_____"
],
[
"deep_learning_model = graphlab.load_model(imagenet_model)",
"_____no_output_____"
],
[
"image_train.head()",
"_____no_output_____"
]
],
[
[
"### Training classifier using deep features",
"_____no_output_____"
]
],
[
[
"deep_features_model = graphlab.logistic_classifier.create(image_train,\n features=['deep_features'],\n target='label')",
"PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n You can set ``validation_set=None`` to disable validation tracking.\n\nPROGRESS: WARNING: Detected extremely low variance for feature(s) 'deep_features' because all entries are nearly the same.\nProceeding with model training using all features. If the model does not provide results of adequate quality, exclude the above mentioned feature(s) from the input dataset.\nPROGRESS: Logistic regression:\nPROGRESS: --------------------------------------------------------\nPROGRESS: Number of examples : 1908\nPROGRESS: Number of classes : 4\nPROGRESS: Number of feature columns : 1\nPROGRESS: Number of unpacked features : 4096\nPROGRESS: Number of coefficients : 12291\nPROGRESS: Starting L-BFGS\nPROGRESS: --------------------------------------------------------\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | Iteration | Passes | Step size | Elapsed Time | Training-accuracy | Validation-accuracy |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | 1 | 5 | 0.000131 | 2.392598 | 0.710168 | 0.701031 |\nPROGRESS: | 2 | 9 | 0.250000 | 4.446716 | 0.769392 | 0.793814 |\nPROGRESS: | 3 | 10 | 0.250000 | 5.107348 | 0.770440 | 0.783505 |\nPROGRESS: | 4 | 11 | 0.250000 | 5.796446 | 0.780922 | 0.783505 |\nPROGRESS: | 5 | 12 | 0.250000 | 6.473750 | 0.791929 | 0.804124 |\nPROGRESS: | 6 | 13 | 0.250000 | 7.120380 | 0.809748 | 0.824742 |\nPROGRESS: | 7 | 14 | 0.250000 | 7.774480 | 0.844864 | 0.845361 |\nPROGRESS: | 8 | 15 | 0.250000 | 8.428039 | 0.849057 | 0.855670 |\nPROGRESS: | 9 | 16 | 0.250000 | 9.071685 | 0.864256 | 0.865979 |\nPROGRESS: | 10 | 17 | 0.250000 | 9.720529 | 0.879979 | 0.855670 |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\n"
]
],
[
[
"### Making prediction using deep feature model",
"_____no_output_____"
]
],
[
[
"deep_features_model.predict(image_test[0:3])",
"_____no_output_____"
],
[
"deep_features_model.evaluate(image_test)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9c33437c5e74fbf219c95afbb96ff12e23438d
| 1,018,021 |
ipynb
|
Jupyter Notebook
|
tfrecords/tfrecords_guide.ipynb
|
scotthuang1989/mytensorflow_example_code
|
2fd1099afc3f60ef9a1bf22227c4cde51ede212c
|
[
"MIT"
] | 32 |
2017-10-30T01:44:56.000Z
|
2021-12-23T06:40:47.000Z
|
tfrecords/tfrecords_guide.ipynb
|
scotthuang1989/mytensorflow_example_code
|
2fd1099afc3f60ef9a1bf22227c4cde51ede212c
|
[
"MIT"
] | 1 |
2020-11-18T21:13:24.000Z
|
2020-11-18T21:13:24.000Z
|
tfrecords/tfrecords_guide.ipynb
|
scotthuang1989/mytensorflow_example_code
|
2fd1099afc3f60ef9a1bf22227c4cde51ede212c
|
[
"MIT"
] | 19 |
2017-12-15T13:27:36.000Z
|
2020-08-18T00:16:38.000Z
| 1,561.381902 | 194,346 | 0.945962 |
[
[
[
"# This notebook is copied from [here](https://github.com/warmspringwinds/tensorflow_notes/blob/master/tfrecords_guide.ipynb) with some small changes",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"In this post we will cover how to convert a dataset into _.tfrecord_ file.\nBinary files are sometimes easier to use, because you don't have to specify\ndifferent directories for images and groundtruth annotations. While storing your data\nin binary file, you have your data in one block of memory, compared to storing\neach image and annotation separately. Openning a file is a considerably\ntime-consuming operation especially if you use _hdd_ and not _ssd_, because it\ninvolves moving the disk reader head and that takes quite some time. Overall,\nby using binary files you make it easier to distribute and make\nthe data better aligned for efficient reading.\n\nThe post consists of tree parts: \n* in the first part, we demonstrate how you can get raw data bytes of any image using _numpy_ which is in some sense similar to what you do when converting your dataset to binary format. \n\n* Second part shows how to convert a dataset to _tfrecord_ file without defining a computational graph and only by employing some built-in _tensorflow_ functions. \n\n* Third part explains how to define a model for reading your data from created binary file and batch it in a random manner, which is necessary during training.",
"_____no_output_____"
],
[
"### Getting raw data bytes in numpy",
"_____no_output_____"
],
[
"Here we demonstrate how you can get raw data bytes of an image (any ndarray)\nand how to restore the image back. \nOne important note is that **during this operation\nthe information about the dimensions of the image is lost and we have to \nuse it to recover the original image. This is one of the reasons why\nwe will have to store the raw image representation along with the dimensions\nof the original image.**",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncat_img = plt.imread('data/imgs/cat.jpg')\nplt.imshow(cat_img)\n# io.imshow(cat_img)",
"_____no_output_____"
],
[
"# Let's convert the picture into string representation\n# using the ndarray.tostring() function \ncat_string = cat_img.tostring()\n\n# Now let's convert the string back to the image\n# Important: the dtype should be specified\n# otherwise the reconstruction will be errorness\n# Reconstruction is 1d, so we need sizes of image\n# to fully reconstruct it.\nreconstructed_cat_1d = np.fromstring(cat_string, dtype=np.uint8)\n\n# Here we reshape the 1d representation\n# This is the why we need to store the sizes of image\n# along with its serialized representation.\nreconstructed_cat_img = reconstructed_cat_1d.reshape(cat_img.shape)\n\n# Let's check if we got everything right and compare\n# reconstructed array to the original one.\nnp.allclose(cat_img, reconstructed_cat_img)",
"_____no_output_____"
]
],
[
[
"### Creating a _.tfrecord_ file and reading it without defining a graph",
"_____no_output_____"
],
[
"Here we show how to write a small dataset (three images/annotations from _PASCAL VOC_) to\n_.tfrrecord_ file and read it without defining a computational graph.\n\nWe also make sure that images that we read back from _.tfrecord_ file are equal to\nthe original images. Pay attention that we also write the sizes of the images along with\nthe image in the raw format. We showed an example on why we need to also store the size\nin the previous section.",
"_____no_output_____"
]
],
[
[
"# Get some image/annotation pairs for example \nfilename_pairs = [\n('data/VOC2012/JPEGImages/2007_000032.jpg',\n'data/VOC2012/SegmentationClass/2007_000032.png'),\n('data/VOC2012/JPEGImages/2007_000039.jpg',\n'data/VOC2012/SegmentationClass/2007_000039.png'),\n('data/VOC2012/JPEGImages/2007_000033.jpg',\n'data/VOC2012/SegmentationClass/2007_000033.png')\n ]",
"_____no_output_____"
],
[
"%matplotlib inline\n\n# Important: We are using PIL to read .png files later.\n# This was done on purpose to read indexed png files\n# in a special way -- only indexes and not map the indexes\n# to actual rgb values. This is specific to PASCAL VOC\n# dataset data. If you don't want thit type of behaviour\n# consider using skimage.io.imread()\nfrom PIL import Image\nimport numpy as np\nimport skimage.io as io\nimport tensorflow as tf\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ntfrecords_filename = 'pascal_voc_segmentation.tfrecords'\n\nwriter = tf.python_io.TFRecordWriter(tfrecords_filename)\n\n# Let's collect the real images to later on compare\n# to the reconstructed ones\noriginal_images = []\n\nfor img_path, annotation_path in filename_pairs:\n \n img = np.array(Image.open(img_path))\n annotation = np.array(Image.open(annotation_path))\n \n # The reason to store image sizes was demonstrated\n # in the previous example -- we have to know sizes\n # of images to later read raw serialized string,\n # convert to 1d array and convert to respective\n # shape that image used to have.\n height = img.shape[0]\n width = img.shape[1]\n \n # Put in the original images into array\n # Just for future check for correctness\n original_images.append((img, annotation))\n \n img_raw = img.tostring()\n annotation_raw = annotation.tostring()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'image_raw': _bytes_feature(img_raw),\n 'mask_raw': _bytes_feature(annotation_raw)}))\n \n writer.write(example.SerializeToString())\n\nwriter.close()",
"_____no_output_____"
],
[
"reconstructed_images = []\n\nrecord_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)\n\nfor string_record in record_iterator:\n \n example = tf.train.Example()\n example.ParseFromString(string_record)\n \n height = int(example.features.feature['height']\n .int64_list\n .value[0])\n \n width = int(example.features.feature['width']\n .int64_list\n .value[0])\n \n img_string = (example.features.feature['image_raw']\n .bytes_list\n .value[0])\n \n annotation_string = (example.features.feature['mask_raw']\n .bytes_list\n .value[0])\n \n img_1d = np.fromstring(img_string, dtype=np.uint8)\n reconstructed_img = img_1d.reshape((height, width, -1))\n \n annotation_1d = np.fromstring(annotation_string, dtype=np.uint8)\n \n # Annotations don't have depth (3rd dimension)\n reconstructed_annotation = annotation_1d.reshape((height, width))\n \n reconstructed_images.append((reconstructed_img, reconstructed_annotation))\n ",
"_____no_output_____"
],
[
"# Let's check if the reconstructed images match\n# the original images\n\nfor original_pair, reconstructed_pair in zip(original_images, reconstructed_images):\n \n img_pair_to_compare, annotation_pair_to_compare = zip(original_pair,\n reconstructed_pair)\n print(np.allclose(*img_pair_to_compare))\n print(np.allclose(*annotation_pair_to_compare))\n",
"True\nTrue\nTrue\nTrue\nTrue\nTrue\n"
]
],
[
[
"### Defining the graph to read and batch images from _.tfrecords_",
"_____no_output_____"
],
[
"Here we define a graph to read and batch images from the file that we have created\npreviously. It is very important to randomly shuffle images during training and depending\non the application we have to use different batch size.\n\nIt is very important to point out that if we use batching -- we have to define\nthe sizes of images beforehand. This may sound like a limitation, but actually in the\nImage Classification and Image Segmentation fields the training is performed on the images\nof the same size. \n\nThe code provided here is partially based on [this official example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py) and code from [this stackoverflow question](http://stackoverflow.com/questions/35028173/how-to-read-images-with-different-size-in-a-tfrecord-file).\nAlso if you want to know how you can control the batching according to your need read [these docs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.train.shuffle_batch.md)\n.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport tensorflow as tf\nimport skimage.io as io\n\nIMAGE_HEIGHT = 384\nIMAGE_WIDTH = 384\n\ntfrecords_filename = 'pascal_voc_segmentation.tfrecords'\n\ndef read_and_decode(filename_queue):\n \n reader = tf.TFRecordReader()\n\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'mask_raw': tf.FixedLenFeature([], tf.string)\n })\n\n # Convert from a scalar string tensor (whose single string has\n # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape\n # [mnist.IMAGE_PIXELS].\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n annotation = tf.decode_raw(features['mask_raw'], tf.uint8)\n \n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n \n image_shape = tf.stack([height, width, 3])\n annotation_shape = tf.stack([height, width, 1])\n \n image = tf.reshape(image, image_shape)\n annotation = tf.reshape(annotation, annotation_shape)\n \n image_size_const = tf.constant((IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=tf.int32)\n annotation_size_const = tf.constant((IMAGE_HEIGHT, IMAGE_WIDTH, 1), dtype=tf.int32)\n \n # Random transformations can be put here: right before you crop images\n # to predefined size. 
To get more information look at the stackoverflow\n # question linked above.\n \n resized_image = tf.image.resize_image_with_crop_or_pad(image=image,\n target_height=IMAGE_HEIGHT,\n target_width=IMAGE_WIDTH)\n \n resized_annotation = tf.image.resize_image_with_crop_or_pad(image=annotation,\n target_height=IMAGE_HEIGHT,\n target_width=IMAGE_WIDTH)\n \n \n images, annotations = tf.train.shuffle_batch( [resized_image, resized_annotation],\n batch_size=2,\n capacity=30,\n num_threads=2,\n min_after_dequeue=10)\n \n return images, annotations",
"_____no_output_____"
],
[
"filename_queue = tf.train.string_input_producer(\n [tfrecords_filename], num_epochs=10)\n\n# Even when reading in multiple threads, share the filename\n# queue.\nimage, annotation = read_and_decode(filename_queue)\n\n# The op for initializing the variables.\ninit_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\nwith tf.Session() as sess:\n \n sess.run(init_op)\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n \n # Let's read off 3 batches just for example\n for i in range(3):\n \n img, anno = sess.run([image, annotation])\n print(img[0, :, :, :].shape)\n \n print('current batch')\n \n # We selected the batch size of two\n # So we should get two image pairs in each batch\n # Let's make sure it is random\n\n io.imshow(img[0, :, :, :])\n io.show()\n\n io.imshow(anno[0, :, :, 0])\n io.show()\n \n io.imshow(img[1, :, :, :])\n io.show()\n\n io.imshow(anno[1, :, :, 0])\n io.show()\n \n \n coord.request_stop()\n coord.join(threads)\n",
"(384, 384, 3)\ncurrent batch\n"
]
],
[
[
"### Conclusion and Discussion",
"_____no_output_____"
],
[
"In this post we covered how to convert a dataset into _.tfrecord_ format,\nmade sure that we have the same data and saw how to define a graph to\nread and batch files from the created file.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a9c378fb57a0fbcf6b8019780a347cca5cc5360
| 334,381 |
ipynb
|
Jupyter Notebook
|
experiments/tl_3v2.wisig+cores/oracle.run1.framed-cores_wisig/trials/12/trial.ipynb
|
stevester94/csc500-notebooks
|
4c1b04c537fe233a75bed82913d9d84985a89177
|
[
"MIT"
] | null | null | null |
experiments/tl_3v2.wisig+cores/oracle.run1.framed-cores_wisig/trials/12/trial.ipynb
|
stevester94/csc500-notebooks
|
4c1b04c537fe233a75bed82913d9d84985a89177
|
[
"MIT"
] | null | null | null |
experiments/tl_3v2.wisig+cores/oracle.run1.framed-cores_wisig/trials/12/trial.ipynb
|
stevester94/csc500-notebooks
|
4c1b04c537fe233a75bed82913d9d84985a89177
|
[
"MIT"
] | null | null | null | 130.413807 | 120,274 | 0.61208 |
[
[
[
"# Transfer Learning Template",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\n \nimport os, json, sys, time, random\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\nfrom easydict import EasyDict\nimport matplotlib.pyplot as plt\n\nfrom steves_models.steves_ptn import Steves_Prototypical_Network\n\nfrom steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper\nfrom steves_utils.iterable_aggregator import Iterable_Aggregator\nfrom steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig\nfrom steves_utils.torch_sequential_builder import build_sequential\nfrom steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader\nfrom steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)\nfrom steves_utils.PTN.utils import independent_accuracy_assesment\n\nfrom torch.utils.data import DataLoader\n\nfrom steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory\n\nfrom steves_utils.ptn_do_report import (\n get_loss_curve,\n get_results_table,\n get_parameters_table,\n get_domain_accuracies,\n)\n\nfrom steves_utils.transforms import get_chained_transform",
"_____no_output_____"
]
],
[
[
"# Allowed Parameters\nThese are allowed parameters, not defaults\nEach of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)\n\nPapermill uses the cell tag \"parameters\" to inject the real parameters below this cell.\nEnable tags to see what I mean",
"_____no_output_____"
]
],
[
[
"required_parameters = {\n \"experiment_name\",\n \"lr\",\n \"device\",\n \"seed\",\n \"dataset_seed\",\n \"n_shot\",\n \"n_query\",\n \"n_way\",\n \"train_k_factor\",\n \"val_k_factor\",\n \"test_k_factor\",\n \"n_epoch\",\n \"patience\",\n \"criteria_for_best\",\n \"x_net\",\n \"datasets\",\n \"torch_default_dtype\",\n \"NUM_LOGS_PER_EPOCH\",\n \"BEST_MODEL_PATH\",\n \"x_shape\",\n}",
"_____no_output_____"
],
[
"from steves_utils.CORES.utils import (\n ALL_NODES,\n ALL_NODES_MINIMUM_1000_EXAMPLES,\n ALL_DAYS\n)\n\nfrom steves_utils.ORACLE.utils_v2 import (\n ALL_DISTANCES_FEET_NARROWED,\n ALL_RUNS,\n ALL_SERIAL_NUMBERS,\n)\n\nstandalone_parameters = {}\nstandalone_parameters[\"experiment_name\"] = \"STANDALONE PTN\"\nstandalone_parameters[\"lr\"] = 0.001\nstandalone_parameters[\"device\"] = \"cuda\"\n\nstandalone_parameters[\"seed\"] = 1337\nstandalone_parameters[\"dataset_seed\"] = 1337\n\nstandalone_parameters[\"n_way\"] = 8\nstandalone_parameters[\"n_shot\"] = 3\nstandalone_parameters[\"n_query\"] = 2\nstandalone_parameters[\"train_k_factor\"] = 1\nstandalone_parameters[\"val_k_factor\"] = 2\nstandalone_parameters[\"test_k_factor\"] = 2\n\n\nstandalone_parameters[\"n_epoch\"] = 50\n\nstandalone_parameters[\"patience\"] = 10\nstandalone_parameters[\"criteria_for_best\"] = \"source_loss\"\n\nstandalone_parameters[\"datasets\"] = [\n {\n \"labels\": ALL_SERIAL_NUMBERS,\n \"domains\": ALL_DISTANCES_FEET_NARROWED,\n \"num_examples_per_domain_per_label\": 100,\n \"pickle_path\": os.path.join(get_datasets_base_path(), \"oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl\"),\n \"source_or_target_dataset\": \"source\",\n \"x_transforms\": [\"unit_mag\", \"minus_two\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"ORACLE_\"\n },\n {\n \"labels\": ALL_NODES,\n \"domains\": ALL_DAYS,\n \"num_examples_per_domain_per_label\": 100,\n \"pickle_path\": os.path.join(get_datasets_base_path(), \"cores.stratified_ds.2022A.pkl\"),\n \"source_or_target_dataset\": \"target\",\n \"x_transforms\": [\"unit_power\", \"times_zero\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"CORES_\"\n } \n]\n\nstandalone_parameters[\"torch_default_dtype\"] = \"torch.float32\" \n\n\n\nstandalone_parameters[\"x_net\"] = [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\":[-1, 1, 2, 256]}},\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":1, \"out_channels\":256, \"kernel_size\":(1,7), 
\"bias\":False, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":256, \"out_channels\":80, \"kernel_size\":(2,7), \"bias\":True, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 80*256, \"out_features\": 256}}, # 80 units per IQ pair\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 256}},\n]\n\n# Parameters relevant to results\n# These parameters will basically never need to change\nstandalone_parameters[\"NUM_LOGS_PER_EPOCH\"] = 10\nstandalone_parameters[\"BEST_MODEL_PATH\"] = \"./best_model.pth\"\n\n\n\n\n",
"_____no_output_____"
],
[
"# Parameters\nparameters = {\n \"experiment_name\": \"tl_3Av2:oracle.run1.framed -> cores+wisig\",\n \"device\": \"cuda\",\n \"lr\": 0.0001,\n \"x_shape\": [2, 200],\n \"n_shot\": 3,\n \"n_query\": 2,\n \"train_k_factor\": 3,\n \"val_k_factor\": 2,\n \"test_k_factor\": 2,\n \"torch_default_dtype\": \"torch.float32\",\n \"n_epoch\": 50,\n \"patience\": 3,\n \"criteria_for_best\": \"target_accuracy\",\n \"x_net\": [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\": [-1, 1, 2, 200]}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 1,\n \"out_channels\": 256,\n \"kernel_size\": [1, 7],\n \"bias\": False,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 256}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 256,\n \"out_channels\": 80,\n \"kernel_size\": [2, 7],\n \"bias\": True,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 16000, \"out_features\": 256}},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\": 256}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 256}},\n ],\n \"NUM_LOGS_PER_EPOCH\": 10,\n \"BEST_MODEL_PATH\": \"./best_model.pth\",\n \"n_way\": 16,\n \"datasets\": [\n {\n \"labels\": [\n \"1-10.\",\n \"1-11.\",\n \"1-15.\",\n \"1-16.\",\n \"1-17.\",\n \"1-18.\",\n \"1-19.\",\n \"10-4.\",\n \"10-7.\",\n \"11-1.\",\n \"11-14.\",\n \"11-17.\",\n \"11-20.\",\n \"11-7.\",\n \"13-20.\",\n \"13-8.\",\n \"14-10.\",\n \"14-11.\",\n \"14-14.\",\n \"14-7.\",\n \"15-1.\",\n \"15-20.\",\n \"16-1.\",\n \"16-16.\",\n \"17-10.\",\n \"17-11.\",\n \"17-2.\",\n \"19-1.\",\n \"19-16.\",\n \"19-19.\",\n \"19-20.\",\n \"19-3.\",\n \"2-10.\",\n 
\"2-11.\",\n \"2-17.\",\n \"2-18.\",\n \"2-20.\",\n \"2-3.\",\n \"2-4.\",\n \"2-5.\",\n \"2-6.\",\n \"2-7.\",\n \"2-8.\",\n \"3-13.\",\n \"3-18.\",\n \"3-3.\",\n \"4-1.\",\n \"4-10.\",\n \"4-11.\",\n \"4-19.\",\n \"5-5.\",\n \"6-15.\",\n \"7-10.\",\n \"7-14.\",\n \"8-18.\",\n \"8-20.\",\n \"8-3.\",\n \"8-8.\",\n ],\n \"domains\": [1, 2, 3, 4, 5],\n \"num_examples_per_domain_per_label\": -1,\n \"pickle_path\": \"/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl\",\n \"source_or_target_dataset\": \"target\",\n \"x_transforms\": [\"unit_power\", \"take_200\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"C_\",\n },\n {\n \"labels\": [\n \"1-10\",\n \"1-12\",\n \"1-14\",\n \"1-16\",\n \"1-18\",\n \"1-19\",\n \"1-8\",\n \"10-11\",\n \"10-17\",\n \"10-4\",\n \"10-7\",\n \"11-1\",\n \"11-10\",\n \"11-19\",\n \"11-20\",\n \"11-4\",\n \"11-7\",\n \"12-19\",\n \"12-20\",\n \"12-7\",\n \"13-14\",\n \"13-18\",\n \"13-19\",\n \"13-20\",\n \"13-3\",\n \"13-7\",\n \"14-10\",\n \"14-11\",\n \"14-12\",\n \"14-13\",\n \"14-14\",\n \"14-19\",\n \"14-20\",\n \"14-7\",\n \"14-8\",\n \"14-9\",\n \"15-1\",\n \"15-19\",\n \"15-6\",\n \"16-1\",\n \"16-16\",\n \"16-19\",\n \"16-20\",\n \"17-10\",\n \"17-11\",\n \"18-1\",\n \"18-10\",\n \"18-11\",\n \"18-12\",\n \"18-13\",\n \"18-14\",\n \"18-15\",\n \"18-16\",\n \"18-17\",\n \"18-19\",\n \"18-2\",\n \"18-20\",\n \"18-4\",\n \"18-5\",\n \"18-7\",\n \"18-8\",\n \"18-9\",\n \"19-1\",\n \"19-10\",\n \"19-11\",\n \"19-12\",\n \"19-13\",\n \"19-14\",\n \"19-15\",\n \"19-19\",\n \"19-2\",\n \"19-20\",\n \"19-3\",\n \"19-4\",\n \"19-6\",\n \"19-7\",\n \"19-8\",\n \"19-9\",\n \"2-1\",\n \"2-13\",\n \"2-15\",\n \"2-3\",\n \"2-4\",\n \"2-5\",\n \"2-6\",\n \"2-7\",\n \"2-8\",\n \"20-1\",\n \"20-12\",\n \"20-14\",\n \"20-15\",\n \"20-16\",\n \"20-18\",\n \"20-19\",\n \"20-20\",\n \"20-3\",\n \"20-4\",\n \"20-5\",\n \"20-7\",\n \"20-8\",\n \"3-1\",\n \"3-13\",\n \"3-18\",\n \"3-2\",\n \"3-8\",\n \"4-1\",\n \"4-10\",\n 
\"4-11\",\n \"5-1\",\n \"5-5\",\n \"6-1\",\n \"6-15\",\n \"6-6\",\n \"7-10\",\n \"7-11\",\n \"7-12\",\n \"7-13\",\n \"7-14\",\n \"7-7\",\n \"7-8\",\n \"7-9\",\n \"8-1\",\n \"8-13\",\n \"8-14\",\n \"8-18\",\n \"8-20\",\n \"8-3\",\n \"8-8\",\n \"9-1\",\n \"9-7\",\n ],\n \"domains\": [1, 2, 3, 4],\n \"num_examples_per_domain_per_label\": -1,\n \"pickle_path\": \"/mnt/wd500GB/CSC500/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl\",\n \"source_or_target_dataset\": \"target\",\n \"x_transforms\": [\"unit_power\", \"take_200\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"W_\",\n },\n {\n \"labels\": [\n \"3123D52\",\n \"3123D65\",\n \"3123D79\",\n \"3123D80\",\n \"3123D54\",\n \"3123D70\",\n \"3123D7B\",\n \"3123D89\",\n \"3123D58\",\n \"3123D76\",\n \"3123D7D\",\n \"3123EFE\",\n \"3123D64\",\n \"3123D78\",\n \"3123D7E\",\n \"3124E4A\",\n ],\n \"domains\": [32, 38, 8, 44, 14, 50, 20, 26],\n \"num_examples_per_domain_per_label\": 2000,\n \"pickle_path\": \"/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl\",\n \"source_or_target_dataset\": \"source\",\n \"x_transforms\": [\"unit_power\", \"take_200\", \"resample_20Msps_to_25Msps\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"O_\",\n },\n ],\n \"seed\": 500,\n \"dataset_seed\": 500,\n}\n",
"_____no_output_____"
],
[
"# Set this to True if you want to run this template directly\nSTANDALONE = False\nif STANDALONE:\n print(\"parameters not injected, running with standalone_parameters\")\n parameters = standalone_parameters\n\nif not 'parameters' in locals() and not 'parameters' in globals():\n raise Exception(\"Parameter injection failed\")\n\n#Use an easy dict for all the parameters\np = EasyDict(parameters)\n\nif \"x_shape\" not in p:\n p.x_shape = [2,256] # Default to this if we dont supply x_shape\n\n\nsupplied_keys = set(p.keys())\n\nif supplied_keys != required_parameters:\n print(\"Parameters are incorrect\")\n if len(supplied_keys - required_parameters)>0: print(\"Shouldn't have:\", str(supplied_keys - required_parameters))\n if len(required_parameters - supplied_keys)>0: print(\"Need to have:\", str(required_parameters - supplied_keys))\n raise RuntimeError(\"Parameters are incorrect\")",
"_____no_output_____"
],
[
"###################################\n# Set the RNGs and make it all deterministic\n###################################\nnp.random.seed(p.seed)\nrandom.seed(p.seed)\ntorch.manual_seed(p.seed)\n\ntorch.use_deterministic_algorithms(True) ",
"_____no_output_____"
],
[
"###########################################\n# The stratified datasets honor this\n###########################################\ntorch.set_default_dtype(eval(p.torch_default_dtype))",
"_____no_output_____"
],
[
"###################################\n# Build the network(s)\n# Note: It's critical to do this AFTER setting the RNG\n###################################\nx_net = build_sequential(p.x_net)",
"_____no_output_____"
],
[
"start_time_secs = time.time()",
"_____no_output_____"
],
[
"p.domains_source = []\np.domains_target = []\n\n\ntrain_original_source = []\nval_original_source = []\ntest_original_source = []\n\ntrain_original_target = []\nval_original_target = []\ntest_original_target = []",
"_____no_output_____"
],
[
"# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), \"unit_power\") # unit_power, unit_mag\n# global_x_transform_func = lambda x: normalize(x, \"unit_power\") # unit_power, unit_mag",
"_____no_output_____"
],
[
"def add_dataset(\n labels,\n domains,\n pickle_path,\n x_transforms,\n episode_transforms,\n domain_prefix,\n num_examples_per_domain_per_label,\n source_or_target_dataset:str,\n iterator_seed=p.seed,\n dataset_seed=p.dataset_seed,\n n_shot=p.n_shot,\n n_way=p.n_way,\n n_query=p.n_query,\n train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),\n):\n \n if x_transforms == []: x_transform = None\n else: x_transform = get_chained_transform(x_transforms)\n \n if episode_transforms == []: episode_transform = None\n else: raise Exception(\"episode_transforms not implemented\")\n \n episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])\n\n\n eaf = Episodic_Accessor_Factory(\n labels=labels,\n domains=domains,\n num_examples_per_domain_per_label=num_examples_per_domain_per_label,\n iterator_seed=iterator_seed,\n dataset_seed=dataset_seed,\n n_shot=n_shot,\n n_way=n_way,\n n_query=n_query,\n train_val_test_k_factors=train_val_test_k_factors,\n pickle_path=pickle_path,\n x_transform_func=x_transform,\n )\n\n train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()\n train = Lazy_Iterable_Wrapper(train, episode_transform)\n val = Lazy_Iterable_Wrapper(val, episode_transform)\n test = Lazy_Iterable_Wrapper(test, episode_transform)\n\n if source_or_target_dataset==\"source\":\n train_original_source.append(train)\n val_original_source.append(val)\n test_original_source.append(test)\n\n p.domains_source.extend(\n [domain_prefix + str(u) for u in domains]\n )\n elif source_or_target_dataset==\"target\":\n train_original_target.append(train)\n val_original_target.append(val)\n test_original_target.append(test)\n p.domains_target.extend(\n [domain_prefix + str(u) for u in domains]\n )\n else:\n raise Exception(f\"invalid source_or_target_dataset: {source_or_target_dataset}\")\n ",
"_____no_output_____"
],
[
"for ds in p.datasets:\n add_dataset(**ds)",
"_____no_output_____"
],
[
"# from steves_utils.CORES.utils import (\n# ALL_NODES,\n# ALL_NODES_MINIMUM_1000_EXAMPLES,\n# ALL_DAYS\n# )\n\n# add_dataset(\n# labels=ALL_NODES,\n# domains = ALL_DAYS,\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"cores.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"cores_{u}\"\n# )",
"_____no_output_____"
],
[
"# from steves_utils.ORACLE.utils_v2 import (\n# ALL_DISTANCES_FEET,\n# ALL_RUNS,\n# ALL_SERIAL_NUMBERS,\n# )\n\n\n# add_dataset(\n# labels=ALL_SERIAL_NUMBERS,\n# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"source\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"oracle1_{u}\"\n# )\n",
"_____no_output_____"
],
[
"# from steves_utils.ORACLE.utils_v2 import (\n# ALL_DISTANCES_FEET,\n# ALL_RUNS,\n# ALL_SERIAL_NUMBERS,\n# )\n\n\n# add_dataset(\n# labels=ALL_SERIAL_NUMBERS,\n# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"source\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"oracle2_{u}\"\n# )",
"_____no_output_____"
],
[
"# add_dataset(\n# labels=list(range(19)),\n# domains = [0,1,2],\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"metehan.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"met_{u}\"\n# )",
"_____no_output_____"
],
[
"# # from steves_utils.wisig.utils import (\n# # ALL_NODES_MINIMUM_100_EXAMPLES,\n# # ALL_NODES_MINIMUM_500_EXAMPLES,\n# # ALL_NODES_MINIMUM_1000_EXAMPLES,\n# # ALL_DAYS\n# # )\n\n# import steves_utils.wisig.utils as wisig\n\n\n# add_dataset(\n# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,\n# domains = wisig.ALL_DAYS,\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"wisig.node3-19.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"wisig_{u}\"\n# )",
"_____no_output_____"
],
[
"###################################\n# Build the dataset\n###################################\ntrain_original_source = Iterable_Aggregator(train_original_source, p.seed)\nval_original_source = Iterable_Aggregator(val_original_source, p.seed)\ntest_original_source = Iterable_Aggregator(test_original_source, p.seed)\n\n\ntrain_original_target = Iterable_Aggregator(train_original_target, p.seed)\nval_original_target = Iterable_Aggregator(val_original_target, p.seed)\ntest_original_target = Iterable_Aggregator(test_original_target, p.seed)\n\n# For CNN We only use X and Y. And we only train on the source.\n# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. Finally wrap them in a dataloader\n\ntransform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only\n\ntrain_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)\nval_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)\ntest_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)\n\ntrain_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)\nval_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)\ntest_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)\n\ndatasets = EasyDict({\n \"source\": {\n \"original\": {\"train\":train_original_source, \"val\":val_original_source, \"test\":test_original_source},\n \"processed\": {\"train\":train_processed_source, \"val\":val_processed_source, \"test\":test_processed_source}\n },\n \"target\": {\n \"original\": {\"train\":train_original_target, \"val\":val_original_target, \"test\":test_original_target},\n \"processed\": {\"train\":train_processed_target, \"val\":val_processed_target, \"test\":test_processed_target}\n },\n})",
"_____no_output_____"
],
[
"from steves_utils.transforms import get_average_magnitude, get_average_power\n\nprint(set([u for u,_ in val_original_source]))\nprint(set([u for u,_ in val_original_target]))\n\ns_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))\nprint(s_x)\n\n# for ds in [\n# train_processed_source,\n# val_processed_source,\n# test_processed_source,\n# train_processed_target,\n# val_processed_target,\n# test_processed_target\n# ]:\n# for s_x, s_y, q_x, q_y, _ in ds:\n# for X in (s_x, q_x):\n# for x in X:\n# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)\n# assert np.isclose(get_average_power(x.numpy()), 1.0)\n ",
"{'O_32', 'O_38', 'O_20', 'O_50', 'O_26', 'O_8', 'O_44', 'O_14'}\n"
],
[
"###################################\n# Build the model\n###################################\n# easfsl only wants a tuple for the shape\nmodel = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape))\noptimizer = Adam(params=model.parameters(), lr=p.lr)",
"(2, 200)\n"
],
[
"###################################\n# train\n###################################\njig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)\n\njig.train(\n train_iterable=datasets.source.processed.train,\n source_val_iterable=datasets.source.processed.val,\n target_val_iterable=datasets.target.processed.val,\n num_epochs=p.n_epoch,\n num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,\n patience=p.patience,\n optimizer=optimizer,\n criteria_for_best=p.criteria_for_best,\n)",
"epoch: 1, [batch: 1 / 6720], examples_per_second: 48.3384, train_label_loss: 2.9340, \n"
],
[
"total_experiment_time_secs = time.time() - start_time_secs",
"_____no_output_____"
],
[
"###################################\n# Evaluate the model\n###################################\nsource_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)\ntarget_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)\n\nsource_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)\ntarget_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)\n\nhistory = jig.get_history()\n\ntotal_epochs_trained = len(history[\"epoch_indices\"])\n\nval_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))\n\nconfusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)\nper_domain_accuracy = per_domain_accuracy_from_confusion(confusion)\n\n# Add a key to per_domain_accuracy for if it was a source domain\nfor domain, accuracy in per_domain_accuracy.items():\n per_domain_accuracy[domain] = {\n \"accuracy\": accuracy,\n \"source?\": domain in p.domains_source\n }\n\n# Do an independent accuracy assesment JUST TO BE SURE!\n# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)\n# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)\n# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)\n# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)\n\n# assert(_source_test_label_accuracy == source_test_label_accuracy)\n# assert(_target_test_label_accuracy == target_test_label_accuracy)\n# assert(_source_val_label_accuracy == source_val_label_accuracy)\n# assert(_target_val_label_accuracy == target_val_label_accuracy)\n\nexperiment = {\n \"experiment_name\": p.experiment_name,\n \"parameters\": dict(p),\n \"results\": {\n \"source_test_label_accuracy\": source_test_label_accuracy,\n 
\"source_test_label_loss\": source_test_label_loss,\n \"target_test_label_accuracy\": target_test_label_accuracy,\n \"target_test_label_loss\": target_test_label_loss,\n \"source_val_label_accuracy\": source_val_label_accuracy,\n \"source_val_label_loss\": source_val_label_loss,\n \"target_val_label_accuracy\": target_val_label_accuracy,\n \"target_val_label_loss\": target_val_label_loss,\n \"total_epochs_trained\": total_epochs_trained,\n \"total_experiment_time_secs\": total_experiment_time_secs,\n \"confusion\": confusion,\n \"per_domain_accuracy\": per_domain_accuracy,\n },\n \"history\": history,\n \"dataset_metrics\": get_dataset_metrics(datasets, \"ptn\"),\n}",
"_____no_output_____"
],
[
"ax = get_loss_curve(experiment)\nplt.show()",
"_____no_output_____"
],
[
"get_results_table(experiment)",
"_____no_output_____"
],
[
"get_domain_accuracies(experiment)",
"_____no_output_____"
],
[
"print(\"Source Test Label Accuracy:\", experiment[\"results\"][\"source_test_label_accuracy\"], \"Target Test Label Accuracy:\", experiment[\"results\"][\"target_test_label_accuracy\"])\nprint(\"Source Val Label Accuracy:\", experiment[\"results\"][\"source_val_label_accuracy\"], \"Target Val Label Accuracy:\", experiment[\"results\"][\"target_val_label_accuracy\"])",
"Source Test Label Accuracy: 0.7753255208333333 Target Test Label Accuracy: 0.6873653742595585\nSource Val Label Accuracy: 0.7740234375 Target Val Label Accuracy: 0.6909969829950631\n"
],
[
"json.dumps(experiment)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c4854555d7e5ec66c645ffca82f5ce653cee2
| 148,452 |
ipynb
|
Jupyter Notebook
|
Progetto Ottimizzazione.ipynb
|
marcoguzzonato/Optimization
|
539e5476788fb7f8b24d13f2b2ec2c9b63835236
|
[
"MIT"
] | null | null | null |
Progetto Ottimizzazione.ipynb
|
marcoguzzonato/Optimization
|
539e5476788fb7f8b24d13f2b2ec2c9b63835236
|
[
"MIT"
] | null | null | null |
Progetto Ottimizzazione.ipynb
|
marcoguzzonato/Optimization
|
539e5476788fb7f8b24d13f2b2ec2c9b63835236
|
[
"MIT"
] | null | null | null | 186.030075 | 30,639 | 0.685723 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a9c4c5812cba32bb34f18d8df9b9b718df23664
| 72,310 |
ipynb
|
Jupyter Notebook
|
examples/ViscosityModel.ipynb
|
jwsiegel2510/pycalphad
|
2873edff1912af44b1ea5a416591a7c41b7e7228
|
[
"MIT"
] | 1 |
2020-05-24T10:50:58.000Z
|
2020-05-24T10:50:58.000Z
|
examples/ViscosityModel.ipynb
|
BoKimi/pycalphad
|
023f95713daa2e7054924b25225531f9054f9f80
|
[
"MIT"
] | null | null | null |
examples/ViscosityModel.ipynb
|
BoKimi/pycalphad
|
023f95713daa2e7054924b25225531f9054f9f80
|
[
"MIT"
] | null | null | null | 178.103448 | 13,924 | 0.767847 |
[
[
[
"# Custom Models in pycalphad: Viscosity",
"_____no_output_____"
],
[
"## Viscosity Model Background\n\nWe are going to take a CALPHAD-based property model from the literature and use it to predict the viscosity of Al-Cu-Zr liquids.\n\nFor a binary alloy liquid under small undercooling, Gąsior suggested an entropy model of the form\n$$\\eta = (\\sum_i x_i \\eta_i ) (1 - 2\\frac{S_{ex}}{R})$$\n\nwhere $\\eta_i$ is the viscosity of the element $i$, $x_i$ is the mole fraction, $S_{ex}$ is the excess entropy, and $R$ is the gas constant.\n\nFor more details on this model, see \n\n1. M.E. Trybula, T. Gancarz, W. Gąsior, *Density, surface tension and viscosity of liquid binary Al-Zn and ternary Al-Li-Zn alloys*, Fluid Phase Equilibria 421 (2016) 39-48, [doi:10.1016/j.fluid.2016.03.013](http://dx.doi.org/10.1016/j.fluid.2016.03.013).\n\n2. Władysław Gąsior, *Viscosity modeling of binary alloys: Comparative studies*, Calphad 44 (2014) 119-128, [doi:10.1016/j.calphad.2013.10.007](http://dx.doi.org/10.1016/j.calphad.2013.10.007).\n\n3. Chenyang Zhou, Cuiping Guo, Changrong Li, Zhenmin Du, *Thermodynamic assessment of the phase equilibria and prediction of glass-forming ability of the Al–Cu–Zr system*, Journal of Non-Crystalline Solids 461 (2017) 47-60, [doi:10.1016/j.jnoncrysol.2016.09.031](https://doi.org/10.1016/j.jnoncrysol.2016.09.031).",
"_____no_output_____"
]
],
[
[
"from pycalphad import Database",
"_____no_output_____"
]
],
[
[
"## TDB Parameters\nWe can calculate the excess entropy of the liquid using the Al-Cu-Zr thermodynamic database from Zhou et al.\n\nWe add three new parameters to describe the viscosity (in Pa-s) of the pure elements Al, Cu, and Zr:\n```\n $ Viscosity test parameters\n PARAMETER ETA(LIQUID,AL;0) 2.98150E+02 +0.000281*EXP(12300/(8.3145*T)); 6.00000E+03 \n N REF:0 !\n PARAMETER ETA(LIQUID,CU;0) 2.98150E+02 +0.000657*EXP(21500/(8.3145*T)); 6.00000E+03 \n N REF:0 !\n PARAMETER ETA(LIQUID,ZR;0) 2.98150E+02 +4.74E-3 - 4.97E-6*(T-2128) ; 6.00000E+03 \n N REF:0 !\n```\n\nGreat! However, if we try to load the database now, we will get an error. This is because `ETA` parameters are not supported by default in pycalphad, so we need to tell pycalphad's TDB parser that \"ETA\" should be on the list of supported parameter types.",
"_____no_output_____"
]
],
[
[
"dbf = Database('alcuzr-viscosity.tdb')",
"Failed while parsing: PARAMETER ETA(LIQUID,AL;0) 2.98150E+02 +0.000281*EXP(12300/(8.3145*T)); 6.00000E+03 N REF:0 \nTokens: None\n"
]
],
[
[
"### Adding the `ETA` parameter to the TDB parser ",
"_____no_output_____"
]
],
[
[
"import pycalphad.io.tdb_keywords\npycalphad.io.tdb_keywords.TDB_PARAM_TYPES.append('ETA')",
"_____no_output_____"
]
],
[
[
"Now the database will load:",
"_____no_output_____"
]
],
[
[
"dbf = Database('alcuzr-viscosity.tdb')",
"_____no_output_____"
]
],
[
[
"## Writing the Custom Viscosity Model\n\nNow that we have our `ETA` parameters in the database, we need to write a `Model` class to tell pycalphad how to compute viscosity. All custom models are subclasses of the pycalphad `Model` class.\n\nWhen the `ViscosityModel` is constructed, the `build_phase` method is run and we need to construct the viscosity model after doing all the other initialization using a new method `build_viscosity`. The implementation of `build_viscosity` needs to do four things:\n1. Query the Database for all the `ETA` parameters\n2. Compute their weighted sum\n3. Compute the excess entropy of the liquid\n4. Plug all the values into the Gąsior equation and return the result\n\nSince the `build_phase` method sets the attribute `viscosity` to the `ViscosityModel`, we can access the property using `viscosity` as the output in pycalphad caluclations.",
"_____no_output_____"
]
],
[
[
"from tinydb import where\nimport sympy\nfrom pycalphad import Model, variables as v\n\nclass ViscosityModel(Model):\n def build_phase(self, dbe):\n super(ViscosityModel, self).build_phase(dbe)\n self.viscosity = self.build_viscosity(dbe)\n\n def build_viscosity(self, dbe):\n if self.phase_name != 'LIQUID':\n raise ValueError('Viscosity is only defined for LIQUID phase')\n phase = dbe.phases[self.phase_name]\n param_search = dbe.search\n # STEP 1\n eta_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'ETA') & \\\n (where('constituent_array').test(self._array_validity))\n )\n # STEP 2\n eta = self.redlich_kister_sum(phase, param_search, eta_param_query)\n # STEP 3\n excess_energy = self.GM - self.models['ref'] - self.models['idmix']\n #liquid_mod = Model(dbe, self.components, self.phase_name)\n ## we only want the excess contributions to the entropy\n #del liquid_mod.models['ref']\n #del liquid_mod.models['idmix']\n excess_entropy = -excess_energy.diff(v.T)\n ks = 2\n # STEP 4\n result = eta * (1 - ks * excess_entropy / v.R)\n self.eta = eta\n return result",
"_____no_output_____"
]
],
[
[
"## Performing Calculations\n\nNow we can create an instance of `ViscosityModel` for the liquid phase using the `Database` object we created earlier. We can verify this model has a `viscosity` attribute containing a symbolic expression for the viscosity.",
"_____no_output_____"
]
],
[
[
"mod = ViscosityModel(dbf, ['CU', 'ZR'], 'LIQUID')\nprint(mod.viscosity)",
"(1 + 0.240543628600637*(LIQUID0CU*LIQUID0ZR*(LIQUID0CU - LIQUID0ZR)**3*Piecewise((105.895 - 13.6488*log(T), (T >= 298.15) & (T < 6000.0)), (0, True)) + LIQUID0CU*LIQUID0ZR*(LIQUID0CU - LIQUID0ZR)**2*Piecewise((36.8512*log(T) - 270.5305, (T >= 298.15) & (T < 6000.0)), (0, True)) + LIQUID0CU*LIQUID0ZR*(LIQUID0CU - LIQUID0ZR)*Piecewise((75.3798 - 9.6125*log(T), (T >= 298.15) & (T < 6000.0)), (0, True)) + LIQUID0CU*LIQUID0ZR*Piecewise((392.8485 - 51.3121*log(T), (T >= 298.15) & (T < 6000.0)), (0, True)))/(1.0*LIQUID0CU + 1.0*LIQUID0ZR))*(LIQUID0CU*Piecewise((0.000657*exp(2585.84400745685/T), (T >= 298.15) & (T < 6000.0)), (0, True)) + LIQUID0ZR*Piecewise((0.01531616 - 4.97e-6*T, (T >= 298.15) & (T < 6000.0)), (0, True)))\n"
]
],
[
[
"Finally we calculate and plot the viscosity.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pycalphad import calculate\n\nmod = ViscosityModel(dbf, ['CU', 'ZR'], 'LIQUID')\n\ntemp = 2100\n# NOTICE: we need to tell pycalphad about our model for this phase\nmodels = {'LIQUID': mod}\nres = calculate(dbf, ['CU', 'ZR'], 'LIQUID', P=101325, T=temp, model=models, output='viscosity') \n\nfig = plt.figure(figsize=(6,6))\nax = fig.gca()\nax.scatter(res.X.sel(component='ZR'), 1000 * res.viscosity.values)\nax.set_xlabel('X(ZR)')\nax.set_ylabel('Viscosity (mPa-s)')\nax.set_xlim((0,1))\nax.set_title('Viscosity at {}K'.format(temp));",
"_____no_output_____"
]
],
[
[
"We repeat the calculation for Al-Cu.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pycalphad import calculate\n\ntemp = 1300\nmodels = {'LIQUID': ViscosityModel} # we can also use Model class\nres = calculate(dbf, ['CU', 'AL'], 'LIQUID', P=101325, T=temp, model=models, output='viscosity')\n\nfig = plt.figure(figsize=(6,6))\nax = fig.gca()\nax.scatter(res.X.sel(component='CU'), 1000 * res.viscosity.values)\nax.set_xlabel('X(CU)')\nax.set_ylabel('Viscosity (mPa-s)')\nax.set_xlim((0,1))\nax.set_title('Viscosity at {}K'.format(temp));",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9c5008226870d4c67c1898891c8989259357bb
| 102,216 |
ipynb
|
Jupyter Notebook
|
Python_paper/Section3_3.ipynb
|
LaGauffre/SMCCompoMo
|
242feab1f1a6f923b682cfb8b033bb9c96317dc3
|
[
"MIT"
] | 1 |
2021-06-18T01:42:08.000Z
|
2021-06-18T01:42:08.000Z
|
Python_paper/Section3_3.ipynb
|
LaGauffre/SMCCompoMo
|
242feab1f1a6f923b682cfb8b033bb9c96317dc3
|
[
"MIT"
] | null | null | null |
Python_paper/Section3_3.ipynb
|
LaGauffre/SMCCompoMo
|
242feab1f1a6f923b682cfb8b033bb9c96317dc3
|
[
"MIT"
] | null | null | null | 173.247458 | 31,704 | 0.874374 |
[
[
[
"# Section 3.3",
"_____no_output_____"
]
],
[
[
"%run preamble.py\ndanish = pd.read_csv(\"../Data/danish.csv\").x.values",
"_____no_output_____"
]
],
[
[
"# MLE of composite models",
"_____no_output_____"
]
],
[
[
"parms, BIC, AIC = mle_composite(danish, (1,1,1), \"gam-par\")\nfit_gam_par = pd.DataFrame(np.append(parms, [AIC, BIC])).T\nfit_gam_par.columns = [\"shape\", \"tail\", \"thres\", \"AIC\",\"BIC\"]\nprint(fit_gam_par)\nparms, BIC, AIC = mle_composite(danish, (1,1,1), \"wei-par\")\nfit_wei_par = pd.DataFrame(np.append(parms, [AIC, BIC])).T\nfit_wei_par.columns = [\"shape\", \"tail\", \"thres\", \"AIC\",\"BIC\"]\nprint(fit_wei_par)\nparms, BIC, AIC = mle_composite(danish, (0.5,1,1), \"lnorm-par\")\nfit_lnorm_par = pd.DataFrame(np.append(parms, [AIC, BIC])).T\nfit_lnorm_par.columns = [\"shape\", \"tail\", \"thres\", \"AIC\",\"BIC\"]\nprint(fit_lnorm_par)",
" shape tail thres AIC BIC\n0 35.667524 1.313008 1.155924 7723.681796 7741.144319\n shape tail thres AIC BIC\n0 14.033784 1.261477 1.002991 7686.75154 7704.214063\n shape tail thres AIC BIC\n0 0.196517 1.328223 1.20743 7737.728264 7755.190787\n"
]
],
[
[
"# Bayesian inference and model comparison using SMC",
"_____no_output_____"
]
],
[
[
"np.random.seed(333)\nmodel_prior, a, b = \"gamma\", 0.1*np.array([1,1,1]), 0.1*np.array([1, 1, 1])\npopSize, verbose, smc_method, paralell, nproc = 1000, True, \"likelihood_anealing\", True, 20\nloss_models = ['lnorm-par', \"wei-par\", \"gam-par\"]\n%time traces_like, res_df_like = fit_composite_models_smc(danish,loss_models, model_prior, a, b, popSize, verbose, smc_method, paralell, nproc)\n",
"Fitting lnorm-par model\nFitting wei-par model\nFitting gam-par model\nCPU times: user 1min 13s, sys: 2.36 s, total: 1min 16s\nWall time: 1min 22s\n"
],
[
"np.random.seed(333)\nmodel_prior, a, b = \"gamma\", np.array([0.1,0.1,0.1]), np.array([0.1, 0.1, 0.1])\npopSize, verbose, smc_method, paralell, nproc = 1000, True, \"data_by_batch\", True, 20\nloss_models = ['lnorm-par', \"wei-par\", \"gam-par\"]\n%time traces_data, res_df_data = fit_composite_models_smc(danish,loss_models, model_prior, a, b, popSize, verbose, smc_method, paralell, nproc)\n",
"Fitting lnorm-par model\nFitting wei-par model\n"
]
],
[
[
"## Fitting the gamma-Pareto model",
"_____no_output_____"
]
],
[
[
"np.random.seed(333)\nfig, axs = plt.subplots(1, 3, figsize=(5, 3.5))\nloss_model = \"gam-par\"\nparms_names = ['shape', 'tail', 'thres' ]\nx_labs = ['Shape', 'Tail', 'Threshold']\nfor k in range(3):\n# positions = np.linspace(min(trace_gibbs_gam_par[parms_names[k]]), max(trace_gibbs_gam_par[parms_names[k]]), 1000)\n# kernel = st.gaussian_kde(trace_gibbs_gam_par[parms_names[k]])\n# axs[k].plot(positions, kernel(positions), lw=3, label = \"Gibbs\", color = \"blue\")\n positions = np.linspace(min(traces_like[loss_model][parms_names[k]].values), \n max(traces_like[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_like[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC simulated annealing\", \n color = \"blue\", linestyle = \"dotted\")\n positions = np.linspace(min(traces_data[loss_model][parms_names[k]].values), \n max(traces_data[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_data[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC data by batches\", \n color = \"blue\", linestyle = \"dashed\")\n\n axs[k].axvline(fit_gam_par[parms_names[k]].values, color = \"black\", linestyle = \"dotted\", label = \"mle\")\n axs[k].set_yticks([])\n axs[k].set_xlabel(x_labs[k])\n axs[k].set_xticks(np.round(\n traces_like[loss_model][parms_names[k]].quantile([0.05, 0.95]).values, 2))\n\nhandles, labels = axs[0].get_legend_handles_labels()\nfig.legend(handles, labels, ncol = 2, borderaxespad=-0.2, loc='upper center', \n frameon=False)\n# fig.tight_layout()\nsns.despine()\n\nplt.savefig(\"../Figures/smc_posterior_danish_gamma_par_en.pdf\")",
"_____no_output_____"
]
],
[
[
"## Fitting the Weibull-Pareto model",
"_____no_output_____"
]
],
[
[
"np.random.seed(333)\nfig, axs = plt.subplots(1, 3, figsize=(5, 3.5))\nloss_model = \"wei-par\"\nfor k in range(3):\n# positions = np.linspace(min(trace_gibbs_wei_par[parms_names[k]]), max(trace_gibbs_wei_par[parms_names[k]]), 1000)\n# kernel = st.gaussian_kde(trace_gibbs_wei_par[parms_names[k]])\n# axs[k].plot(positions, kernel(positions), lw=3, label = \"Gibbs\", color = \"green\")\n positions = np.linspace(min(traces_like[loss_model][parms_names[k]].values), \n max(traces_like[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_like[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC simulated annealing\", \n color = \"green\", linestyle = \"dotted\")\n positions = np.linspace(min(traces_data[loss_model][parms_names[k]].values), \n max(traces_data[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_data[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC data by batches\", \n color = \"green\", linestyle = \"dashed\")\n\n axs[k].axvline(fit_wei_par[parms_names[k]].values, color = \"black\", linestyle = \"dotted\", label = \"mle\")\n axs[k].set_yticks([])\n axs[k].set_xlabel(x_labs[k])\n axs[k].set_xticks(np.round(\n traces_like[loss_model][parms_names[k]].quantile([0.05, 0.95]).values, 2))\n\nhandles, labels = axs[0].get_legend_handles_labels()\nfig.legend(handles, labels, ncol = 2, borderaxespad=-0.2, loc='upper center', \n frameon=False)\nsns.despine()\nprint(fit_gam_par[parms_names[0]].values)\nplt.savefig(\"../Figures/smc_posterior_danish_weibull_par_en.pdf\")",
"[35.66752418]\n"
]
],
[
[
"## Fitting the lognormal-Pareto model",
"_____no_output_____"
]
],
[
[
"np.random.seed(333)\nfig, axs = plt.subplots(1, 3, figsize=(5, 3.5))\nloss_model = \"lnorm-par\"\nfor k in range(3):\n# positions = np.linspace(min(trace_gibbs_lnorm_par[parms_names[k]]), max(trace_gibbs_lnorm_par[parms_names[k]]), 1000)\n# kernel = st.gaussian_kde(trace_gibbs_lnorm_par[parms_names[k]])\n# axs[k].plot(positions, kernel(positions), lw=3, label = \"Gibbs\", color = \"red\")\n positions = np.linspace(min(traces_like[loss_model][parms_names[k]].values), \n max(traces_like[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_like[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC simulated annealing\", \n color = \"red\", linestyle = \"dotted\")\n positions = np.linspace(min(traces_data[loss_model][parms_names[k]].values), \n max(traces_data[loss_model][parms_names[k]].values), 1000)\n kernel = st.gaussian_kde(traces_data[loss_model][parms_names[k]].values)\n axs[k].plot(positions, kernel(positions), lw=3, label = \"SMC data by batches\", \n color = \"red\", linestyle = \"dashed\")\n\n axs[k].axvline(fit_lnorm_par[parms_names[k]].values, color = \"black\", linestyle = \"dotted\", label = \"mle\")\n axs[k].set_yticks([])\n axs[k].set_xlabel(x_labs[k])\n axs[k].set_xticks(np.round(\n traces_like[loss_model][parms_names[k]].quantile([0.05, 0.95]).values, 2))\n\nhandles, labels = axs[0].get_legend_handles_labels()\nfig.legend(handles, labels, ncol = 2, borderaxespad=-0.2, loc='upper center', \n frameon=False)\nsns.despine()\nprint(fit_gam_par[parms_names[0]].values)\nplt.savefig(\"../Figures/smc_posterior_danish_lnorm_par_en.pdf\")",
"[35.66752418]\n"
],
[
"print(res_df_data.to_latex(index = False,float_format=\"%.2f\", columns = [\"loss_model\",\"log_marg\",\"model_evidence\", \"DIC\", \"WAIC\"]))\nres_df_data",
"\\begin{tabular}{lrrrr}\n\\toprule\nloss\\_model & log\\_marg & model\\_evidence & DIC & WAIC \\\\\n\\midrule\n lnorm-par & -3881.87 & 0.00 & 7725.54 & 7744.58 \\\\\n wei-par & -3857.10 & 1.00 & 7674.33 & 7689.73 \\\\\n gam-par & -3877.91 & 0.00 & 7711.89 & 7729.27 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"print(res_df_like.to_latex(index = False, float_format=\"%.2f\", columns = [\"loss_model\",\"log_marg\",\"model_evidence\", \"DIC\", \"WAIC\"]))\nres_df_like",
"\\begin{tabular}{lrrrr}\n\\toprule\nloss\\_model & log\\_marg & model\\_evidence & DIC & WAIC \\\\\n\\midrule\n lnorm-par & -3882.81 & 0.00 & 7725.32 & 7746.56 \\\\\n wei-par & -3858.76 & 1.00 & 7674.27 & 7690.02 \\\\\n gam-par & -3878.31 & 0.00 & 7711.64 & 7730.78 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a9c52dbc877d6db3330030fc581daa9b9e0bf2a
| 1,988 |
ipynb
|
Jupyter Notebook
|
kubeflow/notebooks/00_Explore_Environment.ipynb
|
ThomasHenckel/pipeline
|
6302abd631bc8461cc92c48c135f285ab866a9f8
|
[
"Apache-2.0"
] | null | null | null |
kubeflow/notebooks/00_Explore_Environment.ipynb
|
ThomasHenckel/pipeline
|
6302abd631bc8461cc92c48c135f285ab866a9f8
|
[
"Apache-2.0"
] | null | null | null |
kubeflow/notebooks/00_Explore_Environment.ipynb
|
ThomasHenckel/pipeline
|
6302abd631bc8461cc92c48c135f285ab866a9f8
|
[
"Apache-2.0"
] | 1 |
2019-06-30T09:56:38.000Z
|
2019-06-30T09:56:38.000Z
| 19.300971 | 79 | 0.509557 |
[
[
[
"import tensorflow as tf\nprint('TensorFlow Version %s' % tf.__version__)",
"_____no_output_____"
],
[
"import sys\nprint('Python Version %s' % sys.version)",
"_____no_output_____"
],
[
"!pip list",
"_____no_output_____"
],
[
"%%bash\nconda list",
"_____no_output_____"
],
[
"import tensorflow as tf\n\ntpu_name = 'tpu-us-central1-a-00'\ntpu_zone = 'us-central1-a'\ntpu_project = 'pipelineai2'\n\ntpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu=tpu_name,\n zone=tpu_zone,\n project=tpu_project)\n\nprint(tpu_cluster_resolver.get_master())",
"_____no_output_____"
],
[
"%%html\n\n<iframe src=\"https://pipeline.ai\" width=\"800px\" height=\"600px\"/>",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c54f0e119f04aa8b84102f770860f4391c409
| 50,077 |
ipynb
|
Jupyter Notebook
|
_notebooks/2021-05-07-Basics of Poisson.ipynb
|
peiyiHung/mywebsite
|
4f6bb8dab272960d39e84c04b545f5417278a081
|
[
"Apache-2.0"
] | 1 |
2020-12-16T13:40:27.000Z
|
2020-12-16T13:40:27.000Z
|
_notebooks/2021-05-07-Basics of Poisson.ipynb
|
peiyiHung/mywebsite
|
4f6bb8dab272960d39e84c04b545f5417278a081
|
[
"Apache-2.0"
] | 1 |
2021-07-03T06:24:55.000Z
|
2021-07-03T06:24:56.000Z
|
_notebooks/2021-05-07-Basics of Poisson.ipynb
|
peiyiHung/mywebsite
|
4f6bb8dab272960d39e84c04b545f5417278a081
|
[
"Apache-2.0"
] | null | null | null | 157.971609 | 38,992 | 0.874793 |
[
[
[
"# \"[Prob] Basics of the Poisson Distribution\"\n> \"Some useful facts about the Poisson distribution\"\n\n- toc:false\n- branch: master\n- badges: false\n- comments: true\n- author: Peiyi Hung\n- categories: [category, learning, probability]",
"_____no_output_____"
],
[
"# Introduction",
"_____no_output_____"
],
[
"The Poisson distribution is an important discrete probability distribution prevalent in a variety of fields. In this post, I will present some useful facts about the Poisson distribution. Here's the concepts I will discuss in the post:\n\n* PMF, expectation and variance of Poisson\n* In what situation we can use it?\n* Sum of indepentent Poisson is also a Poisson\n* Relationship with the Binomial distribution",
"_____no_output_____"
],
[
"# PMF, Expectation and Variance",
"_____no_output_____"
],
[
"First, we define what's Poisson distribution.",
"_____no_output_____"
],
[
"Let X be a Poisson random variable with a parameter $\\lambda$, where $\\lambda >0$. The pmf of X would be:\n$$P(X=x) = \\frac{e^{-\\lambda}\\lambda^{x}}{x!}, \\quad \\text{for } k = 0, 1,2,3,\\dots$$\nwhere $x$ can only be non-negative integer.\n\nThis is a valid pmf since \n$$\\sum_{k=0}^{\\infty} \\frac{e^{-\\lambda}\\lambda^{k}}{k!} = e^{-\\lambda}\\sum_{k=0}^{\\infty} \\frac{\\lambda^{k}}{k!}= e^{-\\lambda}e^{\\lambda}=1$$\nwhere $\\displaystyle\\sum_{k=0}^{\\infty} \\frac{\\lambda^{k}}{k!}$ is the Taylor expansion of $e^{\\lambda}$.",
"_____no_output_____"
],
[
"The expectation and the variance of the Poisson distribution are both $\\lambda$. The derivation of this result is just some pattern recognition of $\\sum_{k=0}^{\\infty} \\frac{\\lambda^{k}}{k!}=e^{\\lambda}$, so I omit it here.",
"_____no_output_____"
],
[
"# In what situation can we use it?",
"_____no_output_____"
],
[
"The Poisson distribution is often applied to the situation where we are counting the number of successes or an event happening in a time interval or a particular region, and there are a large number of trials with a small probability of success. The parameter $\\lambda$ is the rate parameter which indicates the average number of successes in a time interval or a region.\n\nHere are some examples:\n* The number of emails you receive in an hour.\n* The number of chips in a chocolate chip cookie.\n* The number of earthquakes in a year in some region of the world.\n\nAlso, let's consider an example probability problem.\n\n**Example problem 1**\n> Raindrops are falling at an average rate of 20 drops per square inch per minute. Find the probability that the region has no rain drops in a given 1-minute time interval.\n\nThe success in this problem is one raindrop. The average rate is 20, so $\\lambda=20$. Let $X$ be the raindrops that region has in a minute. We would model $X$ with Pois$(20)$, so the probability we concerned would be\n$$P(X=0) = \\frac{e^{-20}20^0}{0!}=e^{-20} \\approx 2.0611\\times 10 ^{-9}$$\n\nIf we are concerned with raindrops in a 3-second time interval in 5 square inches, then $$\\lambda = 20\\times\\frac{1}{20} \\text{ minutes} \\times5 \\text{ square inches} = 5$$\nLet $Y$ be raindrops in a 3-second time interval. $Y$ would be Pois$(5)$, so $P(Y=0) = e^{-5} \\approx 0.0067$.",
"_____no_output_____"
],
[
"# Sum of Independent Poisson",
"_____no_output_____"
],
[
"The sum of independent Poisson would also be Poisson. Let $X$ be Pois$(\\lambda_1)$ and $Y$ be Pois$(\\lambda_2)$. If $T=X+Y$, then $T \\sim \\text{Pois}(\\lambda_1 + \\lambda_2)$.\n\nTo get pmf of $T$, we should first apply the law of total probability:\n\n$$\nP(X+Y=t) = \\sum_{k=0}^{t}P(X+Y=t|X=k)P(X=k) \n$$\nSince they are independent, we got\n$$\n\\sum_{k=0}^{t}P(X+Y=t|X=k)P(X=k) = \\sum_{k=0}^{t}P(Y=t-k)P(X=k)\n$$\nNext, we plug in the pmf of Poisson:\n$$\n\\sum_{k=0}^{t}P(Y=t-k)P(X=k) = \\sum_{k=0}^{t}\\frac{e^{-\\lambda_2}\\lambda_2^{t-k}}{(t-k)!}\\frac{e^{-\\lambda_2}\\lambda_1^k}{k!} = \\frac{e^{-(\\lambda_1+\\lambda_2)}}{t!}\\sum_{k=0}^{t} {t \\choose k}\\lambda_1^{k}\\lambda_2^{t-k}\n$$\nFinally, by Binomial theorem, we got\n$$\nP(X+Y=t) = \\frac{e^{-(\\lambda_1+\\lambda_2)}(\\lambda_1+\\lambda_2)^t}{t!}\n$$\nwhich is the pmf of Pois$(\\lambda_1 + \\lambda_2)$.",
"_____no_output_____"
],
[
"# Relationship with the Binomial distribution",
"_____no_output_____"
],
[
"We can obtain Poisson from Binomial and can also obtain Binomial to Poisson. Let's first see how we get the Binomial distribution from the Poisson distribution",
"_____no_output_____"
],
[
"**From Poisson to Binomial**\n\nIf $X \\sim$ Pois$(\\lambda_1)$ and $Y \\sim$ Pois$(\\lambda_2)$, and they are independent, then the conditional distribution of $X$ given $X+Y=n$ is Bin$(n, \\lambda_1/(\\lambda_1 + \\lambda_2))$. Let's derive the pmf of $X$ given $X+Y=n$.\n\nBy Bayes' rule and the indenpendence between $X$ and $Y$:\n$$\nP(X=x|X+Y=n) = \\frac{P(X+Y=n|X=x)P(X=x)}{P(X+Y=n)} = \\frac{P(Y=n-k)P(X=x)}{P(X+Y=n)}\n$$\n\nFrom the previous section, we know $X+Y \\sim$ Poin$(\\lambda_1 + \\lambda_2)$. Use this fact, we get\n$$\nP(X=x|X+Y=n) = \\frac{ \\big(\\frac{e^{-\\lambda_2}\\lambda_2^{n-k}}{(n-k)!}\\big) \\big( \\frac{e^{\\lambda_1\\lambda_1^k}}{k!} \\big)}{ \\frac{e^{-(\\lambda_1 + \\lambda_2)}(\\lambda_1 + \\lambda_2)^n}{n!}} = {n\\choose k}\\bigg(\\frac{\\lambda_1}{\\lambda_1+\\lambda_2}\\bigg)^k \\bigg(\\frac{\\lambda_2}{\\lambda_1+\\lambda_2}\\bigg)^{n-k}\n$$\nwhich is the Bin$(n, \\lambda_1/(\\lambda_1 + \\lambda_2))$ pmf.\n\n**From Binomial to Poisson**\n\nWe can approximate Binomial by Poisson when $n \\rightarrow \\infty$ and $p \\rightarrow 0$, and $\\lambda = np$.\n\nThe pmf of Binomial is\n$$\nP(X=k) = {n \\choose k}p^{k}(1-p)^{n-k} = {n \\choose k}\\big(\\frac{\\lambda}{n}\\big)^{k}\\big(1-\\frac{\\lambda}{n}\\big)^n\\big(1-\\frac{\\lambda}{n}\\big)^{-k}\n$$\nBy some algebra manipulation, we got\n$$\nP(X=k) = \\frac{\\lambda^{k}}{k!}\\frac{n(n-1)\\dots(n-k+1)}{n^k}\\big(1-\\frac{\\lambda}{n}\\big)^n\\big(1-\\frac{\\lambda}{n}\\big)^{-k}\n$$\nWhen $n \\rightarrow \\infty$, we got:\n$$\n\\frac{n(n-1)\\dots(n-k+1)}{n^k} \\rightarrow 1,\\\\\n\\big(1-\\frac{\\lambda}{n}\\big)^n \\rightarrow e^{-\\lambda}, \\text{and}\\\\ \n\\big(1-\\frac{\\lambda}{n}\\big)^{-k} \\rightarrow 1\n$$\nTherefore, $P(X=k) = \\frac{e^{-\\lambda}\\lambda^k}{k!}$ when $n \\rightarrow \\infty$.\n\nLet's see an example on how to use Poisson to approximate Binomial.",
"_____no_output_____"
],
[
"**Example problem 2**\n>Ten million people enter a certain lottery. For each person, the chance of winning is one in ten million, independently. Find a simple, good approximation for the PMF of the number of people who win the lottery.\n\nLet $X$ be the number of people winning the lottery. $X$ would be Bin$(10000000, 1/10000000)$ and $E(X) = 1$. We can approximate the pmf of $X$ by Pois$(1)$:\n$$\nP(X=k) \\approx \\frac{1}{e\\cdot k!}\n$$",
"_____no_output_____"
],
[
"Let's see if this approximation is accurate by Python code.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n\nfrom scipy.stats import binom\nfrom math import factorial, exp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef pois(k):\n return 1 / (exp(1) * factorial(k))\n\nn = 10000000\np = 1/10000000\n\nk = np.arange(10)\nbinomial = binom.pmf(k, n, p)\npoisson = [pois(i) for i in k]\n\nfig, ax = plt.subplots(ncols=2, nrows=1, figsize=(15, 4), dpi=120)\nax[0].plot(k, binomial)\nax[0].set_title(\"PMF of Binomial\")\nax[0].set_xlabel(r\"$X=k$\")\nax[0].set_xticks(k)\n\nax[1].plot(k, poisson)\nax[1].set_title(\"Approximation by Poisson\")\nax[1].set_xlabel(r\"X=k\")\nax[1].set_xticks(k)\n\nplt.tight_layout();",
"_____no_output_____"
]
],
[
[
"The approximation is quite accurate since these two graphs are almost identical.",
"_____no_output_____"
],
[
"**Reference**\n1. *Introduction to Probability* by Joe Blitzstein and Jessica Hwang.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a9c557b9f818fd38b37d08dc03d50980cda439c
| 11,752 |
ipynb
|
Jupyter Notebook
|
bronze/.ipynb_checkpoints/B54_Superdense_Coding-checkpoint.ipynb
|
QRussia/basics-of-quantum-computing-translate
|
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
bronze/.ipynb_checkpoints/B54_Superdense_Coding-checkpoint.ipynb
|
QRussia/basics-of-quantum-computing-translate
|
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
bronze/.ipynb_checkpoints/B54_Superdense_Coding-checkpoint.ipynb
|
QRussia/basics-of-quantum-computing-translate
|
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | 37.909677 | 309 | 0.539398 |
[
[
[
"<table> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"..\\images\\qworld.jpg\" width=\"25%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by <a href=\"http://abu.lu.lv\" target=\"_blank\">Abuzer Yakaryilmaz</a> (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n </td> \n</tr></table>",
"_____no_output_____"
],
[
"<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 
& 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $",
"_____no_output_____"
],
[
"<h2>Entanglement and Superdense Coding</h2>\n\n[Watch Lecture](https://youtu.be/ZzRcItzUF2U)\n\nAsja has a qubit, initially set to $ \\ket{0} $.\n\nBalvis has a qubit, initially set to $ \\ket{0} $.",
"_____no_output_____"
],
[
"<h3> Entanglement </h3>\n \nAsja applies Hadamard operator to her qubit. \n\nThe quantum state of Asja's qubit is $ \\stateplus $.\n\nThen, Asja and Balvis combine their qubits. Their quantum state is\n\n$ \\stateplus \\otimes \\vzero = \\myvector{ \\frac{1}{\\sqrt{2}} \\\\ 0 \\\\ \\frac{1}{\\sqrt{2}} \\\\ 0 } $.\n",
"_____no_output_____"
],
[
"Asja and Balvis apply CNOT operator on two qubits.\n\nThe new quantum state is\n\n$ \\CNOT \\myvector{ \\frac{1}{\\sqrt{2}} \\\\ 0 \\\\ \\frac{1}{\\sqrt{2}} \\\\ 0 } = \\myvector{ \\frac{1}{\\sqrt{2}} \\\\ 0 \\\\0 \\\\ \\frac{1}{\\sqrt{2}} } = \\frac{1}{\\sqrt{2}}\\ket{00} + \\frac{1}{\\sqrt{2}}\\ket{11} $.\n\nAt this moment, Asja's and Balvis' qubits are correlated to each other.\n\nIf we measure both qubits, we can observe either state $ \\ket{00} $ or state $ \\ket{11} $. \n\nSuppose that Asja observes her qubit secretly. \n<ul>\n <li> When Asja sees the result $ \\ket{0} $, then Balvis' qubit also collapses to state $ \\ket{0} $. Balvis cannot observe state $ \\ket{1} $. </li>\n <li> When Asja sees the result $ \\ket{1} $, then Balvis' qubit also collapses to state $ \\ket{1} $. Balvis cannot observe state $ \\ket{0} $. </li>\n</ul>\n \nExperimental results have confirmed that this happens even if there is a physical distance between Asja's and Balvis' qubits. \n\nIt seems correlated quantum particales can \"affect each other\" instantly, even if they are in the different part of the universe. \n\nIf two qubits are correlated in this way, then we say that they are <b>entangled</b>.\n\n<i> <u>Technical note</u>: \n \nIf the quantum state of two qubits can be written as $ \\ket{u} \\otimes \\ket{v} $, then two qubits are not correlated, where $ \\ket{u} $ and $ \\ket{v} $ are the quantum states of the first and second qubits.\n\nOn the other hand, if the quantum state of two qubits cannot be written as $ \\ket{u} \\otimes \\ket{v} $, then there is an entanglement between the qubits.\n</i>\n\n<b> Entangled qubits can be useful </b>",
"_____no_output_____"
],
[
"<h3> The quantum communication </h3>\n\nAfter having the entanglement, Balvis takes his qubit and goes away.\n\nAsja will send two classical bits of information by only sending her qubit.\n\n<img src=\"../images/superdense_coding.png\">\n\n<font size=\"-2\">source: https://fi.m.wikipedia.org/wiki/Tiedosto:Superdense_coding.png </font>",
"_____no_output_____"
],
[
"Now, we describe this protocol.\n\nAsja has two bits of classical information: $ a,b \\in \\{0,1\\} $. \n\nThere are four possible values for the pair $ (a,b) $: $ (0,0), (0,1), (1,0),\\mbox{ or } (1,1) $. \n\nIf $a$ is 1, then Asja applies z-gate, i.e., $ Z = \\Z $, to her qubit.\n\nIf $b$ is 1, then Asja applies x-gate (NOT operator) to her qubit.\n\nThen, Asja sends her qubit to Balvis.",
"_____no_output_____"
],
[
"<h3> After the communication </h3>\n\nBalvis has both qubits.\n\nBalvis applies cx-gate (CNOT operator), where Asja's qubit is the controller.\n\nThen, Balvis applies h-gate (Hadamard operator) to Asja's qubit.\n\nBalvis measures both qubits. \n\nThe measurement result will be exactly $ (a,b) $.",
"_____no_output_____"
],
[
"<h3> Task 1</h3>\n\nVerify the correctness of the above protocol.\n\nFor each pair of $ (a,b) \\in \\left\\{ (0,0), (0,1), (1,0),(1,1) \\right\\} $:\n- Create a quantum curcuit with two qubits: Asja's and Balvis' qubits\n- Both are initially set to $ \\ket{0} $\n- Apply h-gate (Hadamard) to the Asja's qubit\n- Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)\n\nAssume that both qubits are separated from each other.\n\n<ul>\n <li> If $ a $ is 1, then apply z-gate to Asja's qubit. </li>\n <li> If $ b $ is 1, then apply x-gate (NOT) to Asja's qubit. </li>\n</ul>\n\nAssume that Asja sends her qubit to Balvis.\n- Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)\n- Apply h-gate (Hadamard) to the Asja's qubit\n- Measure both qubits and compare the results with pair $ (a,b) $",
"_____no_output_____"
]
],
[
[
"# import all necessary objects and methods for quantum circuits\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nall_pairs = ['00','01','10','11']\n\n#\n# your code is here\n#\n",
"_____no_output_____"
]
],
[
[
"<a href=\"B54_Superdense_Coding_Solutions.ipynb#task1\">click for our solution</a>",
"_____no_output_____"
],
[
"<h3> Task 2 </h3>\n\nVerify each case by tracing the state vector (on paper). \n\n_Hint: Representing quantum states as the linear combinations of basis states makes calculation easier._",
"_____no_output_____"
],
[
"<h3> Task 3</h3>\n\nCan the above set-up be used by Balvis?\n\nVerify that the following modified protocol allows Balvis to send two classical bits by sending only his qubit.\n\nFor each pair of $ (a,b) \\in \\left\\{ (0,0), (0,1), (1,0),(1,1) \\right\\} $:\n- Create a quantum curcuit with two qubits: Asja's and Balvis' qubits\n- Both are initially set to $ \\ket{0} $\n- Apply h-gate (Hadamard) to the Asja's qubit\n- Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)\n\nAssume that both qubits are separated from each other.\n\n<ul>\n <li> If $ a $ is 1, then apply z-gate to Balvis' qubit. </li>\n <li> If $ b $ is 1, then apply x-gate (NOT) to Balvis' qubit. </li>\n</ul>\n\nAssume that Balvis sends his qubit to Asja.\n- Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)\n- Apply h-gate (Hadamard) to the Asja's qubit\n- Measure both qubits and compare the results with pair $ (a,b) $",
"_____no_output_____"
]
],
[
[
"# import all necessary objects and methods for quantum circuits\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nall_pairs = ['00','01','10','11']\n\n#\n# your code is here\n#\n",
"_____no_output_____"
]
],
[
[
"<a href=\"B54_Superdense_Coding_Solutions.ipynb#task3\">click for our solution</a>",
"_____no_output_____"
],
[
"<h3> Task 4 </h3>\n\nVerify each case by tracing the state vector (on paper). \n\n_Hint: Representing quantum states as the linear combinations of basis states makes calculation easier._",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a9c5e5b4b3402fcbb48f863ad480691213dd6d7
| 5,981 |
ipynb
|
Jupyter Notebook
|
notebooks/transcript_to_split_BUILD_wavs.ipynb
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/transcript_to_split_BUILD_wavs.ipynb
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/transcript_to_split_BUILD_wavs.ipynb
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | 1 |
2021-07-28T02:13:21.000Z
|
2021-07-28T02:13:21.000Z
| 26.117904 | 130 | 0.539876 |
[
[
[
"# Transcript to BUILD wavs",
"_____no_output_____"
]
],
[
[
"from IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))",
"_____no_output_____"
],
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
],
[
"from glob import glob\nimport os\nfrom matplotlib.pylab import *\nimport librosa\nimport torch\nfrom epoch_time import epoch_time\nfrom tqdm.notebook import tqdm\nfrom txt_to_stm import txt_to_stm\nimport pandas as pd\nimport numpy as np\nfrom padarray import padarray\nfrom to_samples import to_samples\nfrom torch.utils.data import TensorDataset, DataLoader\nimport audioread\nimport random\nimport soundfile as sf\nfrom pathlib import Path",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"stage='NIST'\nsample_rate=8000\nwindow = sample_rate\nH=window",
"_____no_output_____"
],
[
"transcripts = list(sorted(glob(f'{stage}/*/build/transcription/*.txt')))\nlen(transcripts)",
"_____no_output_____"
],
[
"audio_files=[x.replace('/transcription/', '/audio/').replace('.txt','.wav') for x in transcripts]",
"_____no_output_____"
],
[
"for transcript_file in tqdm(transcripts):\n audio_file = transcript_file.replace('/transcription/', '/audio/').replace('.txt','.wav')\n if not os.path.exists(audio_file):\n print('missing', audio_file)\n continue\n \n # Create split dirs\n audio_dir=os.path.dirname(audio_file)\n audio_split_dir=audio_dir.replace('/audio', '/audio_split')\n Path(audio_split_dir).mkdir(parents=True, exist_ok=True)\n transcript_dir=os.path.dirname(transcript_file)\n transcript_split_dir=transcript_dir.replace('/transcription', '/transcription_split')\n Path(transcript_split_dir).mkdir(parents=True, exist_ok=True)\n \n # Load audio\n file = \"_\".join(os.path.basename(transcript_file).split(\"_\")[:-1])\n channel = os.path.basename(transcript_file).split(\"_\")[-1].split(\".\")[-2]\n transcript_df = pd.read_csv(transcript_file, sep = \"\\n\", header = None, names = [\"content\"])\n result = txt_to_stm(transcript_df, file, channel)\n speech=[(float(x[-3]), float(x[-2]), x[-1]) for x in result if len(x)==6]\n x_np,sr=librosa.load(audio_file, sr=sample_rate)\n with audioread.audio_open(audio_file) as f:\n sr = f.samplerate\n if sr != sample_rate:\n print('RESIZING', sr, audio_file)\n sf.write(audio_file, x_np, sample_rate)\n \n # Split audio\n speech_segments=[(int(a*sample_rate), int(b*sample_rate), words) for (a,b,words) in speech]\n for i, (lower, upper, words) in enumerate(speech_segments):\n audio_split_file=f\"{audio_file[0:-4].replace('/audio/','/audio_split/')}_{i:03d}.wav\"\n sf.write(audio_split_file, x_np[lower:upper], sample_rate)\n\n transcript_split_file=f\"{transcript_file[0:-4].replace('/transcription/','/transcription_split/')}_{i:03d}.txt\"\n with open(transcript_split_file,'w') as f:\n f.write(words)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c69d7a5eb254668120f03043f8fb606a84261
| 18,486 |
ipynb
|
Jupyter Notebook
|
doc/handbook/InitialValues/InitialValues.ipynb
|
FDYdarmstadt/BoSSS
|
974f3eee826424a213e68d8d456d380aeb7cd7e9
|
[
"Apache-2.0"
] | 22 |
2017-06-08T05:53:17.000Z
|
2021-05-25T13:12:17.000Z
|
doc/handbook/InitialValues/InitialValues.ipynb
|
FDYdarmstadt/BoSSS
|
974f3eee826424a213e68d8d456d380aeb7cd7e9
|
[
"Apache-2.0"
] | 1 |
2020-07-20T15:32:56.000Z
|
2020-07-20T15:34:22.000Z
|
doc/handbook/InitialValues/InitialValues.ipynb
|
FDYdarmstadt/BoSSS
|
974f3eee826424a213e68d8d456d380aeb7cd7e9
|
[
"Apache-2.0"
] | 12 |
2018-01-05T19:52:35.000Z
|
2021-05-07T07:49:27.000Z
| 26.295875 | 158 | 0.456508 |
[
[
[
"\r\n# Introduction\r\n",
"_____no_output_____"
]
],
[
[
"#r \"BoSSSpad.dll\"\r\nusing System;\r\nusing System.Collections.Generic;\r\nusing System.Linq;\r\nusing ilPSP;\r\nusing ilPSP.Utils;\r\nusing BoSSS.Platform;\r\nusing BoSSS.Platform.LinAlg;\r\nusing BoSSS.Foundation;\r\nusing BoSSS.Foundation.XDG;\r\nusing BoSSS.Foundation.Grid;\r\nusing BoSSS.Foundation.Grid.Classic;\r\nusing BoSSS.Foundation.Grid.RefElements;\r\nusing BoSSS.Foundation.IO;\r\nusing BoSSS.Solution;\r\nusing BoSSS.Solution.Control;\r\nusing BoSSS.Solution.GridImport;\r\nusing BoSSS.Solution.Statistic;\r\nusing BoSSS.Solution.Utils;\r\nusing BoSSS.Solution.AdvancedSolvers;\r\nusing BoSSS.Solution.Gnuplot;\r\nusing BoSSS.Application.BoSSSpad;\r\nusing BoSSS.Application.XNSE_Solver;\r\nusing static BoSSS.Application.BoSSSpad.BoSSSshell;\r\nInit();\r\n",
"_____no_output_____"
]
],
[
[
"# Note: \r\n- Setting Boundary values and initial values is similar; \r\n- For most solvers, inital and boundary values are set the same way;\r\n- We will use the incompressible solver as an example:",
"_____no_output_____"
]
],
[
[
"using BoSSS.Application.XNSE_Solver;",
"_____no_output_____"
]
],
[
[
"Create a control object:",
"_____no_output_____"
]
],
[
[
"var C = new XNSE_Control();",
"_____no_output_____"
]
],
[
[
"\r\n# 1 From Formulas\r\nIf the Formula is simple enough to be represented by C\\# code,\r\nit can be embedded in the control file.\r\n\r\nHowever, the code bust be put into a string, since it is not \r\npossible to serialize classes/objects from the notebook\r\ninto a control object:",
"_____no_output_____"
]
],
[
[
"string code = \r\n \"static class MyInitialValue {\" // class must be static!\r\n \r\n // Warning: static constants are allowed,\r\n // but any changes outside of the current text box in BoSSSpad\r\n // will not be recorded for the code that is passed to the solver.\r\n+ \" public static double alpha = 0.7;\"\r\n \r\n // a method, which should be used for an initial value,\r\n // must be static!\r\n+ \" public static double VelocityX(double[] X, double t) {\"\r\n+ \" double x = X[0];\"\r\n+ \" double y = X[1];\"\r\n+ \" return Math.Sin(x*y*alpha);\"\r\n+ \" }\" \r\n+ \"}\";",
"_____no_output_____"
],
[
"var fo = new BoSSS.Solution.Control.Formula(\"MyInitialValue.VelocityX\", \r\n true, code);",
"_____no_output_____"
]
],
[
[
"Use the BoSSSpad-intrinsic **GetFormulaObject** to set tie inital value:",
"_____no_output_____"
]
],
[
[
"C.AddInitialValue(\"VelocityX\", fo);",
"_____no_output_____"
],
[
"/// Deprecated:\r\n/// Note: such a declaration is very restrictive;\r\n/// \\code{GetFormulaObject} works only for \r\n/// \\begin{itemize}\r\n/// \\item a static class\r\n/// \\item no dependence on any external parameters\r\n/// \\end{itemize}\r\n/// E.g. the following code would only change the behavior in BoSSSpad,\r\n/// but not the code that is passed to the solver:",
"_____no_output_____"
],
[
"//Deprecated:\r\n//MyInitialValue.alpha = 0.5;\r\n//MyInitialValue.VelocityX(new double[]{ 0.5, 0.5 }, 0.0);",
"_____no_output_____"
],
[
"C.InitialValues[\"VelocityX\"].Evaluate(new double[]{ 0.5, 0.5 }, 0.0)",
"_____no_output_____"
]
],
[
[
"# 2 Advanced functions\r\n\r\nSome more advanced mathematical functions, e.g.\r\nJacobian elliptic functions $\\text{sn}(u|m)$, $\\text{cn}(u|m)$ and $\\text{dn}(u|m)$\r\nare available throug the GNU Scientific Library, for which BoSSS provides\r\nbindings, see e.g.\r\n**BoSSS.Platform.GSL.gsl\\_sf\\_elljac\\_e**",
"_____no_output_____"
],
[
"\r\n## 2.1 From MATLAB code\r\nAsssume e.g. the following MATLAB code; obviously, this could \r\nalso be implemented in C\\#, we yust use something smple for demonstration:",
"_____no_output_____"
]
],
[
[
"string[] MatlabCode = new string[] {\r\n@\"[n,d2] = size(X_values);\",\r\n@\"u=zeros(2,n);\",\r\n@\"for k=1:n\",\r\n@\"X=[X_values(k,1),X_values(k,2)];\",\r\n@\"\",\r\n@\"u_x_main = -(-sqrt(X(1).^ 2 + X(2).^ 2) / 0.3e1 + 0.4e1 / 0.3e1 * (X(1).^ 2 + X(2).^ 2) ^ (-0.1e1 / 0.2e1)) * sin(atan2(X(2), X(1)));\",\r\n@\"u_y_main = (-sqrt(X(1).^ 2 + X(2).^ 2) / 0.3e1 + 0.4e1 / 0.3e1 * (X(1).^ 2 + X(2).^ 2) ^ (-0.1e1 / 0.2e1)) * cos(atan2(X(2), X(1)));\",\r\n@\"\", \r\n@\"u(1,k)=u_x_main;\",\r\n@\"u(2,k)=u_y_main;\",\r\n@\"end\" };",
"_____no_output_____"
]
],
[
[
"We can evaluate this code in **BoSSS** using the MATLAB connector;\r\nWe encapsulate it in a **ScalarFunction** which allows \r\n**vectorized** evaluation \r\n(multiple evaluatiuons in one function call) e\r\nof some function.\r\n\r\nThis is much more efficient, since there will be significant overhead\r\nfor calling MATLAB (starting MATLAB, checking the license, \r\ntransfering data, etc.).",
"_____no_output_____"
]
],
[
[
"using ilPSP.Connectors.Matlab;",
"_____no_output_____"
],
[
"ScalarFunction VelocityXInitial = \r\ndelegate(MultidimensionalArray input, MultidimensionalArray output) {\r\n int N = input.GetLength(0); // number of points which we evaluate \r\n // at once.\r\n var output_vec = MultidimensionalArray.Create(2, N); // the MATLAB code\r\n // returns an entire vector.\r\n using(var bmc = new BatchmodeConnector()) {\r\n bmc.PutMatrix(input,\"X_values\");\r\n \r\n foreach(var line in MatlabCode) {\r\n bmc.Cmd(line); \r\n }\r\n \r\n bmc.GetMatrix(output_vec, \"u\");\r\n \r\n bmc.Execute(); // Note: 'Execute' has to be *after* 'GetMatrix'\r\n }\r\n output.Set(output_vec.ExtractSubArrayShallow(0,-1)); // extract row 0 from \r\n // 'output_vec' and store it in 'output'\r\n};",
"_____no_output_____"
]
],
[
[
"We test our implementation:",
"_____no_output_____"
]
],
[
[
"var inputTest = MultidimensionalArray.Create(3,2); // set some test values for input\r\ninputTest.SetColumn(0, GenericBlas.Linspace(1,2,3));\r\ninputTest.SetColumn(1, GenericBlas.Linspace(2,3,3));\r\n \r\nvar outputTest = MultidimensionalArray.Create(3); // allocate memory for output",
"_____no_output_____"
],
[
"VelocityXInitial(inputTest, outputTest);",
"_____no_output_____"
]
],
[
[
"We recive the following velocity values for our input coordinates:",
"_____no_output_____"
]
],
[
[
"\r\noutputTest.To1DArray()",
"_____no_output_____"
]
],
[
[
"\r\n# Projecting the MATLAB function to a DG field\r\n\r\nAs for a standard calculation, we create a mesh, save it to some database\r\nand set the mesh in the control object.",
"_____no_output_____"
]
],
[
[
"var nodes = GenericBlas.Linspace(1,2,11);\r\nGridCommons grid = Grid2D.Cartesian2DGrid(nodes,nodes);",
"_____no_output_____"
],
[
"var db = CreateTempDatabase();",
"_____no_output_____"
],
[
"db.SaveGrid(ref grid);",
"_____no_output_____"
],
[
"C.SetGrid(grid);",
"_____no_output_____"
]
],
[
[
"We create a DG field for the $x$-velocity on our grid:",
"_____no_output_____"
]
],
[
[
"var gdata = new GridData(grid);",
"_____no_output_____"
],
[
"var b = new Basis(gdata, 3); // use DG degree 2",
"_____no_output_____"
],
[
"var VelX = new SinglePhaseField(b,\"VelocityX\"); // important: name the DG field\r\n// equal to initial value name",
"_____no_output_____"
]
],
[
[
"Finally, we are able to project the MATLAB function onto the DG field:",
"_____no_output_____"
]
],
[
[
"//VelX.ProjectField(VelocityXInitial);",
"_____no_output_____"
]
],
[
[
"One might want to check the data visually, so it can be exported\r\nin the usual fashion",
"_____no_output_____"
]
],
[
[
"//Tecplot(\"initial\",0.0,2,VelX);",
"_____no_output_____"
]
],
[
[
"\r\n# Storing the initial value in the database and linking it in the control object\r\n",
"_____no_output_____"
],
[
"The DG field with the initial value can be stored in the database.\r\nthis will create a dummy session.",
"_____no_output_____"
]
],
[
[
"BoSSSshell.WorkflowMgm.Init(\"TestProject\");",
"_____no_output_____"
],
[
"var InitalValueTS = db.SaveTimestep(VelX); // further fields an be \r\n// appended",
"_____no_output_____"
],
[
"BoSSSshell.WorkflowMgm.Sessions",
"_____no_output_____"
],
[
"/// Now, we can use this timestep as a restart-value for the simulation:",
"_____no_output_____"
],
[
"C.SetRestart(InitalValueTS);",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c7a873c8598249c6baf35a88279f5eb62c6e3
| 242,744 |
ipynb
|
Jupyter Notebook
|
jupyter/Chapter09/alpha_beta.ipynb
|
mberkanbicer/software
|
89f8004f567129216b92c156bbed658a9c03745a
|
[
"Apache-2.0"
] | null | null | null |
jupyter/Chapter09/alpha_beta.ipynb
|
mberkanbicer/software
|
89f8004f567129216b92c156bbed658a9c03745a
|
[
"Apache-2.0"
] | null | null | null |
jupyter/Chapter09/alpha_beta.ipynb
|
mberkanbicer/software
|
89f8004f567129216b92c156bbed658a9c03745a
|
[
"Apache-2.0"
] | null | null | null | 627.245478 | 95,792 | 0.941725 |
[
[
[
"# ***Introduction to Radar Using Python and MATLAB***\n## Andy Harrison - Copyright (C) 2019 Artech House\n<br/>\n\n# Alpha Beta Filter\n***",
"_____no_output_____"
],
[
"Referring to Section 9.1.1, the alpha-beta filter, is a simplified filter for parameter estimation and smoothing. The alpha-beta filter is related to Kalman filters but does not require a detailed system model. It presumes that the system is approximated by two internal states. The first state is determined by integrating the second state over time. The radar measurements are the observations of the first model state. This is a low-order approximation and may be adequate for simple tracking problems, such as tracking a target's position where the position is found from the time integral of the velocity. Assuming the velocity remains fixed over the time interval between measurements, the position is projected forward in time to predict its value at the next sampling time.",
"_____no_output_____"
],
[
"The Python sample code for the alpha beta filter is given in Listing 9.1\n***",
"_____no_output_____"
],
[
"Set the start, step and end times (s)",
"_____no_output_____"
]
],
[
[
"start = 0.0\n\nend = 20.0\n\nstep = 0.1",
"_____no_output_____"
]
],
[
[
"Calculate the number of updates and create the time array with the `linspace` routine from `scipy`",
"_____no_output_____"
]
],
[
[
"from numpy import linspace\n\nnumber_of_updates = round( (end - start) / step) + 1\n\nt, dt = linspace(start, end, number_of_updates, retstep=True)",
"_____no_output_____"
]
],
[
[
"Set the initial position (m) and initial velocity (m/s)",
"_____no_output_____"
]
],
[
[
"initial_position = 5.0\n\ninitial_velocity = 0.5",
"_____no_output_____"
]
],
[
[
"Set the noise variance and the factors (alpha, beta) for the filter",
"_____no_output_____"
]
],
[
[
"noise_variance = 2.0\n\nalpha = 0.1\n\nbeta = 0.001",
"_____no_output_____"
]
],
[
[
"Calculate the true position",
"_____no_output_____"
]
],
[
[
"x_true = initial_position + initial_velocity * t",
"_____no_output_____"
]
],
[
[
"Create the measurements using the random number routines from `scipy`",
"_____no_output_____"
]
],
[
[
"from numpy import random, sqrt\n\nz = x_true + sqrt(noise_variance) * (random.rand(number_of_updates) - 0.5)",
"_____no_output_____"
]
],
[
[
"Initialize the state and create the empty filter estimates",
"_____no_output_____"
]
],
[
[
"xk_1 = 0.0\n\nvk_1 = 0.0\n\n\nx_filt = []\n\nv_filt = []\n\nr_filt = []",
"_____no_output_____"
]
],
[
[
"Perform the alpha-beta filtering",
"_____no_output_____"
]
],
[
[
"# Loop over all measurements\n\nfor zk in z:\n\n # Predict the next state\n\n xk = xk_1 + vk_1 * dt\n\n vk = vk_1\n\n\n # Calculate the residual\n\n rk = zk - xk\n\n\n # Correct the predicted state\n\n xk += alpha * rk\n\n vk += beta / dt * rk\n\n\n # Set the current state as previous\n\n xk_1 = xk\n\n vk_1 = vk\n\n\n x_filt.append(xk)\n\n v_filt.append(vk)\n\n r_filt.append(rk)",
"_____no_output_____"
]
],
[
[
"Display the results of the alpha beta filter using the `matplotlib` routines",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt\n\nfrom numpy import ones_like\n\n\n# Set the figure size\n\nplt.rcParams[\"figure.figsize\"] = (15, 10)\n\n\n# Position\n\nplt.figure()\n\nplt.plot(t, x_true, '', label='True')\n\nplt.plot(t, z, ':', label='Measurement')\n\nplt.plot(t, x_filt, '--', label='Filtered')\n\nplt.ylabel('Position (m)', size=12)\n\nplt.legend(loc='best', prop={'size': 10})\n\n# Set the plot title and labels\n\nplt.title('Alpha-Beta Filter', size=14)\n\nplt.xlabel('Time (s)', size=12)\n\n\n# Set the tick label size\n\nplt.tick_params(labelsize=12)\n\n\n# Turn on the grid\n\nplt.grid(linestyle=':', linewidth=0.5)\n\n\n# Velocity\n \nplt.figure()\n\nplt.plot(t, initial_velocity * ones_like(t), '', label='True')\n\nplt.plot(t, v_filt, '--', label='Filtered')\n\nplt.ylabel('Velocity (m/s)', size=12)\n\nplt.legend(loc='best', prop={'size': 10})\n\n\n# Set the plot title and labels\n\nplt.title('Alpha-Beta Filter', size=14)\n\nplt.xlabel('Time (s)', size=12)\n\n\n# Set the tick label size\n\nplt.tick_params(labelsize=12)\n\n\n# Turn on the grid\n\nplt.grid(linestyle=':', linewidth=0.5)\n\n\n# Residual\n\nplt.figure()\n\nplt.plot(t, r_filt, '')\n\nplt.ylabel('Residual (m)', size=12)\n\n\n# Set the plot title and labels\n\nplt.title('Alpha-Beta Filter', size=14)\n\nplt.xlabel('Time (s)', size=12)\n\n\n# Set the tick label size\n\nplt.tick_params(labelsize=12)\n\n\n# Turn on the grid\n\nplt.grid(linestyle=':', linewidth=0.5)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9c814b5e609c78497228fcbac2249ec4e917f9
| 50,036 |
ipynb
|
Jupyter Notebook
|
inst/prototypes/precomputed_quantities_mash.ipynb
|
zouyuxin/mmbr
|
7a7ab16386ddb6bb3fdca06b86035d66cde19245
|
[
"MIT"
] | null | null | null |
inst/prototypes/precomputed_quantities_mash.ipynb
|
zouyuxin/mmbr
|
7a7ab16386ddb6bb3fdca06b86035d66cde19245
|
[
"MIT"
] | null | null | null |
inst/prototypes/precomputed_quantities_mash.ipynb
|
zouyuxin/mmbr
|
7a7ab16386ddb6bb3fdca06b86035d66cde19245
|
[
"MIT"
] | null | null | null | 87.01913 | 1,831 | 0.550124 |
[
[
[
"# Pre-computing various second-moment related quantities\n\nThis saves computation for M&M by precomputing and re-using quantitaties shared between iterations. It mostly saves $O(R^3)$ computations. This vignette shows results agree with the original version. Cannot use unit test due to numerical discrepency between `chol` of amardillo and R -- this has been shown problematic for some computations. I'll have to improve `mashr` for it.",
"_____no_output_____"
]
],
[
[
"muffled_chol = function(x, ...)\nwithCallingHandlers(chol(x, ...),\n warning = function(w) {\n if (grepl(\"the matrix is either rank-deficient or indefinite\", w$message))\n invokeRestart(\"muffleWarning\")\n } )",
"_____no_output_____"
],
[
"set.seed(1)\nlibrary(mashr)\nsimdata = simple_sims(500,5,1)\ndata = mash_set_data(simdata$Bhat, simdata$Shat, alpha = 0)\nU.c = cov_canonical(data)\ngrid = mashr:::autoselect_grid(data,sqrt(2))\nUlist = mashr:::normalize_Ulist(U.c)\nxUlist = expand_cov(Ulist,grid,TRUE)\nllik_mat0 = mashr:::calc_lik_rcpp(t(data$Bhat),t(data$Shat),data$V,\n matrix(0,0,0), simplify2array(xUlist),T,T)$data\nsvs = data$Shat[1,] * t(data$V * data$Shat[1,])\nsigma_rooti = list()\nfor (i in 1:length(xUlist)) sigma_rooti[[i]] = t(backsolve(muffled_chol(svs + xUlist[[i]], pivot=T), diag(nrow(svs))))\nllik_mat = mashr:::calc_lik_common_rcpp(t(data$Bhat),\n simplify2array(sigma_rooti),\n T)$data",
"Loading required package: ashr\n"
],
[
"head(llik_mat0)",
"_____no_output_____"
],
[
"head(llik_mat)",
"_____no_output_____"
],
[
"rows <- which(apply(llik_mat,2,function (x) any(is.infinite(x))))\nif (length(rows) > 0)\n warning(paste(\"Some mixture components result in non-finite likelihoods,\",\n \"either\\n\",\"due to numerical underflow/overflow,\",\n \"or due to invalid covariance matrices\",\n paste(rows,collapse=\", \"), \"\\n\"))\nloglik_null = llik_mat[,1]\nlfactors = apply(llik_mat,1,max)\nllik_mat = llik_mat - lfactors\nmixture_posterior_weights = mashr:::compute_posterior_weights(1/ncol(llik_mat), exp(llik_mat))\npost0 = mashr:::calc_post_rcpp(t(data$Bhat), t(data$Shat), matrix(0,0,0), matrix(0,0,0), \n data$V,\n matrix(0,0,0), matrix(0,0,0), \n simplify2array(xUlist),\n t(mixture_posterior_weights),\n T, 4)",
"_____no_output_____"
],
[
"Vinv = solve(svs)\nU0 = list()\nfor (i in 1:length(xUlist)) U0[[i]] = xUlist[[i]] %*% solve(Vinv %*% xUlist[[i]] + diag(nrow(xUlist[[i]])))",
"_____no_output_____"
],
[
"post = mashr:::calc_post_precision_rcpp(t(data$Bhat), t(data$Shat), matrix(0,0,0), matrix(0,0,0), \n data$V,\n matrix(0,0,0), matrix(0,0,0), \n Vinv,\n simplify2array(U0),\n t(mixture_posterior_weights),\n 4)",
"_____no_output_____"
],
[
"head(post$post_mean)",
"_____no_output_____"
],
[
"head(post0$post_mean)",
"_____no_output_____"
],
[
"head(post$post_cov)",
"_____no_output_____"
],
[
"head(post0$post_cov)",
"_____no_output_____"
]
],
[
[
"Now test the relevant `mmbr` interface:",
"_____no_output_____"
]
],
[
[
"simulate_multivariate = function(n=100,p=100,r=2) {\n set.seed(1)\n res = mmbr::mmbr_sim1(n,p,r,4,center_scale=TRUE)\n res$L = 10\n return(res)\n}\nattach(simulate_multivariate(r=2))",
"_____no_output_____"
],
[
"prior_var = V[1,1]\nresidual_var = as.numeric(var(y))\ndata = mmbr:::DenseData$new(X,y)\nA = mmbr:::BayesianSimpleRegression$new(ncol(X), residual_var, prior_var)\nA$fit(data, save_summary_stats = T)\nnull_weight = 0\nmash_init = mmbr:::MashInitializer$new(list(V), 1, 1 - null_weight, null_weight)",
"_____no_output_____"
],
[
"residual_covar = cov(y)\nmash_init$precompute_cov_matrices(data, residual_covar)",
"_____no_output_____"
],
[
"B = mmbr:::MashRegression$new(ncol(X), residual_covar, mash_init)",
"_____no_output_____"
],
[
"B$fit(data, save_summary_stats = T)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c8cd90b70a8cb983063e4fe16025a3c064a41
| 386,703 |
ipynb
|
Jupyter Notebook
|
Final_Model.ipynb
|
reddyvishnu41/image_captioning
|
a07c0c26b01dba42387d732803c4f88d584dc694
|
[
"MIT"
] | null | null | null |
Final_Model.ipynb
|
reddyvishnu41/image_captioning
|
a07c0c26b01dba42387d732803c4f88d584dc694
|
[
"MIT"
] | null | null | null |
Final_Model.ipynb
|
reddyvishnu41/image_captioning
|
a07c0c26b01dba42387d732803c4f88d584dc694
|
[
"MIT"
] | null | null | null | 1,231.538217 | 178,740 | 0.957844 |
[
[
[
"import sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\n# TODO #1: Define a transform to pre-process the testing images.\ntransform_test = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Create the data loader.\ndata_loader = get_loader(transform=transform_test, \n mode='test')",
"Vocabulary successfully loaded from vocab.pkl file!\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Obtain sample image before and after pre-processing.\norig_image, image = next(iter(data_loader))\n\n# Visualize sample image, before pre-processing.\nplt.imshow(np.squeeze(orig_image))\nplt.title('example image')\nplt.show()",
"_____no_output_____"
],
[
"import torch\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"_____no_output_____"
],
[
"# Watch for any changes in model.py, and re-load it automatically.\n% load_ext autoreload\n% autoreload 2\n\nimport os\nimport torch\nfrom model import EncoderCNN, DecoderRNN\n\n# TODO #2: Specify the saved models to load.\nencoder_file = 'encoder-3.pkl'\ndecoder_file = 'decoder-3.pkl'\n\n# TODO #3: Select appropriate values for the Python variables below.\nembed_size = 512\nhidden_size = 512\n\n# The size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the encoder and decoder, and set each to inference mode.\nencoder = EncoderCNN(embed_size)\nencoder.eval()\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\ndecoder.eval()\n\n# Load the trained weights.\nencoder.load_state_dict(torch.load(os.path.join('./models', encoder_file)))\ndecoder.load_state_dict(torch.load(os.path.join('./models', decoder_file)))\n\n# Move models to GPU if CUDA is available.\nencoder.to(device)\ndecoder.to(device)",
"Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /root/.torch/models/resnet50-19c8e357.pth\n100%|██████████| 102502400/102502400 [00:01<00:00, 63858247.55it/s]\n"
],
[
"# Move image Pytorch Tensor to GPU if CUDA is available.\nimage = image.to(device)\n\n# Obtain the embedded image features.\nfeatures = encoder(image).unsqueeze(1)\n\n# Pass the embedded image features through the model to get a predicted caption.\noutput = decoder.sample(features)\nprint('example output:', output)\n\nassert (type(output)==list), \"Output needs to be a Python list\" \nassert all([type(x)==int for x in output]), \"Output should be a list of integers.\" \nassert all([x in data_loader.dataset.vocab.idx2word for x in output]), \"Each entry in the output needs to correspond to an integer that indicates a token in the vocabulary.\"",
"example output: [0, 3, 169, 139, 3, 755, 54, 3, 33, 18, 1, 294, 3, 30, 1, 39, 32, 185, 13, 32]\n"
],
[
"# TODO #4: Complete the function.\ndef clean_sentence(output):\n sentence=\" \"\n for i in output:\n word=data_loader.dataset.vocab.idx2word[i]\n if(i==0):\n continue\n elif(i==18):\n break\n else:\n sentence=sentence + \" \" + word\n return sentence",
"_____no_output_____"
],
[
"sentence = clean_sentence(output)\nprint('example sentence:', sentence)\n\nassert type(sentence)==str, 'Sentence needs to be a Python string!'",
"example sentence: a man riding a skateboard down a street\n"
],
[
"def get_prediction():\n orig_image, image = next(iter(data_loader))\n plt.imshow(np.squeeze(orig_image))\n plt.title('Sample Image')\n plt.show()\n image = image.to(device)\n features = encoder(image).unsqueeze(1)\n output = decoder.sample(features) \n sentence = clean_sentence(output)\n print(sentence)",
"_____no_output_____"
],
[
"get_prediction()",
"_____no_output_____"
],
[
"get_prediction()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c9966d65710f8a960664ae23e2296cc30703f
| 11,511 |
ipynb
|
Jupyter Notebook
|
basic_image_recognition.ipynb
|
ldhagen/docker-junyper
|
443402376c37dfa567ff98456cab2753c86ba388
|
[
"MIT"
] | null | null | null |
basic_image_recognition.ipynb
|
ldhagen/docker-junyper
|
443402376c37dfa567ff98456cab2753c86ba388
|
[
"MIT"
] | null | null | null |
basic_image_recognition.ipynb
|
ldhagen/docker-junyper
|
443402376c37dfa567ff98456cab2753c86ba388
|
[
"MIT"
] | null | null | null | 30.05483 | 122 | 0.472765 |
[
[
[
"From https://pythonprogramming.net/testing-visualization-and-conclusion/?completed=/basic-image-recognition-testing/",
"_____no_output_____"
]
],
[
[
"!apt-get install -y unzip\n!wget https://pythonprogramming.net/static/downloads/image-recognition/tutorialimages.zip\n!unzip tutorialimages.zip\n!cp images/numbers/3.8.png images/test.png",
"_____no_output_____"
],
[
"%matplotlib inline\nfrom PIL import Image\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport time\n\ndef threshold(imageArray):\n balanceAr = []\n newAr = imageArray\n for eachRow in imageArray:\n for eachPix in eachRow:\n avgNum = reduce(lambda x, y: x + y, eachPix[:3]) / len(eachPix[:3])\n balanceAr.append(avgNum)",
"_____no_output_____"
],
[
"i = Image.open('images/numbers/0.1.png')\niar = np.array(i)\ni2 = Image.open('images/numbers/y0.4.png')\niar2 = np.array(i2)\ni3 = Image.open('images/numbers/y0.5.png')\niar3 = np.array(i3)\ni4 = Image.open('images/sentdex.png')\niar4 = np.array(i4)\n\n\nfig = plt.figure()\nax1 = plt.subplot2grid((8,6),(0,0), rowspan=4, colspan=3)\nax2 = plt.subplot2grid((8,6),(4,0), rowspan=4, colspan=3)\nax3 = plt.subplot2grid((8,6),(0,3), rowspan=4, colspan=3)\nax4 = plt.subplot2grid((8,6),(4,3), rowspan=4, colspan=3)\n\nax1.imshow(iar)\nax2.imshow(iar2)\nax3.imshow(iar3)\nax4.imshow(iar4)\n\n\nplt.show()",
"_____no_output_____"
],
[
"def threshold(imageArray):\n balanceAr = []\n newAr = imageArray\n for eachRow in imageArray:\n for eachPix in eachRow:\n avgNum = reduce(lambda x, y: x + y, eachPix[:3]) / len(eachPix[:3])\n balanceAr.append(avgNum)\n balance = reduce(lambda x, y: x + y, balanceAr) / len(balanceAr)\n for eachRow in newAr:\n for eachPix in eachRow:\n if reduce(lambda x, y: x + y, eachPix[:3]) / len(eachPix[:3]) > balance:\n eachPix[0] = 255\n eachPix[1] = 255\n eachPix[2] = 255\n eachPix[3] = 255\n else:\n eachPix[0] = 0\n eachPix[1] = 0\n eachPix[2] = 0\n eachPix[3] = 255\n return newAr",
"_____no_output_____"
],
[
"i = Image.open('images/numbers/0.1.png')\niar = np.array(i)\ni2 = Image.open('images/numbers/y0.4.png')\niar2 = np.array(i2)\ni3 = Image.open('images/numbers/y0.5.png')\niar3 = np.array(i3)\ni4 = Image.open('images/sentdex.png')\niar4 = np.array(i4)\n\n\niar = threshold(iar)\niar2 = threshold(iar2)\niar3 = threshold(iar3)\niar4 = threshold(iar4)\n\nfig = plt.figure()\nax1 = plt.subplot2grid((8,6),(0,0), rowspan=4, colspan=3)\nax2 = plt.subplot2grid((8,6),(4,0), rowspan=4, colspan=3)\nax3 = plt.subplot2grid((8,6),(0,3), rowspan=4, colspan=3)\nax4 = plt.subplot2grid((8,6),(4,3), rowspan=4, colspan=3)\n\nax1.imshow(iar)\nax2.imshow(iar2)\nax3.imshow(iar3)\nax4.imshow(iar4)\n\n\nplt.show()",
"_____no_output_____"
],
[
"def createExamples():\n numberArrayExamples = open('numArEx.txt','a')\n numbersWeHave = range(1,10)\n for eachNum in numbersWeHave:\n #print eachNum\n for furtherNum in numbersWeHave:\n # you could also literally add it *.1 and have it create\n # an actual float, but, since in the end we are going\n # to use it as a string, this way will work.\n print(str(eachNum)+'.'+str(furtherNum))\n imgFilePath = 'images/numbers/'+str(eachNum)+'.'+str(furtherNum)+'.png'\n ei = Image.open(imgFilePath)\n eiar = np.array(ei)\n eiarl = str(eiar.tolist())\n\n print(eiarl)\n lineToWrite = str(eachNum)+'::'+eiarl+'\\n'\n numberArrayExamples.write(lineToWrite)",
"_____no_output_____"
],
[
"createExamples()",
"_____no_output_____"
],
[
"from PIL import Image\nimport numpy as np\n\nimport time\nfrom collections import Counter\n\n\ndef whatNumIsThis(filePath):\n\n matchedAr = []\n loadExamps = open('numArEx.txt','r').read()\n loadExamps = loadExamps.split('\\n')\n \n i = Image.open(filePath)\n iar = np.array(i)\n iarl = iar.tolist()\n\n inQuestion = str(iarl)\n\n for eachExample in loadExamps:\n try:\n splitEx = eachExample.split('::')\n currentNum = splitEx[0]\n currentAr = splitEx[1]\n \n eachPixEx = currentAr.split('],')\n eachPixInQ = inQuestion.split('],')\n\n x = 0\n\n while x < len(eachPixEx):\n if eachPixEx[x] == eachPixInQ[x]:\n matchedAr.append(int(currentNum))\n\n x+=1\n except Exception as e:\n print(str(e))\n \n print(matchedAr)\n x = Counter(matchedAr)\n print(x)\n print(x[0])\n\nwhatNumIsThis('images/test.png')",
"_____no_output_____"
],
[
"from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom collections import Counter\n\nfrom matplotlib import style\nstyle.use(\"ggplot\")\n\ndef createExamples():\n numberArrayExamples = open('numArEx.txt','a')\n numbersWeHave = range(1,10)\n for eachNum in numbersWeHave:\n for furtherNum in numbersWeHave:\n\n imgFilePath = 'images/numbers/'+str(eachNum)+'.'+str(furtherNum)+'.png'\n ei = Image.open(imgFilePath)\n eiar = np.array(ei)\n eiarl = str(eiar.tolist())\n\n lineToWrite = str(eachNum)+'::'+eiarl+'\\n'\n numberArrayExamples.write(lineToWrite)\n\n\n\n \n \ndef threshold(imageArray):\n balanceAr = []\n newAr = imageArray\n for eachPart in imageArray:\n for theParts in eachPart:\n\t\t\t# for the reduce(lambda x, y: x + y, theParts[:3]) / len(theParts[:3])\n\t\t\t# in Python 3, just use: from statistics import mean\n\t\t\t# then do avgNum = mean(theParts[:3])\n avgNum = reduce(lambda x, y: x + y, theParts[:3]) / len(theParts[:3])\n balanceAr.append(avgNum)\n balance = reduce(lambda x, y: x + y, balanceAr) / len(balanceAr)\n for eachRow in newAr:\n for eachPix in eachRow:\n if reduce(lambda x, y: x + y, eachPix[:3]) / len(eachPix[:3]) > balance:\n eachPix[0] = 255\n eachPix[1] = 255\n eachPix[2] = 255\n eachPix[3] = 255\n else:\n eachPix[0] = 0\n eachPix[1] = 0\n eachPix[2] = 0\n eachPix[3] = 255\n return newAr\n\n\n\ndef whatNumIsThis(filePath):\n\n matchedAr = []\n loadExamps = open('numArEx.txt','r').read()\n loadExamps = loadExamps.split('\\n')\n i = Image.open(filePath)\n iar = np.array(i)\n iarl = iar.tolist()\n inQuestion = str(iarl)\n for eachExample in loadExamps:\n try:\n splitEx = eachExample.split('::')\n currentNum = splitEx[0]\n currentAr = splitEx[1]\n eachPixEx = currentAr.split('],')\n eachPixInQ = inQuestion.split('],')\n x = 0\n while x < len(eachPixEx):\n if eachPixEx[x] == eachPixInQ[x]:\n matchedAr.append(int(currentNum))\n\n x+=1\n except Exception as e:\n print(str(e))\n \n x = Counter(matchedAr)\n 
print(x)\n graphX = []\n graphY = []\n\n ylimi = 0\n\n for eachThing in x:\n graphX.append(eachThing)\n graphY.append(x[eachThing])\n ylimi = x[eachThing]\n\n\n\n fig = plt.figure()\n ax1 = plt.subplot2grid((4,4),(0,0), rowspan=1, colspan=4)\n ax2 = plt.subplot2grid((4,4),(1,0), rowspan=3,colspan=4)\n \n ax1.imshow(iar)\n ax2.bar(graphX,graphY,align='center')\n plt.ylim(400)\n \n xloc = plt.MaxNLocator(12)\n ax2.xaxis.set_major_locator(xloc)\n\n plt.show()\n\nwhatNumIsThis('images/test.png')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9c9ae27c99a7d8a3e030ee471e6d03361136a7
| 32,141 |
ipynb
|
Jupyter Notebook
|
Examples/Other Algorithms on MNIST/CVAE.ipynb
|
RV29/VAE
|
e2a46080b0dfc4ea16756486a50ff4c9726320e4
|
[
"MIT"
] | null | null | null |
Examples/Other Algorithms on MNIST/CVAE.ipynb
|
RV29/VAE
|
e2a46080b0dfc4ea16756486a50ff4c9726320e4
|
[
"MIT"
] | null | null | null |
Examples/Other Algorithms on MNIST/CVAE.ipynb
|
RV29/VAE
|
e2a46080b0dfc4ea16756486a50ff4c9726320e4
|
[
"MIT"
] | null | null | null | 91.309659 | 20,758 | 0.790455 |
[
[
[
"# Convolutional Variational Autoencoder taken from TensorFlow Tutorials\n# https://www.tensorflow.org/tutorials/generative/cvae\n# preferably run with a GPU or on Google Colab etc",
"_____no_output_____"
],
[
"# to generate gifs\n!pip install -q imageio\n\nimport tensorflow as tf\n\nimport os\nimport time\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport PIL\nimport imageio\n\nfrom IPython import display",
"_____no_output_____"
],
[
"(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()",
"_____no_output_____"
],
[
"train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\ntest_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')\n\n# Normalizing the images to the range of [0., 1.]\ntrain_images /= 255.\ntest_images /= 255.\n\n# Binarization\n#train_images[train_images >= .5] = 1.\n#train_images[train_images < .5] = 0.\n#test_images[test_images >= .5] = 1.\n#test_images[test_images < .5] = 0.",
"_____no_output_____"
],
[
"TRAIN_BUF = 60000\nBATCH_SIZE = 100\n\nTEST_BUF = 10000",
"_____no_output_____"
],
[
"train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(TRAIN_BUF).batch(BATCH_SIZE)\ntest_dataset = tf.data.Dataset.from_tensor_slices(test_images).shuffle(TEST_BUF).batch(BATCH_SIZE)",
"_____no_output_____"
],
[
"class CVAE(tf.keras.Model):\n def __init__(self, latent_dim):\n super(CVAE, self).__init__()\n self.latent_dim = latent_dim\n self.inference_net = tf.keras.Sequential(\n [\n tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),\n tf.keras.layers.Conv2D(\n filters=32, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Conv2D(\n filters=64, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Flatten(),\n # No activation\n tf.keras.layers.Dense(latent_dim + latent_dim),\n ]\n )\n\n self.generative_net = tf.keras.Sequential(\n [\n tf.keras.layers.InputLayer(input_shape=(latent_dim,)),\n tf.keras.layers.Dense(units=7*7*32, activation=tf.nn.relu),\n tf.keras.layers.Reshape(target_shape=(7, 7, 32)),\n tf.keras.layers.Conv2DTranspose(\n filters=64,\n kernel_size=3,\n strides=(2, 2),\n padding=\"SAME\",\n activation='relu'),\n tf.keras.layers.Conv2DTranspose(\n filters=32,\n kernel_size=3,\n strides=(2, 2),\n padding=\"SAME\",\n activation='relu'),\n # No activation\n tf.keras.layers.Conv2DTranspose(\n filters=1, kernel_size=3, strides=(1, 1), padding=\"SAME\"),\n ]\n )\n\n @tf.function\n def sample(self, eps=None):\n if eps is None:\n eps = tf.random.normal(shape=(100, self.latent_dim))\n return self.decode(eps, apply_sigmoid=True)\n\n def encode(self, x):\n mean, logvar = tf.split(self.inference_net(x), num_or_size_splits=2, axis=1)\n return mean, logvar\n\n def reparameterize(self, mean, logvar):\n eps = tf.random.normal(shape=mean.shape)\n return eps * tf.exp(logvar * .5) + mean\n\n def decode(self, z, apply_sigmoid=False):\n logits = self.generative_net(z)\n if apply_sigmoid:\n probs = tf.sigmoid(logits)\n return probs\n\n return logits",
"_____no_output_____"
],
[
"optimizer = tf.keras.optimizers.Adam(1e-4)\n\ndef log_normal_pdf(sample, mean, logvar, raxis=1):\n log2pi = tf.math.log(2. * np.pi)\n return tf.reduce_sum(\n -.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),\n axis=raxis)\n\[email protected]\ndef compute_loss(model, x):\n mean, logvar = model.encode(x)\n z = model.reparameterize(mean, logvar)\n x_logit = model.decode(z)\n\n cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)\n logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])\n logpz = log_normal_pdf(z, 0., 0.)\n logqz_x = log_normal_pdf(z, mean, logvar)\n return -tf.reduce_mean(logpx_z + logpz - logqz_x)\n\[email protected]\ndef compute_apply_gradients(model, x, optimizer):\n with tf.GradientTape() as tape:\n loss = compute_loss(model, x)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))",
"_____no_output_____"
],
[
"epochs = 100\nlatent_dim = 50\nnum_examples_to_generate = 9\n\n# keeping the random vector constant for generation (prediction) so\n# it will be easier to see the improvement.\nrandom_vector_for_generation = tf.random.normal(\n shape=[num_examples_to_generate, latent_dim])\nmodel = CVAE(latent_dim)",
"_____no_output_____"
],
[
"def generate_and_save_images(model, epoch, test_input):\n predictions = model.sample(test_input)\n fig = plt.figure(figsize=(3,3))\n\n for i in range(predictions.shape[0]):\n plt.subplot(3, 3, i+1)\n plt.imshow(predictions[i, :, :, 0], cmap='gray')\n plt.axis('off')\n\n # tight_layout minimizes the overlap between 2 sub-plots\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()",
"_____no_output_____"
],
[
"generate_and_save_images(model, 0, random_vector_for_generation)\n\nfor epoch in range(1, epochs + 1):\n start_time = time.time()\n for train_x in train_dataset:\n compute_apply_gradients(model, train_x, optimizer)\n end_time = time.time()\n\n if epoch % 1 == 0:\n loss = tf.keras.metrics.Mean()\n for test_x in test_dataset:\n loss(compute_loss(model, test_x))\n elbo = -loss.result()\n display.clear_output(wait=False)\n print('Epoch: {}, Test set ELBO: {}, '\n 'time elapse for current epoch {}'.format(epoch,\n elbo,\n end_time - start_time))\n generate_and_save_images(\n model, epoch, random_vector_for_generation)",
"Epoch: 100, Test set ELBO: -99.40341186523438, time elapse for current epoch 3.835003614425659\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9cad75a8c7ce576ab3df1c542dc8ec3c92ae67
| 15,596 |
ipynb
|
Jupyter Notebook
|
notebook/2_PowerSystems_examples/network_matrices.ipynb
|
raphaelsaavedra/SIIPExamples.jl
|
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
|
[
"BSD-3-Clause"
] | null | null | null |
notebook/2_PowerSystems_examples/network_matrices.ipynb
|
raphaelsaavedra/SIIPExamples.jl
|
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
|
[
"BSD-3-Clause"
] | null | null | null |
notebook/2_PowerSystems_examples/network_matrices.ipynb
|
raphaelsaavedra/SIIPExamples.jl
|
d7304c84fe5382db2ff4c20f058bc1e5d01cae8c
|
[
"BSD-3-Clause"
] | null | null | null | 45.205797 | 1,942 | 0.598551 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a9cb17de2a4830adfc4df5badcca14f3867fc3c
| 4,868 |
ipynb
|
Jupyter Notebook
|
Recsys.ipynb
|
ShehzadaAlam/NLP---Recommendation-System
|
0d695e4a269637e7bc77d599812772701928bc4c
|
[
"MIT"
] | 1 |
2021-07-29T19:11:53.000Z
|
2021-07-29T19:11:53.000Z
|
Recsys.ipynb
|
ShehzadaAlam/Recommender-System
|
0d695e4a269637e7bc77d599812772701928bc4c
|
[
"MIT"
] | null | null | null |
Recsys.ipynb
|
ShehzadaAlam/Recommender-System
|
0d695e4a269637e7bc77d599812772701928bc4c
|
[
"MIT"
] | null | null | null | 26.032086 | 135 | 0.573131 |
[
[
[
"## Recommendation System\n----",
"_____no_output_____"
],
[
"#### Loading Packages and Dependencies",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport nltk\nimport warnings\nfrom rake_nltk import Rake\nfrom textacy import preprocessing\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nwarnings.simplefilter('ignore')\nsns.set_style('whitegrid')",
"_____no_output_____"
]
],
[
[
"#### Preprocessing Dataset",
"_____no_output_____"
]
],
[
[
"dataset = pd.read_csv('https://query.data.world/s/uikepcpffyo2nhig52xxeevdialfl7') #Loading the dataset\ndataset = dataset[['Title', 'Genre','Director', 'Writer', 'Actors', 'Plot']] #Selecting only the columns which contain text\ndataset.dropna(inplace=True) #Dropping all null values \ndataset.set_index('Title', inplace=True) #Setting up index as title\nfor columns in dataset.columns:\n dataset[columns] = dataset[columns].str.lower() # converting entire dataframe to lowercase\n dataset[columns] = dataset[columns].apply(lambda x: preprocessing.remove.remove_punctuation(x)) # removing punctuations\n dataset[columns] = dataset[columns] + ' '",
"_____no_output_____"
]
],
[
[
"#### Getting Description Keywords",
"_____no_output_____"
]
],
[
[
"def get_word_dict(text):\n r=Rake()\n r.extract_keywords_from_text(text)\n return ' '.join(r.get_word_degrees().keys())\n\ndataset['Description Keywords'] = dataset.sum(axis=1) #Concatenating entire dataset into one column \ndataset.drop(columns=dataset.columns[dataset.columns != 'Description Keywords'], inplace=True) # Dropping rest of the columns\ndataset['Description Keywords'] = dataset['Description Keywords'].apply(lambda x: get_word_dict(x)) #Extracting the keywords",
"_____no_output_____"
]
],
[
[
"#### Creating Cosine Matrix",
"_____no_output_____"
]
],
[
[
"count_vect = CountVectorizer(ngram_range=(1,3),stop_words=nltk.corpus.stopwords.words('English'),lowercase=True) #creating BOW\ncount_mat = count_vect.fit_transform(dataset['Description Keywords'])\ncosine_matrix = cosine_similarity(count_mat,count_mat)",
"_____no_output_____"
]
],
[
[
"#### Create Movie Recomendation",
"_____no_output_____"
]
],
[
[
"def Movie_recommendation(movie):\n try:\n movie_title = pd.Series(dataset.index)\n ind = movie_title[movie_title==movie.title()].index[0]\n print(\"you've Entered:\",movie)\n print()\n print('Recommendation:')\n for movie_ind in list(pd.Series(cosine_matrix[ind]).sort_values(ascending=False).head(6).index[1:]):\n print(movie_title[movie_ind])\n except:\n print('Sorry, movie not found')",
"_____no_output_____"
]
],
[
[
"#### Generating Recommendation",
"_____no_output_____"
]
],
[
[
"Movie_recommendation('The Matrix')",
"you've Entered: The Matrix\n\nRecommendation:\nThe Terminator\nV for Vendetta\nMemento\nTerminator 2: Judgment Day\nThe Avengers\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9cb91d348215598b6e348fe39eeeceace71226
| 3,354 |
ipynb
|
Jupyter Notebook
|
feature_selection/feature_selection_notebook.ipynb
|
cristianmusic7/udacity-ud-120-solutions
|
a1ca5e19aabd4dc7df8e3ebe658d342dcc0a5e17
|
[
"MIT"
] | 1 |
2021-05-05T07:12:05.000Z
|
2021-05-05T07:12:05.000Z
|
feature_selection/feature_selection_notebook.ipynb
|
cristianmusic7/udacity-ud-120-solutions
|
a1ca5e19aabd4dc7df8e3ebe658d342dcc0a5e17
|
[
"MIT"
] | null | null | null |
feature_selection/feature_selection_notebook.ipynb
|
cristianmusic7/udacity-ud-120-solutions
|
a1ca5e19aabd4dc7df8e3ebe658d342dcc0a5e17
|
[
"MIT"
] | null | null | null | 31.345794 | 153 | 0.602564 |
[
[
[
"#!/usr/bin/python\n\nimport pickle\nimport numpy\nnumpy.random.seed(42)\n\n\n### The words (features) and authors (labels), already largely processed.\n### These files should have been created from the previous (Lesson 10)\n### mini-project.\nwords_file = \"../text_learning/your_word_data.pkl\" \nauthors_file = \"../text_learning/your_email_authors.pkl\"\nword_data = pickle.load( open(words_file, \"rb\"))\nauthors = pickle.load( open(authors_file, \"rb\") )\n\n\n\n### test_size is the percentage of events assigned to the test set (the\n### remainder go into training)\n### feature matrices changed to dense representations for compatibility with\n### classifier functions in versions 0.15.2 and earlier\nfrom sklearn import cross_validation\nfeatures_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,\n stop_words='english')\nfeatures_train = vectorizer.fit_transform(features_train)\nfeatures_test = vectorizer.transform(features_test).toarray()\n\nprint(vectorizer.get_feature_names()[18849])\nprint(vectorizer.get_feature_names()[21323])\n\n### a classic way to overfit is to use a small number\n### of data points and a large number of features;\n### train on only 150 events to put ourselves in this regime\nfeatures_train = features_train[:150].toarray()\nlabels_train = labels_train[:150]\n\n\n\n### your code goes here\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier(min_samples_split=40)\nclf = clf.fit(features_train, labels_train)\npred = clf.predict(features_test)\n\nbestfeature = [data for data in clf.feature_importances_ if data > 0.2 ]\n\nprint(bestfeature)\nprint(clf.feature_importances_.tolist().index(bestfeature[0]))\nprint(clf.feature_importances_.tolist().index(bestfeature[1]))\n\nfrom sklearn.metrics import accuracy_score\nacc = 
accuracy_score(labels_test, pred)\nprint(acc)\n\n\n",
"fax\nhouectect\n[0.21629799316658735, 0.42077235102655097]\n18849\n21323\n0.816268486917\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a9cbbbd55acc3783cb9e60825762f56b18ab424
| 46,860 |
ipynb
|
Jupyter Notebook
|
content/labs/lab2/cs109b_lab2_smooths_and_GAMs.ipynb
|
rahuliem/2019-CS109B-1
|
bacb726bc6ce2887da05d76d5f0f481e0b062db1
|
[
"MIT"
] | 1 |
2022-03-11T17:47:29.000Z
|
2022-03-11T17:47:29.000Z
|
content/labs/lab2/cs109b_lab2_smooths_and_GAMs.ipynb
|
rahuliem/2019-CS109B-1
|
bacb726bc6ce2887da05d76d5f0f481e0b062db1
|
[
"MIT"
] | null | null | null |
content/labs/lab2/cs109b_lab2_smooths_and_GAMs.ipynb
|
rahuliem/2019-CS109B-1
|
bacb726bc6ce2887da05d76d5f0f481e0b062db1
|
[
"MIT"
] | null | null | null | 28.314199 | 344 | 0.556637 |
[
[
[
"# <img style=\"float: left; padding-right: 10px; width: 45px\" src=\"https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png\"> CS109B Data Science 2: Advanced Topics in Data Science \n## Lab 2 - Smoothers and Generalized Additive Models\n\n\n**Harvard University**<br>\n**Spring 2019**<br>\n**Instructors:** Mark Glickman and Pavlos Protopapas<br>\n**Lab Instructors:** Will Claybaugh<br>\n**Contributors:** Paul Tyklin and Will Claybaugh\n\n---",
"_____no_output_____"
]
],
[
[
"## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES\nimport requests\nfrom IPython.core.display import HTML\nstyles = requests.get(\"https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css\").text\nHTML(styles)",
"_____no_output_____"
]
],
[
[
"## Learning Goals\n\nThe main goal of this lab is to get familiar with calling R functions within Python. Along the way, we'll learn about the \"formula\" interface to statsmodels, which gives an intuitive way of specifying regression models, and we'll review the different approaches to fitting curves.\n\nKey Skills:\n- Importing (base) R functions\n- Importing R library functions\n- Populating vectors R understands\n- Populating dataframes R understands\n- Populating formulas R understands\n- Running models in R\n- Getting results back to Python\n- Getting model predictions in R\n- Plotting in R\n- Reading R's documentation",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline ",
"_____no_output_____"
]
],
[
[
"## Linear/Polynomial Regression (Python, Review)\nHopefully, you remember working with Statsmodels during 109a",
"_____no_output_____"
],
[
"Reading data and (some) exploring in Pandas:",
"_____no_output_____"
]
],
[
[
"diab = pd.read_csv(\"data/diabetes.csv\")\nprint(\"\"\"\n# Variables are:\n# subject: subject ID number\n# age: age diagnosed with diabetes\n# acidity: a measure of acidity called base deficit\n# y: natural log of serum C-peptide concentration\n#\n# Original source is Sockett et al. (1987)\n# mentioned in Hastie and Tibshirani's book \n# \"Generalized Additive Models\".\n\"\"\")\n\ndisplay(diab.head())\ndisplay(diab.dtypes)\ndisplay(diab.describe())",
"_____no_output_____"
]
],
[
[
"Plotting with matplotlib:",
"_____no_output_____"
]
],
[
[
"ax0 = diab.plot.scatter(x='age',y='y',c='Red',title=\"Diabetes data\") #plotting direclty from pandas!\nax0.set_xlabel(\"Age at Diagnosis\")\nax0.set_ylabel(\"Log C-Peptide Concentration\");",
"_____no_output_____"
]
],
[
[
"Linear regression with statsmodels. \n\n- Previously, we worked from a vector of target values and a design matrix we built ourself (e.g. from PolynomialFeatures). \n- Now, Statsmodels' *formula interface* can help build the target value and design matrix for you.",
"_____no_output_____"
]
],
[
[
"#Using statsmodels\nimport statsmodels.formula.api as sm\n\n\nmodel1 = sm.ols('y ~ age',data=diab)\nfit1_lm = model1.fit()",
"_____no_output_____"
]
],
[
[
"Build a data frame to predict values on (sometimes this is just the test or validation set)\n - Very useful for making pretty plots of the model predcitions -- predict for TONS of values, not just whatever's in the training set",
"_____no_output_____"
]
],
[
[
"x_pred = np.linspace(0,16,100)\n\npredict_df = pd.DataFrame(data={\"age\":x_pred})\npredict_df.head()",
"_____no_output_____"
]
],
[
[
"Use `get_prediction(<data>).summary_frame()` to get the model's prediction (and error bars!)",
"_____no_output_____"
]
],
[
[
"prediction_output = fit1_lm.get_prediction(predict_df).summary_frame()\nprediction_output.head()",
"_____no_output_____"
]
],
[
[
"Plot the model and error bars",
"_____no_output_____"
]
],
[
[
"ax1 = diab.plot.scatter(x='age',y='y',c='Red',title=\"Diabetes data with least-squares linear fit\")\nax1.set_xlabel(\"Age at Diagnosis\")\nax1.set_ylabel(\"Log C-Peptide Concentration\")\n\n\nax1.plot(predict_df.age, prediction_output['mean'],color=\"green\")\nax1.plot(predict_df.age, prediction_output['mean_ci_lower'], color=\"blue\",linestyle=\"dashed\")\nax1.plot(predict_df.age, prediction_output['mean_ci_upper'], color=\"blue\",linestyle=\"dashed\");\n\nax1.plot(predict_df.age, prediction_output['obs_ci_lower'], color=\"skyblue\",linestyle=\"dashed\")\nax1.plot(predict_df.age, prediction_output['obs_ci_upper'], color=\"skyblue\",linestyle=\"dashed\");",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n- What are the dark error bars? \n- What are the light error bars?",
"_____no_output_____"
],
[
"<div class=\"exercise\"><b>Exercise 1</b></div>\n\n1. Fit a 3rd degree polynomial model and plot the model+error bars\n - Route1: Build a design df with a column for each of `age`, `age**2`, `age**3`\n - Route2: Just edit the formula",
"_____no_output_____"
],
[
"**Answers**:\n\n1. ",
"_____no_output_____"
]
],
[
[
"# your code here\n",
"_____no_output_____"
]
],
[
[
"2. ",
"_____no_output_____"
]
],
[
[
"# your code here\n",
"_____no_output_____"
]
],
[
[
"## Linear/Polynomial Regression, but make it R",
"_____no_output_____"
],
[
"This is the meat of the lab. After this section we'll know everything we need to in order to work with R models. The rest of the lab is just applying these concepts to run particular models. This section therefore is your 'cheat sheet' for working in R.\n\nWhat we need to know:\n- Importing (base) R functions\n- Importing R Library functions\n- Populating vectors R understands\n- Populating DataFrames R understands\n- Populating Formulas R understands\n- Running models in R\n- Getting results back to Python\n- Getting model predictions in R\n- Plotting in R\n- Reading R's documentation",
"_____no_output_____"
],
[
"**Importing R functions**",
"_____no_output_____"
]
],
[
[
"# if you're on JupyterHub you may need to specify the path to R\n\n#import os\n#os.environ['R_HOME'] = \"/usr/share/anaconda3/lib/R\"\n\nimport rpy2.robjects as robjects",
"_____no_output_____"
],
[
"r_lm = robjects.r[\"lm\"]\nr_predict = robjects.r[\"predict\"]\n#r_plot = robjects.r[\"plot\"] # more on plotting later\n\n#lm() and predict() are two of the most common functions we'll use",
"_____no_output_____"
]
],
[
[
"**Importing R libraries**",
"_____no_output_____"
]
],
[
[
"from rpy2.robjects.packages import importr\n#r_cluster = importr('cluster')\n#r_cluster.pam;",
"_____no_output_____"
]
],
[
[
"**Populating vectors R understands**",
"_____no_output_____"
]
],
[
[
"r_y = robjects.FloatVector(diab['y'])\nr_age = robjects.FloatVector(diab['age'])\n# What happens if we pass the wrong type?\n# How does r_age display?\n# How does r_age print?",
"_____no_output_____"
]
],
[
[
"**Populating Data Frames R understands**",
"_____no_output_____"
]
],
[
[
"diab_r = robjects.DataFrame({\"y\":r_y, \"age\":r_age})\n# How does diab_r display?\n# How does diab_r print?",
"_____no_output_____"
]
],
[
[
"**Populating formulas R understands**",
"_____no_output_____"
]
],
[
[
"simple_formula = robjects.Formula(\"y~age\")\nsimple_formula.environment[\"y\"] = r_y #populate the formula's .environment, so it knows what 'y' and 'age' refer to\nsimple_formula.environment[\"age\"] = r_age",
"_____no_output_____"
]
],
[
[
"**Running Models in R**",
"_____no_output_____"
]
],
[
[
"diab_lm = r_lm(formula=simple_formula) # the formula object is storing all the needed variables",
"_____no_output_____"
],
[
"simple_formula = robjects.Formula(\"y~age\") # reset the formula\ndiab_lm = r_lm(formula=simple_formula, data=diab_r) #can also use a 'dumb' formula and pass a dataframe",
"_____no_output_____"
]
],
[
[
"**Getting results back to Python**",
"_____no_output_____"
]
],
[
[
"diab_lm #the result is already 'in' python, but it's a special object",
"_____no_output_____"
],
[
"print(diab_lm.names) # view all names",
"_____no_output_____"
],
[
"diab_lm[0] #grab the first element",
"_____no_output_____"
],
[
"diab_lm.rx2(\"coefficients\") #use rx2 to get elements by name!",
"_____no_output_____"
],
[
"np.array(diab_lm.rx2(\"coefficients\")) #r vectors can be converted to numpy (but rarely needed)",
"_____no_output_____"
]
],
[
[
"**Getting Predictions**",
"_____no_output_____"
]
],
[
[
"# make a df to predict on (might just be the validation or test dataframe)\npredict_df = robjects.DataFrame({\"age\": robjects.FloatVector(np.linspace(0,16,100))})\n\n# call R's predict() function, passing the model and the data \npredictions = r_predict(diab_lm, predict_df)",
"_____no_output_____"
],
[
"x_vals = predict_df.rx2(\"age\")",
"_____no_output_____"
],
[
"ax = diab.plot.scatter(x='age',y='y',c='Red',title=\"Diabetes data\")\nax.set_xlabel(\"Age at Diagnosis\")\nax.set_ylabel(\"Log C-Peptide Concentration\");\n\nax.plot(x_vals,predictions); #plt still works with r vectors as input!",
"_____no_output_____"
]
],
[
[
"**Plotting in R**",
"_____no_output_____"
]
],
[
[
"%load_ext rpy2.ipython",
"_____no_output_____"
]
],
[
[
"- The above turns on the %R \"magic\"\n- R's plot() command responds differently based on what you hand to it; Different models get different plots!\n - For any specific model search for plot.modelname. E.g. for a GAM model, search plot.gam for any details of plotting a GAM model\n- The %R \"magic\" runs R code in 'notebook' mode, so figures display nicely\n - Ahead of the `plot(<model>)` code we pass in the variables R needs to know about (`-i` is for \"input\")",
"_____no_output_____"
]
],
[
[
"%R -i diab_lm plot(diab_lm);",
"_____no_output_____"
]
],
[
[
"**Reading R's documentation**",
"_____no_output_____"
],
[
"The documentation for the `lm()` funciton is [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html), and a prettier version (same content) is [here](https://www.rdocumentation.org/packages/stats/versions/3.5.2/topics/lm). When googling, perfer rdocumentation.org when possible.\nSections:\n - **Usage**: gives the function signature, including all optional arguments\n - **Arguments**: What each function input controls\n - **Details**: additional info on what the funciton *does* and how arguments interact. **Often the right place to start reading**\n - **Value**: the structure of the object returned by the function\n - **Refferences**: The relevant academic papers\n - **See Also**: other functions of interest",
"_____no_output_____"
],
[
"<div class=\"exercise\"><b>Exercise 2</b></div>\n\n1. Add confidence intervals calculated in R to the linear regression plot above. Use the `interval=` argument to `r_predict()` (documentation [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/predict.lm.html)). You will have to work with a matrix returned by R.\n2. Fit a 5th degree polynomial to the diabetes data in R. Search the web for an easier method than writing out a formula with all 5 polynomial terms.",
"_____no_output_____"
],
[
"**Answers**\n\n1. ",
"_____no_output_____"
]
],
[
[
"# your code here",
"_____no_output_____"
]
],
[
[
"2.",
"_____no_output_____"
]
],
[
[
"# your code here",
"_____no_output_____"
]
],
[
[
"## Lowess Smoothing\nLowess Smoothing is implemented in both Python and R. We'll use it as another example as we transition languages. ",
"_____no_output_____"
],
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n - What is lowess smoothing? Which 109a models is it related to?\n - How explainable is lowess?\n - What are the tunable parameters?",
"_____no_output_____"
],
[
"**In Python**",
"_____no_output_____"
]
],
[
[
"from statsmodels.nonparametric.smoothers_lowess import lowess as lowess\n\nss1 = lowess(diab['y'],diab['age'],frac=0.15)\nss2 = lowess(diab['y'],diab['age'],frac=0.25)\nss3 = lowess(diab['y'],diab['age'],frac=0.7)\nss4 = lowess(diab['y'],diab['age'],frac=1)",
"_____no_output_____"
],
[
"ss1[:10,:] # we get back simple a smoothed y value for each x value in the data",
"_____no_output_____"
]
],
[
[
"Notice the clean code to plot different models. We'll see even cleaner code in a minute",
"_____no_output_____"
]
],
[
[
"for cur_model, cur_frac in zip([ss1,ss2,ss3,ss4],[0.15,0.25,0.7,1]):\n\n ax = diab.plot.scatter(x='age',y='y',c='Red',title=\"Lowess Fit, Fraction = {}\".format(cur_frac))\n ax.set_xlabel(\"Age at Diagnosis\")\n ax.set_ylabel(\"Log C-Peptide Concentration\")\n ax.plot(cur_model[:,0],cur_model[:,1],color=\"blue\")\n \n plt.show()",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n1. Which model has high variance, which has high bias? \n2. What makes a model high variance or high bias?",
"_____no_output_____"
],
[
"**In R** \n\nWe need to:\n - Import the loess function\n - Send data over to R\n - Call the function and get results",
"_____no_output_____"
]
],
[
[
"r_loess = robjects.r['loess.smooth'] #extract R function\nr_y = robjects.FloatVector(diab['y'])\nr_age = robjects.FloatVector(diab['age'])\n\nss1_r = r_loess(r_age,r_y, span=0.15, degree=1)",
"_____no_output_____"
],
[
"ss1_r #again, a smoothed y value for each x value in the data",
"_____no_output_____"
]
],
[
[
"<div class=\"exercise\"><b>Exercise 3</b></div>\n\nPredict the output of\n1. `ss1_r[0]`\n2. `ss1_r.rx2(\"y\")`",
"_____no_output_____"
],
[
"1.",
"_____no_output_____"
],
[
"*your answer here* ",
"_____no_output_____"
],
[
"2.",
"_____no_output_____"
],
[
"*your answer here* ",
"_____no_output_____"
],
[
"**Varying span** \nNext, some extremely clean code to fit and plot models with various parameter settings. (Though the `zip()` method seen earlier is great when e.g. the label and the parameter differ)",
"_____no_output_____"
]
],
[
[
"for cur_frac in [0.15,0.25,0.7,1]:\n \n cur_smooth = r_loess(r_age,r_y, span=cur_frac)\n\n ax = diab.plot.scatter(x='age',y='y',c='Red',title=\"Lowess Fit, Fraction = {}\".format(cur_frac))\n ax.set_xlabel(\"Age at Diagnosis\")\n ax.set_ylabel(\"Log C-Peptide Concentration\")\n ax.plot(cur_smooth[0], cur_smooth[1], color=\"blue\")\n \n plt.show()",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n- Mark wasn't kidding; the Python and R results differ for frac=.15. Thoughts?\n- Why isn't the bottom plot a straight line? We're using 100% of the data in each window...",
"_____no_output_____"
],
[
"## Smoothing Splines\nFrom this point forward, we're working with R functions; these models aren't (well) supported in Python.\n\nFor clarity: this is the fancy spline model that minimizes $MSE - \\lambda\\cdot\\text{wiggle penalty}$ $=$ $\\sum_{i=1}^N \\left(y_i - f(x_i)\\right)^2 - \\lambda \\int \\left(f''(x)\\right)^2$, across all possible functions $f$. The winner will always be a continuous, cubic polynomial with a knot at each data point",
"_____no_output_____"
],
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n- Any idea why the winner is cubic?\n- How interpretable is this model?\n- What are the tunable parameters?",
"_____no_output_____"
]
],
[
[
"r_smooth_spline = robjects.r['smooth.spline'] #extract R function\n\n# run smoothing function\nspline1 = r_smooth_spline(r_age, r_y, spar=0)",
"_____no_output_____"
]
],
[
[
"<div class=\"exercise\"><b>Exercise 4</b></div>\n\n1. We actually set the spar parameter, a scale-free value that translates to a $\\lambda$ through a complex expression. Inspect the 'spline1' result and extract the implied value of $\\lambda$\n2. Working from the fitting/plotting loop examples above, produce a plot like the one below for spar = [0,.5,.9,2], including axes labels and title.",
"_____no_output_____"
],
[
"1.",
"_____no_output_____"
]
],
[
[
"# your answer here\n",
"_____no_output_____"
]
],
[
[
"2.",
"_____no_output_____"
]
],
[
[
"# your answer here\n",
"_____no_output_____"
]
],
[
[
"**CV** \nR's `smooth_spline` funciton has built-in CV to find a good lambda. See package [docs](https://www.rdocumentation.org/packages/stats/versions/3.5.2/topics/smooth.spline).",
"_____no_output_____"
]
],
[
[
"spline_cv = r_smooth_spline(r_age, r_y, cv=True) \n\nlambda_cv = spline_cv.rx2(\"lambda\")[0]\n\nax19 = diab.plot.scatter(x='age',y='y',c='Red',title=\"smoothing spline with $\\lambda=$\"+str(np.round(lambda_cv,4))+\", chosen by cross-validation\")\nax19.set_xlabel(\"Age at Diagnosis\")\nax19.set_ylabel(\"Log C-Peptide Concentration\")\nax19.plot(spline_cv.rx2(\"x\"),spline_cv.rx2(\"y\"),color=\"darkgreen\");",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n - Does the selected model look reasonable?\n - How would you describe the effect of age at diagnosis on C_peptide concentration?\n - What are the costs/benefits of the (fancy) spline model, relative to the linear regression we fit above?",
"_____no_output_____"
],
[
"## Natural & Basis Splines\nHere, we take a step backward on model complexity, but a step forward in coding complexity. We'll be working with R's formula interface again, so we will need to populate Formulas and DataFrames.",
"_____no_output_____"
],
[
"<div class=\"discussion\"><b>Discussion</b></div>\n\n- In what way are Natural and Basis splines less complex than the splines we were just working with?\n- What makes a spline 'natural'?\n- What makes a spline 'basis'?\n- What are the tuning parameters?",
"_____no_output_____"
]
],
[
[
"#We will now work with a new dataset, called GAGurine.\n#The dataset description (from the R package MASS) is below:\n#Data were collected on the concentration of a chemical GAG \n# in the urine of 314 children aged from zero to seventeen years. \n# The aim of the study was to produce a chart to help a paediatrican\n# to assess if a child's GAG concentration is ‘normal’.\n\n#The variables are:\n# Age: age of child in years.\n# GAG: concentration of GAG (the units have been lost).",
"_____no_output_____"
],
[
"GAGurine = pd.read_csv(\"data/GAGurine.csv\")\ndisplay(GAGurine.head())\n\nax31 = GAGurine.plot.scatter(x='Age',y='GAG',c='black',title=\"GAG in urine of children\")\nax31.set_xlabel(\"Age\");\nax31.set_ylabel(\"GAG\");",
"_____no_output_____"
]
],
[
[
"Standard stuff: import function, convert variables to R format, call function",
"_____no_output_____"
]
],
[
[
"from rpy2.robjects.packages import importr\nr_splines = importr('splines')\n\n# populate R variables\nr_gag = robjects.FloatVector(GAGurine['GAG'].values)\nr_age = robjects.FloatVector(GAGurine['Age'].values)\nr_quarts = robjects.FloatVector(np.quantile(r_age,[.25,.5,.75])) #woah, numpy functions run on R objects!",
"_____no_output_____"
]
],
[
[
"What happens when we call the ns or bs functions from r_splines?",
"_____no_output_____"
]
],
[
[
"ns_design = r_splines.ns(r_age, knots=r_quarts)\nbs_design = r_splines.bs(r_age, knots=r_quarts)",
"_____no_output_____"
],
[
"print(ns_design)",
"_____no_output_____"
]
],
[
[
"`ns` and `bs` return design matrices, not model objects! That's because they're meant to work with `lm`'s formula interface. To get a model object we populate a formula including `ns(<var>,<knots>)` and fit to data",
"_____no_output_____"
]
],
[
[
"r_lm = robjects.r['lm']\nr_predict = robjects.r['predict']\n\n# populate the formula\nns_formula = robjects.Formula(\"Gag ~ ns(Age, knots=r_quarts)\")\nns_formula.environment['Gag'] = r_gag\nns_formula.environment['Age'] = r_age\nns_formula.environment['r_quarts'] = r_quarts\n \n# fit the model\nns_model = r_lm(ns_formula)",
"_____no_output_____"
]
],
[
[
"Predict like usual: build a dataframe to predict on and call `predict()`",
"_____no_output_____"
]
],
[
[
"# predict\npredict_frame = robjects.DataFrame({\"Age\": robjects.FloatVector(np.linspace(0,20,100))})\n\nns_out = r_predict(ns_model, predict_frame)",
"_____no_output_____"
],
[
"ax32 = GAGurine.plot.scatter(x='Age',y='GAG',c='grey',title=\"GAG in urine of children\")\nax32.set_xlabel(\"Age\")\nax32.set_ylabel(\"GAG\")\nax32.plot(predict_frame.rx2(\"Age\"),ns_out, color='red')\nax32.legend([\"Natural spline, knots at quartiles\"]);",
"_____no_output_____"
]
],
[
[
"<div class=\"exercise\"><b>Exercise 5</b></div>\n\n1. Fit a basis spline model with the same knots, and add it to the plot above\n2. Fit a basis spline with 8 knots placed at [2,4,6...14,16] and add it to the plot above",
"_____no_output_____"
],
[
"**Answers:** \n1.",
"_____no_output_____"
]
],
[
[
"# your answer here\n",
"_____no_output_____"
]
],
[
[
"2.",
"_____no_output_____"
]
],
[
[
"# your answer here\n",
"_____no_output_____"
],
[
"#%R -i overfit_model plot(overfit_model)\n# we'd get the same diagnostic plot we get from an lm model",
"_____no_output_____"
]
],
[
[
"## GAMs\nWe come, at last, to our most advanced model. The coding here isn't any more complex than we've done before, though the behind-the-scenes is awesome.\n\nFirst, let's get our (multivariate!) data",
"_____no_output_____"
]
],
[
[
"kyphosis = pd.read_csv(\"data/kyphosis.csv\")\n\nprint(\"\"\"\n# kyphosis - wherther a particular deformation was present post-operation\n# age - patient's age in months\n# number - the number of vertebrae involved in the operation\n# start - the number of the topmost vertebrae operated on\n\n\"\"\")\ndisplay(kyphosis.head())\ndisplay(kyphosis.describe(include='all'))\ndisplay(kyphosis.dtypes)",
"_____no_output_____"
],
[
"#If there are errors about missing R packages, run the code below:\n\n#r_utils = importr('utils')\n#r_utils.install_packages('codetools')\n#r_utils.install_packages('gam')",
"_____no_output_____"
]
],
[
[
"To fit a GAM, we\n - Import the `gam` library\n - Populate a formula including `s(<var>)` on variables we want to fit smooths for\n - Call `gam(formula, family=<string>)` where `family` is a string naming a probability distribution, chosen based on how the response variable is thought to occur. \n - Rough `family` guidelines:\n - Response is binary or \"N occurances out of M tries\", e.g. number of lab rats (out of 10) developing disease: chooose `\"binomial\"`\n - Response is a count with no logical upper bound, e.g. number of ice creams sold: choose `\"poisson\"`\n - Response is real, with normally-distributed noise, e.g. person's height: choose `\"gaussian\"` (the default)",
"_____no_output_____"
]
],
[
[
"#There is a Python library in development for using GAMs (https://github.com/dswah/pyGAM)\n# but it is not yet as comprehensive as the R GAM library, which we will use here instead.\n\n# R also has the mgcv library, which implements some more advanced/flexible fitting methods\n\nr_gam_lib = importr('gam')\nr_gam = r_gam_lib.gam\n\nr_kyph = robjects.FactorVector(kyphosis[[\"Kyphosis\"]].values)\nr_Age = robjects.FloatVector(kyphosis[[\"Age\"]].values)\nr_Number = robjects.FloatVector(kyphosis[[\"Number\"]].values)\nr_Start = robjects.FloatVector(kyphosis[[\"Start\"]].values)\n\nkyph1_fmla = robjects.Formula(\"Kyphosis ~ s(Age) + s(Number) + s(Start)\")\nkyph1_fmla.environment['Kyphosis']=r_kyph\nkyph1_fmla.environment['Age']=r_Age\nkyph1_fmla.environment['Number']=r_Number\nkyph1_fmla.environment['Start']=r_Start\n\n\nkyph1_gam = r_gam(kyph1_fmla, family=\"binomial\")",
"_____no_output_____"
]
],
[
[
"The fitted gam model has a lot of interesting data within it",
"_____no_output_____"
]
],
[
[
"print(kyph1_gam.names)",
"_____no_output_____"
]
],
[
[
"Remember plotting? Calling R's `plot()` on a gam model is the easiest way to view the fitted splines",
"_____no_output_____"
]
],
[
[
"%R -i kyph1_gam plot(kyph1_gam, residuals=TRUE,se=TRUE, scale=20);",
"_____no_output_____"
]
],
[
[
"Prediction works like normal (build a data frame to predict on, if you don't already have one, and call `predict()`). However, predict always reports the sum of the individual variable effects. If `family` is non-default this can be different from the actual prediction for that point.\n\nFor instance, we're doing a 'logistic regression' so the raw prediction is log odds, but we can get probability by using in `predict(..., type=\"response\")`",
"_____no_output_____"
]
],
[
[
"kyph_new = robjects.DataFrame({'Age': robjects.IntVector((84,85,86)), \n 'Start': robjects.IntVector((5,3,1)), \n 'Number': robjects.IntVector((1,6,10))})\n\nprint(\"Raw response (so, Log odds):\")\ndisplay(r_predict(kyph1_gam, kyph_new))\nprint(\"Scaled response (so, probabilty of kyphosis):\")\ndisplay(r_predict(kyph1_gam, kyph_new, type=\"response\"))",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Discussion</b></div>\n<div class=\"exercise\"><b>Exercise 6</b></div>\n\n1. What lambda did we use? \n2. What is the model telling us about the effects of age, starting vertebrae, and number of vertebae operated on\n3. If we fit a logistic regression instead, which variables might want quadratic terms. What is the cost and benefit of a logistic regression model versus a GAM?\n4. Critique the model: \n - What is it assuming? Are the assumptions reasonable\n - Are we using the right data?\n - Does the model's story about the world make sense?",
"_____no_output_____"
],
[
"## Appendix",
"_____no_output_____"
],
[
"GAMs and smoothing splines support hypothesis tets to compare models. (We can always compare models via out-of-sample prediction quality (i.e. performance on a validation set), but statistical ideas like hypothesis tests yet information criteria allow us to use all data for training *and* still compare the quality of model A to model B)",
"_____no_output_____"
]
],
[
[
"r_anova = robjects.r[\"anova\"]\n\nkyph0_fmla = robjects.Formula(\"Kyphosis~1\")\nkyph0_fmla.environment['Kyphosis']=r_kyph\n\nkyph0_gam = r_gam(kyph0_fmla, family=\"binomial\")\nprint(r_anova(kyph0_gam, kyph1_gam, test=\"Chi\"))",
"_____no_output_____"
]
],
[
[
"**Explicitly joining spline functions**",
"_____no_output_____"
]
],
[
[
"def h(x, xi, pow_arg): #pow is a reserved keyword in Python\n if (x > xi):\n return pow((x-xi),pow_arg)\n else:\n return 0\nh = np.vectorize(h,otypes=[np.float]) #default behavior is to return ints, which gives incorrect answer\n#also, vectorize does not play nicely with default arguments, so better to set directly (e.g., pow_arg=1)",
"_____no_output_____"
],
[
"xvals = np.arange(0,10.1,0.1)\nax20 = plt.plot(xvals,h(xvals,4,1),color=\"red\")\n_ = plt.title(\"Truncated linear basis function with knot at x=4\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$(x-4)_+$\") #note the use of TeX in the label",
"_____no_output_____"
],
[
"ax21 = plt.plot(xvals,h(xvals,4,3),color=\"red\")\n_ = plt.title(\"Truncated cubic basis function with knot at x=4\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$(x-4)_+^3$\")",
"_____no_output_____"
],
[
"ax22 = plt.plot(xvals,2+xvals+3*h(xvals,2,1)-4*h(xvals,5,1)+0.5*h(xvals,8,1),color=\"red\")\n_ = plt.title(\"Piecewise linear spline with knots at x=2, 5, and 8\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")",
"_____no_output_____"
]
],
[
[
"Comparing splines to the (noisy) model that generated them.",
"_____no_output_____"
]
],
[
[
"x = np.arange(0.1,10,9.9/100) \nfrom scipy.stats import norm\n#ppf (percent point function) is the rather unusual name for\n#the quantile or inverse CDF function in SciPy\ny = norm.ppf(x/10) + np.random.normal(0,0.4,100)\nax23 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"3 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+h(x,2,1)+h(x,5,1)+h(x,8,1)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"ax24 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"6 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+h(x,1,1)+h(x,2,1)+h(x,3.5,1)+h(x,5,1)+h(x,6.5,1)+h(x,8,1)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"ax25 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"9 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+h(x,1,1)+h(x,2,1)+h(x,3,1)+h(x,4,1)+h(x,5,1)+h(x,6,1)+h(x,7,1)+h(x,8,1)+h(x,9,1)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"regstr = 'y~x+'\nfor i in range(1,26):\n regstr += 'h(x,'+str(i/26*10)+',1)+'\nregstr = regstr[:-1] #drop last +\nax26 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"25 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols(regstr,data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
]
],
[
[
"### Exercise:\n\nTry generating random data from different distributions and fitting polynomials of different degrees to it. What do you observe?\n",
"_____no_output_____"
]
],
[
[
"# try it here",
"_____no_output_____"
],
[
"#So, we see that increasing the number of knots results in a more polynomial-like fit",
"_____no_output_____"
],
[
"#Next, we look at cubic splines with increasing numbers of knots\nax27 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"3 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+np.power(x,2)+np.power(x,3)+h(x,2,3)+h(x,5,3)+h(x,8,3)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"ax28 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"6 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+np.power(x,2)+np.power(x,3)+h(x,1,3)+h(x,2,3)+h(x,3.5,3)+h(x,5,3)+h(x,6.5,3)+h(x,8,3)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"ax29 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"9 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols('y~x+np.power(x,2)+np.power(x,3)+h(x,1,3)+h(x,2,3)+h(x,3,3)+h(x,4,3)+h(x,5,3)+h(x,6,3)+h(x,7,3)+h(x,8,3)+h(x,9,3)',data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
],
[
"regstr2 = 'y~x+np.power(x,2)+np.power(x,3)+'\nfor i in range(1,26):\n regstr2 += 'h(x,'+str(i/26*10)+',3)+'\nregstr2 = regstr2[:-1] #drop last +\nax30 = plt.scatter(x,y,facecolors='none', edgecolors='black')\n_ = plt.title(\"25 knots\")\n_ = plt.xlabel(\"$x$\")\n_ = plt.ylabel(\"$y$\")\n_ = plt.plot(x,sm.ols(regstr2,data={'x':x,'y':y}).fit().predict(),color=\"darkblue\",linewidth=2)\n_ = plt.plot(x,norm.ppf(x/10),color=\"red\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9cbd62e914e70618bffb4c68e6c9050c8df78e
| 10,030 |
ipynb
|
Jupyter Notebook
|
docs/tutorials/itemsets/LCM_on_chess.ipynb
|
sdall/scikit-mine
|
a8b63035ff1d766924788d16df136eb1dcc49da3
|
[
"BSD-3-Clause"
] | 51 |
2020-06-03T14:58:51.000Z
|
2022-01-17T11:05:13.000Z
|
docs/tutorials/itemsets/LCM_on_chess.ipynb
|
sdall/scikit-mine
|
a8b63035ff1d766924788d16df136eb1dcc49da3
|
[
"BSD-3-Clause"
] | 111 |
2020-04-21T09:13:53.000Z
|
2022-01-16T19:03:19.000Z
|
docs/tutorials/itemsets/LCM_on_chess.ipynb
|
sdall/scikit-mine
|
a8b63035ff1d766924788d16df136eb1dcc49da3
|
[
"BSD-3-Clause"
] | 7 |
2020-03-19T17:45:45.000Z
|
2022-01-07T14:51:34.000Z
| 25.717949 | 131 | 0.373779 |
[
[
[
"## Linear time Closed item set Miner\nLCM looks for closed itemset with respect to an input minimum support",
"_____no_output_____"
],
[
"#### load the chess dataset",
"_____no_output_____"
]
],
[
[
"from skmine.datasets.fimi import fetch_chess\nchess = fetch_chess()\nchess.head()",
"_____no_output_____"
],
[
"chess.shape",
"_____no_output_____"
]
],
[
[
"#### fit_discover()\nfit_discover makes pattern discovery more user friendly by outputting pretty formatted\npatterns, instead of the traditional tabular format used in the `scikit` community",
"_____no_output_____"
]
],
[
[
"from skmine.itemsets import LCM\nlcm = LCM(min_supp=2000, n_jobs=4)\n# minimum support of 2000, running on 4 processes\n%time patterns = lcm.fit_discover(chess)",
"CPU times: user 473 ms, sys: 410 ms, total: 883 ms\nWall time: 9.56 s\n"
],
[
"patterns.shape",
"_____no_output_____"
]
],
[
[
"This format in which patterns are rendered makes post hoc analysis easier\n\nHere we filter patterns with a length strictly superior to 3",
"_____no_output_____"
]
],
[
[
"patterns[patterns.itemset.map(len) > 3]",
"_____no_output_____"
]
],
[
[
"`Note`\n\nEven when setting a very high minimum support threshold, we discovered more than 60K from only 3196 original transactions.\nThis is a good illustration of the so-called **pattern explosion problem**",
"_____no_output_____"
],
[
"------------\nWe could also get the top-k patterns in terms of supports, with a single line of code",
"_____no_output_____"
]
],
[
[
"patterns.nlargest(10, columns=['support']) # top 10 patterns",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a9cc10ee8099668c964ad5eeeac50eac38f370e
| 4,571 |
ipynb
|
Jupyter Notebook
|
04. Python Numpy/08 diagonals.ipynb
|
yacper/Quant101
|
92048ed93e8d053bb258b126d86844bd84efca2e
|
[
"Apache-2.0"
] | null | null | null |
04. Python Numpy/08 diagonals.ipynb
|
yacper/Quant101
|
92048ed93e8d053bb258b126d86844bd84efca2e
|
[
"Apache-2.0"
] | null | null | null |
04. Python Numpy/08 diagonals.ipynb
|
yacper/Quant101
|
92048ed93e8d053bb258b126d86844bd84efca2e
|
[
"Apache-2.0"
] | null | null | null | 16.09507 | 51 | 0.419383 |
[
[
[
"# 对角线",
"_____no_output_____"
],
[
"这里,使用与之前不同的导入方法:",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"使用numpy中的函数前,需要加上 `np.`:",
"_____no_output_____"
]
],
[
[
"a = np.array([11,21,31,12,22,32,13,23,33])\na.shape = 3,3\na",
"_____no_output_____"
]
],
[
[
"查看它的对角线元素:",
"_____no_output_____"
]
],
[
[
"a.diagonal()",
"_____no_output_____"
]
],
[
[
"可以使用偏移来查看它的次对角线,正数表示右移,负数表示左移:",
"_____no_output_____"
]
],
[
[
"a.diagonal(offset=1)",
"_____no_output_____"
],
[
"a.diagonal(offset=-1)",
"_____no_output_____"
]
],
[
[
"可以使用花式索引来得到对角线:",
"_____no_output_____"
]
],
[
[
"i = [0,1,2]\na[i, i]",
"_____no_output_____"
]
],
[
[
"可以更新对角线的值:",
"_____no_output_____"
]
],
[
[
"a[i, i] = 2\na",
"_____no_output_____"
]
],
[
[
"修改次对角线的值:",
"_____no_output_____"
]
],
[
[
"i = np.array([0,1])\na[i, i + 1] = 1\na",
"_____no_output_____"
],
[
"a[i + 1, i] = -1\na",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9cc3b253a2c99683185349a400221ec2d3d4d1
| 20,539 |
ipynb
|
Jupyter Notebook
|
7.25(1).ipynb
|
xym99/mysql
|
e9c8d81a60f74722f461948f45c595f1fe347f98
|
[
"Apache-2.0"
] | null | null | null |
7.25(1).ipynb
|
xym99/mysql
|
e9c8d81a60f74722f461948f45c595f1fe347f98
|
[
"Apache-2.0"
] | null | null | null |
7.25(1).ipynb
|
xym99/mysql
|
e9c8d81a60f74722f461948f45c595f1fe347f98
|
[
"Apache-2.0"
] | null | null | null | 17.332489 | 954 | 0.421734 |
[
[
[
"# 列表List\n- 一个列表可以储存任意大小的数据集合,你可以理解为他是一个容器",
"_____no_output_____"
]
],
[
[
"def b():\n pass#列表储存数据",
"_____no_output_____"
],
[
"a=[1,2,1,5,'ab',True,b]\na",
"_____no_output_____"
],
[
"c='zxc'\nlist(c)",
"_____no_output_____"
],
[
"\"\".join(['a','b'])",
"_____no_output_____"
]
],
[
[
"## 先来一个例子爽一爽\n",
"_____no_output_____"
],
[
"## 创建一个列表\n- a = [1,2,3,4,5]",
"_____no_output_____"
],
[
"## 列表的一般操作\n",
"_____no_output_____"
]
],
[
[
"s='aaa'\ns*8",
"_____no_output_____"
],
[
"a=100\nb=[1,2,34,5]\na in b",
"_____no_output_____"
],
[
"a=[100]\nb=[1,2,3,4,a]\na in b",
"_____no_output_____"
],
[
"a=[1,2]\nb=[3]\na+b",
"_____no_output_____"
],
[
"a=[1,2]\nb=[3]\nb+a",
"_____no_output_____"
],
[
"a=[1,2,34,[1000,2333]]\na",
"_____no_output_____"
],
[
"a[3]",
"_____no_output_____"
],
[
"a[3][1]",
"_____no_output_____"
],
[
"a=[1,2,34,[1000,[2888],2333]]\na",
"_____no_output_____"
],
[
"a[3][1][0]",
"_____no_output_____"
],
[
"b=[1,2,3,4,5]",
"_____no_output_____"
],
[
"b[1]=100",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"b=[1,2,3,4,5,6,7,8,9,10]",
"_____no_output_____"
],
[
"b[1:11:2]",
"_____no_output_____"
],
[
"for i in range(0,10,2):\n b[i]=100\nb",
"_____no_output_____"
],
[
"[1,2][4,5][7,8][10,11]",
"_____no_output_____"
],
[
"for i in range(0,10,3):\n print(b[i:i+2])\n",
"[1, 2]\n[4, 5]\n[7, 8]\n[10]\n"
],
[
"陈贺大傻子=[1,2,3,[1,2,3,[3,4,5,6]]]\nlen(陈贺大傻子)",
"_____no_output_____"
],
[
"hhh=[1,2,3,[5,6,7]]",
"_____no_output_____"
],
[
"count=0\nfor i in hhh:\n if type(i)==list:\n for j in i:\n count=count+1\n else:\n count=count+1\nlen(hhh)\n ",
"_____no_output_____"
],
[
"a=[1,2,3]\nfor i in a:\n print(i)",
"1\n2\n3\n"
],
[
"a=[1,2,3,True]\nmax(a)",
"_____no_output_____"
],
[
"hhh=[1,2,3,[5,6,7]]",
"_____no_output_____"
],
[
"a=[1,2,3]\nb=[2,3,4]\na>b",
"_____no_output_____"
],
[
"b=[4,3,2,1]\n",
"_____no_output_____"
],
[
"length=len(b)\nfor i in range(length):\n for j in range ",
"_____no_output_____"
],
[
"def Dx(b):\n n=len(b)\n for i in range(0,n-1):\n for j in range(0,n-1-i):\n if b[j]>b[j+1]:\n b[j],b[j+1]=b[j+1],b[j]\nDx(b)\nprint(b)",
"[1, 2, 3, 4]\n"
]
],
[
[
"# 列表索引操作\n- Mylist[index]\n- 正序索引,逆序索引\n- 列表一定注意越界\n- ",
"_____no_output_____"
],
[
"## 列表切片操作\n- Mylist[start:end]\n- 正序切片,逆序切片",
"_____no_output_____"
],
[
"## 列表 +、*、in 、not in",
"_____no_output_____"
],
[
"## 使用for循环遍历元素\n- for 循环可以遍历一切可迭代元素",
"_____no_output_____"
],
[
"## EP:\n- 使用while 循环遍历列表",
"_____no_output_____"
],
[
"## 列表的比较\n- \\>,<,>=,<=,==,!=",
"_____no_output_____"
],
[
"## 列表生成式\n[x for x in range(10)]",
"_____no_output_____"
],
[
"## 列表的方法\n",
"_____no_output_____"
]
],
[
[
"d=[1,2,3,4,5]\nd.remove(3)\nd",
"_____no_output_____"
],
[
"a=[1,2,3]\nb=100\na.append(b)#只能接受一个元素,元素里可有多个\na",
"_____no_output_____"
],
[
"a=[1,2,5,3,[5,6],8]\na.count(5)",
"_____no_output_____"
],
[
"a=[1,2,3]\nb=[100,22]\nb.extend(a)",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"a.extend(b)\na",
"_____no_output_____"
],
[
"c=[1,2,3,4,5]\nc.insert(0,100)\nc.insert(3,100)",
"_____no_output_____"
],
[
"c=[1,2,3,4,5,6,7,8,9]",
"_____no_output_____"
],
[
"for i in range(0,len(c)+3,3):\n c.insert(i,100)\nc",
"_____no_output_____"
],
[
    "d=[1,2,4,5,3,7]  # (作业)",
"_____no_output_____"
],
[
    "d=[1,2,4,5,3,7]\nfor i in d:\n    if i%2==0:\n        print('')\n    else:\n        print('100',i)\n",
"_____no_output_____"
]
],
[
[
"## 将字符串分割成列表\n- split 按照自定义的内容拆分",
"_____no_output_____"
],
[
"## EP:\n\n",
"_____no_output_____"
],
[
"## 列表的复制\n- copy 浅复制\n- deepcopy import copy 深复制\n- http://www.pythontutor.com/visualize.html#mode=edit",
"_____no_output_____"
],
[
"## 列表排序\n- sort\n- sorted\n- 列表的多级排序\n - 匿名函数",
"_____no_output_____"
]
],
[
[
"(lambda x:print(x))(100)",
"100\n"
],
[
"*强制命名",
"_____no_output_____"
],
[
"c=[1,2,3,4]\nc.sort(reverse=True)\nc",
"_____no_output_____"
]
],
[
[
"## EP:\n- 手动排序该列表[5,3,8,0,17],以升序或者降序",
"_____no_output_____"
],
[
"- 1\n",
"_____no_output_____"
]
],
[
[
"a=eval(input('请输入成绩列表:'))\nb=max(a)\nfor i in a:\n if i>=b-10:\n print(i,'A')\n elif i>=b-20:\n print(i,'B')\n elif i>=b-30:\n print(i,'C') \n elif i>=b-40:\n print(i,'D')\n else:\n print(i,'F')",
"请输入成绩列表:54,87,33,23\n54 D\n87 A\n33 F\n23 F\n"
]
],
[
[
"- 2\n",
"_____no_output_____"
]
],
[
[
"a,b,c,d= int(input('请输入一个整数列表:'))\nprint(a,b,c,d[::-1])",
"请输入一个整数列表:54,87,55,39\n"
]
],
[
[
"- 3\n",
"_____no_output_____"
]
],
[
[
"a=eval(input('Enter integers between 1 and 100:'))\nfor i in a:\n b=a.count(i) \n print(i,'occurs',b,'times')",
"Enter integers between 1 and 100:2,5,6,5,4,3,23,43,2\n2 occurs 2 times\n5 occurs 2 times\n6 occurs 1 times\n5 occurs 2 times\n4 occurs 1 times\n3 occurs 1 times\n23 occurs 1 times\n43 occurs 1 times\n2 occurs 2 times\n"
]
],
[
[
"- 4\n",
"_____no_output_____"
],
[
"- 5\n",
"_____no_output_____"
],
[
"- 6\n",
"_____no_output_____"
],
[
"- 7\n\n",
"_____no_output_____"
],
[
"- 8\n",
"_____no_output_____"
],
[
"- 9\n",
"_____no_output_____"
],
[
"- 10\n",
"_____no_output_____"
],
[
"- 11\n",
"_____no_output_____"
],
[
"- 12\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a9cc5c9388b7a4206ef6add96f8df034514e403
| 179,886 |
ipynb
|
Jupyter Notebook
|
notebooks/eda_notebooks/telco-customer-churn-eda-predictions-shap.ipynb
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/eda_notebooks/telco-customer-churn-eda-predictions-shap.ipynb
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/eda_notebooks/telco-customer-churn-eda-predictions-shap.ipynb
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null | 55.640582 | 45,720 | 0.702434 |
[
[
[
"<br>\n<h1 style = \"font-size:30px; font-weight : bold; color : black; text-align: center; border-radius: 10px 15px;\"> Telco Customer Churn: EDA, Predictions and Feature Importance with SHAP </h1>\n<br>",
"_____no_output_____"
],
[
"# Goals\nPerform an Exploratory Data Analysis (EDA) to visualize and understand:\n* The distribution of values of the target and features;\n* The relationship between each feature and the likelihood of customer churn.\n\nPredict churn using 20% of data as test set using the following models:\n* Logistic Regression;\n* Random Forest;\n* XGBoost;\n* Catboost.\n\nUnderstand how each feature impacts the predicted value using:\n* Feature Importance;\n* SHAP.",
"_____no_output_____"
],
[
"# <a id='0'>Content</a>\n\n- <a href='#1'>Dataset Information</a> \n- <a href='#2'>Importing Packages and Dataset + Data Cleaning</a> \n- <a href='#3'>Exploratory Data Analysis</a> \n - <a href='#31'>Demographic Features</a> \n - <a href='#32'>Services Related Features</a>\n - <a href='#33'>Account Information Features (categorical)</a>\n - <a href='#34'>Account Information Features (numerical)</a>\n- <a href='#4'>Creating and Evaluating Models</a>\n - <a href='#41'>Logistic Regression</a> \n - <a href='#42'>Random Forest</a>\n - <a href='#43'>Random Forest w/preprocessing</a>\n - <a href='#44'>XGBoost</a> \n - <a href='#45'>CatBoost</a> \n - <a href='#46'>Feature Importance and SHAP Plot</a> \n- <a href='#5'>References</a>",
"_____no_output_____"
],
[
"## <center> If you find this notebook useful, support with an upvote! <center>",
"_____no_output_____"
],
[
"# <a id=\"1\">Dataset Information</a> ",
"_____no_output_____"
],
[
"\n### Content\n\n\"Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs.\" [IBM Sample Data Sets]\n\nEach row represents a customer, each column contains customer’s attributes described on the column Metadata.\n\nThe data set includes information about:\n\n- Customers who left within the last month – the column is called Churn\n- Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies\n- Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges\n- Demographic info about customers – gender, age range, and if they have partners and dependents",
"_____no_output_____"
],
[
"# <a id=\"2\">Importing Packages and Dataset + Data Cleaning</a> ",
"_____no_output_____"
]
],
[
[
"import pandas as pd \nimport matplotlib as mat\nimport matplotlib.pyplot as plt \nimport numpy as np\nimport seaborn as sns\n%matplotlib inline\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom catboost import CatBoostClassifier\nfrom catboost import Pool\n\nimport shap\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"df = pd.read_csv('../data/raw/data.csv')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7043 entries, 0 to 7042\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 customerID 7043 non-null int64 \n 1 gender 7043 non-null object \n 2 SeniorCitizen 7043 non-null int64 \n 3 Partner 7043 non-null object \n 4 Dependents 7043 non-null object \n 5 tenure 7043 non-null int64 \n 6 PhoneService 7043 non-null object \n 7 MultipleLines 7043 non-null object \n 8 InternetService 7043 non-null object \n 9 OnlineSecurity 7043 non-null object \n 10 OnlineBackup 7043 non-null object \n 11 DeviceProtection 7043 non-null object \n 12 TechSupport 7043 non-null object \n 13 StreamingTV 7043 non-null object \n 14 StreamingMovies 7043 non-null object \n 15 Contract 7043 non-null object \n 16 PaperlessBilling 7043 non-null object \n 17 PaymentMethod 7043 non-null object \n 18 MonthlyCharges 7043 non-null float64\n 19 TotalCharges 7032 non-null float64\n 20 Churn 7043 non-null object \ndtypes: float64(2), int64(3), object(16)\nmemory usage: 1.1+ MB\n"
]
],
[
[
"Apparently, there are no missing values. But there is clearly an error. ‘Total Charges’ should be numeric. We can use pd.to_numeric to convert it.",
"_____no_output_____"
]
],
[
[
"df['TotalCharges'] = pd.to_numeric(df['TotalCharges'], errors='coerce')\ndf['TotalCharges'].dtype",
"_____no_output_____"
]
],
[
[
"After changing a column from string to numeric, some values may not be recognized, resulting in missing values. Let’s check if this happened.",
"_____no_output_____"
]
],
[
[
"df['TotalCharges'].isnull().sum()",
"_____no_output_____"
]
],
[
[
"Now there are supposedly 11 missing values, but they might indicate that there were no charges for that customer up to the point when the data was obtained. The feature 'tenure' indicates for how long someone has been a customer. Let's check the number of samples with value '0' on that feature and, in case we also find 11 customers, compare if their index match those from the 'missing' values.",
"_____no_output_____"
]
],
[
[
"df['tenure'].isin([0]).sum()",
"_____no_output_____"
],
[
"print(df[df['tenure'].isin([0])].index)\nprint(df[df['TotalCharges'].isna()].index)",
"Int64Index([768, 928, 1783, 2177, 2332, 2424, 2483, 2540, 2905, 2985, 4924], dtype='int64')\nInt64Index([768, 928, 1783, 2177, 2332, 2424, 2483, 2540, 2905, 2985, 4924], dtype='int64')\n"
]
],
[
[
"We got a match here. After confirming our suspects, we can replace those missing values with '0'.",
"_____no_output_____"
]
],
[
[
"df.loc[:,'TotalCharges'] = df.loc[:,'TotalCharges'].replace(np.nan,0)\ndf['TotalCharges'].isnull().sum()",
"_____no_output_____"
]
],
[
[
"The feature 'Senior Citizen', which is categorical ('Yes' or 'No'), is set as numeric. Although all features will be changed to numeric to be used in our prediction models, I'll convert it from numeric to string for now.",
"_____no_output_____"
]
],
[
[
"df['SeniorCitizen'] = df['SeniorCitizen'].apply(str)\nsenior_map = {'0': 'No', '1': 'Yes'}\ndf['SeniorCitizen'] = df['SeniorCitizen'].map(senior_map)\n\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7043 entries, 0 to 7042\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 customerID 7043 non-null int64 \n 1 gender 7043 non-null object \n 2 SeniorCitizen 7043 non-null object \n 3 Partner 7043 non-null object \n 4 Dependents 7043 non-null object \n 5 tenure 7043 non-null int64 \n 6 PhoneService 7043 non-null object \n 7 MultipleLines 7043 non-null object \n 8 InternetService 7043 non-null object \n 9 OnlineSecurity 7043 non-null object \n 10 OnlineBackup 7043 non-null object \n 11 DeviceProtection 7043 non-null object \n 12 TechSupport 7043 non-null object \n 13 StreamingTV 7043 non-null object \n 14 StreamingMovies 7043 non-null object \n 15 Contract 7043 non-null object \n 16 PaperlessBilling 7043 non-null object \n 17 PaymentMethod 7043 non-null object \n 18 MonthlyCharges 7043 non-null float64\n 19 TotalCharges 7043 non-null float64\n 20 Churn 7043 non-null object \ndtypes: float64(2), int64(2), object(17)\nmemory usage: 1.1+ MB\n"
]
],
[
[
"Let's finish this section by checking the possible values of categorical features and viewing descriptive statistics (df.describe) for numerical features.",
"_____no_output_____"
]
],
[
[
"for col in df.select_dtypes('object').columns:\n print(col, '- # unique values:', df[col].nunique())",
"gender - # unique values: 2\nSeniorCitizen - # unique values: 2\nPartner - # unique values: 2\nDependents - # unique values: 2\nPhoneService - # unique values: 2\nMultipleLines - # unique values: 3\nInternetService - # unique values: 3\nOnlineSecurity - # unique values: 3\nOnlineBackup - # unique values: 3\nDeviceProtection - # unique values: 3\nTechSupport - # unique values: 3\nStreamingTV - # unique values: 3\nStreamingMovies - # unique values: 3\nContract - # unique values: 3\nPaperlessBilling - # unique values: 2\nPaymentMethod - # unique values: 4\nChurn - # unique values: 2\n"
],
[
"for col in df.select_dtypes('object').columns:\n print(col, '\\n')\n print(df[col].value_counts(), '\\n')",
"gender \n\nMale 3555\nFemale 3488\nName: gender, dtype: int64 \n\nSeniorCitizen \n\nNo 5901\nYes 1142\nName: SeniorCitizen, dtype: int64 \n\nPartner \n\nNo 3641\nYes 3402\nName: Partner, dtype: int64 \n\nDependents \n\nNo 4933\nYes 2110\nName: Dependents, dtype: int64 \n\nPhoneService \n\nYes 6361\nNo 682\nName: PhoneService, dtype: int64 \n\nMultipleLines \n\nNo 3390\nYes 2971\nNo phone service 682\nName: MultipleLines, dtype: int64 \n\nInternetService \n\nFiber optic 3096\nDSL 2421\nNo 1526\nName: InternetService, dtype: int64 \n\nOnlineSecurity \n\nNo 3498\nYes 2019\nNo internet service 1526\nName: OnlineSecurity, dtype: int64 \n\nOnlineBackup \n\nNo 3088\nYes 2429\nNo internet service 1526\nName: OnlineBackup, dtype: int64 \n\nDeviceProtection \n\nNo 3095\nYes 2422\nNo internet service 1526\nName: DeviceProtection, dtype: int64 \n\nTechSupport \n\nNo 3473\nYes 2044\nNo internet service 1526\nName: TechSupport, dtype: int64 \n\nStreamingTV \n\nNo 2810\nYes 2707\nNo internet service 1526\nName: StreamingTV, dtype: int64 \n\nStreamingMovies \n\nNo 2785\nYes 2732\nNo internet service 1526\nName: StreamingMovies, dtype: int64 \n\nContract \n\nMonth-to-month 3875\nTwo year 1695\nOne year 1473\nName: Contract, dtype: int64 \n\nPaperlessBilling \n\nYes 4171\nNo 2872\nName: PaperlessBilling, dtype: int64 \n\nPaymentMethod \n\nElectronic check 2365\nMailed check 1612\nBank transfer (automatic) 1544\nCredit card (automatic) 1522\nName: PaymentMethod, dtype: int64 \n\nChurn \n\nNo 5174\nYes 1869\nName: Churn, dtype: int64 \n\n"
],
[
"df.describe().T",
"_____no_output_____"
]
],
[
[
"# <a id=\"3\">Exploratory Data Analysis</a> ",
"_____no_output_____"
],
[
"We will start our EDA by looking at the distribution of the target variable (Churn). It’s expected that the dataset is imbalanced, with less than 50% of the customers leaving the company",
"_____no_output_____"
],
[
"## Churn",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(6,4))\n\nax = sns.countplot(x=\"Churn\", data=df, palette=\"rocket\")\n\nplt.xlabel(\"Churn?\", fontsize= 12)\nplt.ylabel(\"# of Clients\", fontsize= 12)\nplt.ylim(0,7500)\nplt.xticks([0,1], ['No', 'Yes'], fontsize = 11)\n\nfor p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.30, p.get_height()+300), fontsize = 14)\n \nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,5))\n\ndf['Churn'].value_counts().plot(kind='pie',labels = ['',''], autopct='%1.1f%%', colors = ['indigo','salmon'], explode = [0,0.05], textprops = {\"fontsize\":15})\n\nplt.legend(labels=['No Churn', 'Churn'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"At the period represented in this dataset, there is a 26,5% of customer churn. As we move on to analyze the features, we can compare this number with the percentage of churn found for each category, providing us a better idea on the impact of a given feature in the company’s ability to retain its customers.",
"_____no_output_____"
]
],
[
[
"#Label encoding Churn to use sns.barplot\nle = LabelEncoder()\ndf['Churn'] = le.fit_transform(df['Churn'])\ndf['Churn'].value_counts()",
"_____no_output_____"
]
],
[
[
"We can divide the features into the following groups:\n- Demographic features;\n- Services related features\n- Account information related features (categorical and numerical).\n\nFor each group, we’ll start by looking at the features’ distributions. Then, we’ll check the percentage of churn for each category to understand their relationship with the target.",
"_____no_output_____"
]
],
[
[
"demo_features = ['gender', 'SeniorCitizen', 'Partner', 'Dependents']\n\nserv_features = ['PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup'\n , 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies']\n\ncat_accinfo_features = ['Contract', 'PaperlessBilling', 'PaymentMethod']\n\nnum_accinfo_features = ['tenure', 'MonthlyCharges', 'TotalCharges']",
"_____no_output_____"
]
],
[
[
"## <a id=\"31\">Demographic Features</a> ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(18,12))\n\nfor i,col in enumerate(demo_features): \n plt.subplot(2,2,i + 1)\n \n ax = sns.countplot(data = df, x = col, palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,7000)\n plt.xticks(fontsize= 15)\n plt.yticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.32, p.get_height()+300), fontsize= 16)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(18,12))\n\nfor i,col in enumerate(demo_features): \n plt.subplot(2,2,i + 1)\n \n ax = sns.countplot(data = df, x = col, hue=\"Churn\", palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,7000)\n plt.xticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.14, p.get_height()+300), fontsize= 14)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,10))\n\nfor i,col in enumerate(demo_features): \n plt.subplot(2,2,i + 1)\n \n ax = sns.barplot(x = col, y = \"Churn\", data = df, palette = 'rocket', ci = None)\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"% of Churn\", fontsize= 13)\n plt.ylim(0,0.5)\n plt.xticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.35, p.get_height()+0.03),fontsize=15)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"What we can observe for each feature:\n- Gender: There is barely any difference in churn percentage between men and women;\n- Senior Citizen: The churn percentage for senior customers are above 40%, indicating a high likelihood of churn from that group;\n- Partner: Single customers are more likely to churn than customers with partners;\n- Dependents: Customers with dependents are less likely to churn than customers without any dependents.\n\nWe could go a little further and combine the two ‘family-related’ features, ‘Partner’ and ‘Dependents’ to see if, in fact, both of them contribute to the chance of customer churn or retention.\n\nIt is expected that the majority of customers with dependents are married and, for instance, it could be that the partnership has more influence on the target than the fact that a customer has or hasn’t a child. Although this might be unlikely, by analyzing both features together, we can discard such hypothesis with more confidence.",
"_____no_output_____"
]
],
[
[
"df.groupby(['Partner'])['Dependents'].value_counts()",
"_____no_output_____"
]
],
[
[
"As expected, most customers with dependents also have a partner. Yet, the number of single customers with dependents seems significant enough for us to draw some conclusions about this particular group.",
"_____no_output_____"
]
],
[
[
"df.groupby(by=['Partner', 'Dependents'])['Churn'].value_counts(normalize = True)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,4))\n\nax = sns.barplot(x = \"Dependents\", y = \"Churn\", hue = \"Partner\", data = df, palette = 'rocket', ci = None)\n\nplt.ylabel(\"% of Churn\", fontsize= 12)\nplt.ylim(0,0.5)\n\nfor p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.15, p.get_height()+0.03),fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see that both features contribute to the likelihood of churn. The group of people with partners and dependents and the group with neither of those are on the extremes in terms of likelihood of churn (14% and 34%, respectively). The churn of customers with partners and without dependents falls close to the overall percentage of churn in our dataset, while the ‘opposite’ group still have a lower chance of it.",
"_____no_output_____"
],
[
"## <a id=\"32\">Services Related Features</a> ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(18,30))\n\nfor i,col in enumerate(serv_features): \n plt.subplot(5,2,i + 1)\n \n ax = sns.countplot(data = df, x = col, palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,7500)\n plt.xticks(fontsize= 15)\n plt.yticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.31, p.get_height()+300), fontsize= 16)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"A relatively small group of customers doesn’t have internet services and an even smaller one doesn’t have phone services. One thing to keep in mind is that most services can be and/or are only provided to customers who sign the Telco’s internet service.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(18,30))\n\nfor i,col in enumerate(serv_features): \n plt.subplot(5,2,i + 1)\n \n ax = sns.countplot(data = df, x = col, hue=\"Churn\", palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,7000)\n plt.xticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.12, p.get_height()+300), fontsize= 13)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,25))\n\nfor i,col in enumerate(serv_features): \n plt.subplot(5,2,i + 1)\n \n ax = sns.barplot(x = col, y = \"Churn\", data = df, palette = 'rocket', ci = None)\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"% of Churn\", fontsize= 13)\n plt.ylim(0,0.5)\n plt.xticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.32, p.get_height()+0.03),fontsize=15)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
    "Curiously enough, the difference of churn between clients with and without phone services is quite small, being negligible if we take those with multiple lines out of the equation. In this group of features, the real game-changing ones in terms of customer retainment are those related to internet services.\n\nIn the feature ‘InternetServices’, the percentage of churn in each category is highly different one from another. Those who don’t subscribe to the company’s internet (presumably, they only use their phone service), are the most likely to endure as their customers. The likelihood of churn from customers with DSL service is also smaller than the overall probability. \n\nThe highest percentage of churn, with over 40%, is from customers with fiber optic internet. Fiber optic tends to be faster than DSL internet, but their subscription is usually more expensive as well. We don't have the information about the fee for each service, but at least we can find the mean value of monthly charges per type of internet just to have an idea that this is the case.\n",
"_____no_output_____"
]
],
[
[
"df.groupby(by=['InternetService'])['MonthlyCharges'].mean().sort_values()",
"_____no_output_____"
]
],
[
[
"As expected, the average charges for each service are significantly different, with fiber optic been the most expensive. Without any additional information, it’s hard to draw definitive conclusions, but it seems that the cost-benefit relationship of their fiber optic service is far from been attractive enough to retain customers.\n\nSuch a high churn rate might indicate that their service’s quality is subpar in terms of speed and/or reliability. Analyzing complaints received by their customer service call center service to extract useful and specific information about their internet is a must. A survey with a significant group of customers, aiming to understand how they perceive the quality of the service, is another step to find the problem and to help defining the course of action.\n\nAs for the other services, the likelihood of churn from customers who have each one of them is actually lower than from those who haven’t. The higher differences are found in ‘TechSupport’ and ‘OnlineSecurity’, while the lower ones are found in the streaming services.\n\nLet’s calculate the average monthly charges from each category in the Tech Support and Online Security features.",
"_____no_output_____"
]
],
[
[
"print(df.groupby(by=['TechSupport'])['MonthlyCharges'].mean().sort_values(), '\\n')\nprint(df.groupby(by=['OnlineSecurity'])['MonthlyCharges'].mean().sort_values(), '\\n')\nprint(df.groupby(by=['OnlineSecurity', 'TechSupport'])['MonthlyCharges'].mean().sort_values())",
"_____no_output_____"
]
],
[
[
"Both services don’t seem to affect the subscription charges by much. If the company can quantify the cost of providing each service per customer and find out that it is relatively small, they could either reduce the extra subscription fee for those additional services or simply cut that fee and offer those services as standard for internet customers for a trial period. Given that most customers don’t subscribe to those services and given that they have a significant impact on the customer retainment, it’s possible that such strategy could result in a higher profit on the long term.\n\nLet’s see if the churn rate gets significantly lower for customers who have access to both services.",
"_____no_output_____"
]
],
[
[
"print(df.groupby(by=['TechSupport'])['OnlineSecurity'].value_counts(), '\\n')",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,4))\n\nax = sns.barplot(x = \"TechSupport\", y = \"Churn\", hue = \"OnlineSecurity\", data = df, palette = 'rocket', ci = None)\n\nplt.ylabel(\"% of Churn\", fontsize= 12)\nplt.ylim(0,1.0)\n\nfor p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.070, p.get_height()+0.03),fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The differences in terms of churn rate are quite significant. While customers who don’t use neither of those services have a close to 50% chance of churn, the churn rate for those who have both is lower than 10%, supporting the previous point.",
"_____no_output_____"
],
[
"## <a id=\"33\">Account Information Features (categorical)</a> ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12,15))\n\nfor i,col in enumerate(cat_accinfo_features): \n plt.subplot(3,1,i + 1)\n \n ax = sns.countplot(data = df, x = col, palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,5000)\n plt.xticks(fontsize= 14)\n plt.yticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.32, p.get_height()+300), fontsize= 15)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,15))\n\nfor i,col in enumerate(cat_accinfo_features): \n plt.subplot(3,1,i + 1)\n \n ax = sns.countplot(data = df, x = col, hue=\"Churn\", palette = 'rocket')\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"# of Clients\", fontsize= 13)\n plt.ylim(0,5000)\n plt.xticks(fontsize= 13)\n\n for p in ax.patches:\n ax.annotate((p.get_height()), (p.get_x()+0.135, p.get_height()+300), fontsize= 14)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,15))\n\nfor i,col in enumerate(cat_accinfo_features): \n plt.subplot(3,1,i + 1)\n \n ax = sns.barplot(x = col, y = \"Churn\", data = df, palette = 'rocket', ci = None)\n\n plt.xlabel(col, fontsize= 14)\n plt.ylabel(\"% of Churn\", fontsize= 13)\n plt.ylim(0,0.55)\n plt.xticks(fontsize= 14)\n\n for p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.32, p.get_height()+0.02),fontsize=15)\n\nplt.tight_layout()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Naturally, in terms of contract, the highest churn rate is from the ‘month-to-month’ type, which is also the most dominant contract. What seems odd is the high chance of churn from customers who choose electronic check as payment method and opts for paperless billing. It could be, for instance, that most customers in the month-to-month contract also fall into those categories. We can check that.",
"_____no_output_____"
]
],
[
[
"print(df.groupby(by=['Contract'])['PaperlessBilling'].value_counts(normalize = True),' \\n')\nprint(df.groupby(by=['Contract'])['PaymentMethod'].value_counts(normalize = True))",
"_____no_output_____"
]
],
[
[
"When we group the dataset by contract, we can see that the percentage of customers who don’t receive their bills through the mail and that pay them via electronic check is higher for the ‘month-to-month’ type. Yet, this doesn’t seem to be enough to justify such a high churn rate for those categories. There is a good chance that we will find higher percentages of churn in them, regardless of the type of contract. Let’s see.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12,4))\n\nax = sns.barplot(x = \"PaperlessBilling\", y = \"Churn\", hue = \"Contract\", data = df, palette = 'rocket', ci = None)\n\nplt.ylabel(\"% of Churn\", fontsize= 12)\nplt.ylim(0,0.6)\n\nfor p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.08, p.get_height()+0.03),fontsize=14)\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,4))\n\nax = sns.barplot(x = \"PaymentMethod\", y = \"Churn\", hue = \"Contract\", data = df, palette = 'rocket', ci = None)\n\nplt.ylabel(\"% of Churn\", fontsize= 12)\nplt.ylim(0,0.6)\n\nfor p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.05, p.get_height()+0.020),fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
    "The likelihood of churn is, in fact, higher for those categories, regardless of type of contract. Personally, it is hard for me to see a causality, without additional information or domain knowledge, between the churn rate and the way someone receives their bill and chooses to pay them. It is more likely that those two features are associated with several others. The internet service, a feature with notable differences of churn rate between each one of its categories, could present some correlation between them.",
"_____no_output_____"
]
],
[
[
"print(df.groupby(by=['InternetService'])['PaperlessBilling'].value_counts(normalize = True), '\\n')\nprint(df.groupby(by=['InternetService'])['PaymentMethod'].value_counts(normalize = True))",
"_____no_output_____"
]
],
[
[
    "What stands out here in our grouping operations:\n- Customers with Internet Service = ‘No’: Less than 30% receive paperless bills and only 8% pay them with electronic check;\n- Customers with Internet Service = ‘Fiber Optic’: 77% receive paperless bills and more than 51% pay them with electronic check.\n\nWe can recall that the lowest churn rate in the internet services feature is from those customers who don’t use Telco’s internet, while the highest is found among those who use their fiber optic internet. So, we can say that those results don’t come out as a surprise. \n\nAlthough we shouldn’t conclude that the payment method or the way the bills are sent have a direct influence in the customer retainment, it is worth to point that those features will probably be useful for our prediction models.",
"_____no_output_____"
],
[
"## <a id=\"34\">Account Information Features (numerical)</a> ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12,15))\n\nfor i,col in enumerate(num_accinfo_features): \n plt.subplot(3,1,i + 1)\n sns.distplot(df.loc[:,col])\n #plt.ticklabel_format(style='plain', axis='x') #repressing scientific notation \n plt.ylabel('')\n plt.tight_layout()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,15))\n\nfor i,col in enumerate(num_accinfo_features): \n plt.subplot(3,1,i + 1) \n sns.kdeplot(df.loc[(df['Churn'] == 0), col], label = 'No Churn', shade = True)\n sns.kdeplot(df.loc[(df['Churn'] == 1), col], label = 'Churn', shade = True)\n plt.legend()\n plt.ylabel('')\n plt.tight_layout()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"What we can observe for each feature:\n- Tenure: High concentration of churned customer in the first months.\n- Monthly Charges: High concentration of churned customer in higher values (around 60 and beyond)\n- Total Charges: Somewhat similar distributions, but the ‘No churn’ distribution have lower values.\n\nLet’s get the mean values to complement our analysis.",
"_____no_output_____"
]
],
[
[
"print(df.groupby(by=['Churn'])['tenure'].mean().sort_values(), '\\n')\nprint(df.groupby(by=['Churn'])['MonthlyCharges'].mean().sort_values(), '\\n')\nprint(df.groupby(by=['Churn'])['TotalCharges'].mean().sort_values())",
"_____no_output_____"
]
],
[
[
"As expected, the average tenure period for churned customers is lower and the average monthly charges are higher than the same metrics for retained customers. The average total charges are lower for churned customers, which is probably due to their lower tenure.\n\nThe density plot for churned customers in the ‘tenure’ feature showed a high concentration in the first months. Let’s divide this feature in bins to get the churn rate per year of service.",
"_____no_output_____"
]
],
[
[
"df['tenure_bin'] = pd.cut(df['tenure'],[-1,12,24,36,48,60,100])\ndf['tenure_bin'].value_counts(sort = False)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,4))\n\nax = sns.barplot(x = \"tenure_bin\", y = \"Churn\", data = df, palette = 'rocket', ci = None)\n\nplt.ylabel(\"% of Churn\", fontsize= 12)\nplt.ylim(0,0.6)\nplt.xticks([0,1,2,3,4,5], ['12 or less', '13 to 24', '25 to 36', '37 to 48', '49 to 60', 'more than 60'], fontsize = 12)\nplt.xlabel(\"Tenure Group (in months)\", fontsize= 12)\n\n\n\nfor p in ax.patches:\n ax.annotate(\"%.2f\" %(p.get_height()), (p.get_x()+0.25, p.get_height()+0.03),fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Almost 50 percent of those who became a customer for a year or less ended up leaving the company. It’s not unusual to have a higher churn rate in the first year or two for some types of business. Nevertheless, a churn rate this high in the first year indicates that the quality of the service provided fails to hold up to their new customers’ expectation.",
"_____no_output_____"
],
[
"# <a id=\"4\">Creating and Evaluating Models</a>",
"_____no_output_____"
],
[
"Now, let's move on to the predictive models. In this notebook, we will use the Area Under the Curve of Receiver Characteristic Operator (AUC-ROC or ROC-AUC) as the main metric to assess the performance of our models. The ROC-AUC measures the ability of a model is to distinguish between classes. [(Link for more information about ROC-AUC)](https://www.analyticsvidhya.com/blog/2020/06/auc-roc-curve-machine-learning/). Nevertheless, we will also check the accuracy, the classification report and the confusion matrix for each model.\n\nFirst, we will make a copy of the dataset and separate the features from the target.",
"_____no_output_____"
]
],
[
[
"X = df.copy().drop('Churn', axis = 1)\nY = df['Churn'].copy()",
"_____no_output_____"
]
],
[
[
"We’re also going to remove the customer_id and the feature ‘tenure_bin’, that we created for EDA purposes, since we’re not planning to use them",
"_____no_output_____"
]
],
[
[
"X = X.drop(['customerID', 'tenure_bin'], axis = 1)\nX",
"_____no_output_____"
],
[
"X.info()",
"_____no_output_____"
]
],
[
[
"We need to encode the features to use them in our models. We could use something like sklearn’s OrdinalEncoder for this, but I’ll do it manually. This effort will pay off later when we’ll analyze the predictions using SHAP.",
"_____no_output_____"
]
],
[
[
"gender_map = {'Female': 0, 'Male': 1}\nyes_or_no_map = {'No': 0, 'Yes': 1} #seniorcitizen, partner, dependents, phoneservice, paperlessbilling\nmultiplelines_map = {'No phone service': -1, 'No': 0, 'Yes': 1}\ninternetservice_map = {'No': -1, 'DSL': 0, 'Fiber optic': 1}\nadd_netservices_map = {'No internet service': -1, 'No': 0, 'Yes': 1} #onlinesecurity, onlinebackup, deviceprotection,techsupport,streaming services\ncontract_map = {'Month-to-month': 0, 'One year': 1, 'Two year': 2}\npaymentmethod_map = {'Electronic check': 0, 'Mailed check': 1, 'Bank transfer (automatic)': 2, 'Credit card (automatic)': 3}\n\n\nX['gender'] = X['gender'].map(gender_map).astype('int')\nX['Partner'] = X['Partner'].map(yes_or_no_map).astype('int')\nX['SeniorCitizen'] = X['SeniorCitizen'].map(yes_or_no_map).astype('int')\nX['Dependents'] = X['Dependents'].map(yes_or_no_map).astype('int')\nX['PhoneService'] = X['PhoneService'].map(yes_or_no_map).astype('int')\nX['MultipleLines'] = X['MultipleLines'].map(multiplelines_map).astype('int')\nX['InternetService'] = X['InternetService'].map(internetservice_map).astype('int')\nX['OnlineSecurity'] = X['OnlineSecurity'].map(add_netservices_map).astype('int')\nX['OnlineBackup'] = X['OnlineBackup'].map(add_netservices_map).astype('int')\nX['DeviceProtection'] = X['DeviceProtection'].map(add_netservices_map).astype('int')\nX['TechSupport'] = X['TechSupport'].map(add_netservices_map).astype('int')\nX['StreamingTV'] = X['StreamingTV'].map(add_netservices_map).astype('int')\nX['StreamingMovies'] = X['StreamingMovies'].map(add_netservices_map).astype('int')\nX['Contract'] = X['Contract'].map(contract_map).astype('int')\nX['PaperlessBilling'] = X['PaperlessBilling'].map(yes_or_no_map).astype('int')\nX['PaymentMethod'] = X['PaymentMethod'].map(paymentmethod_map).astype('int')\n",
"_____no_output_____"
],
[
"X.info()",
"_____no_output_____"
]
],
[
[
"Now we will split the data into train and test sets.",
"_____no_output_____"
]
],
[
[
"X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42\n , stratify = Y)",
"_____no_output_____"
]
],
[
[
"## <a id=\"41\">Logistic Regression</a> ",
"_____no_output_____"
],
[
"The first model we're going to use is Logistic Regression, which will require two things for a better performance:\n- Scaling the numerical features;\n- (One hot) encoding the categorical (nominal) features.\n\nWe can use the Column Transformer to assign each transformation to its correct features and fit it in a pipeline as a preprocessing step.",
"_____no_output_____"
]
],
[
[
"num_features = num_accinfo_features\n\ncat_3p_features = []\nfor col in X.columns:\n if (X[col].nunique() > 2) & (X[col].nunique() < 5): #less than 5 to exclude the numerical features\n cat_3p_features.append(col)\nprint('Numerical features: ', num_features, '\\n') \nprint('Nominal with 3 or more categories: ', cat_3p_features)",
"_____no_output_____"
],
[
"cat_transformer = OneHotEncoder(handle_unknown='ignore')\nnum_transformer = StandardScaler()\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', num_transformer, num_features),\n ('cat', cat_transformer, cat_3p_features) \n ], remainder='passthrough')",
"_____no_output_____"
],
[
"lr_pipe = Pipeline([('Transformers', preprocessor)\n ,('LR', LogisticRegression(random_state = 42, max_iter = 1000))])",
"_____no_output_____"
]
],
[
[
"Even without the intent of doing an extensive hyperparameter tuning, we can give each model a better chance of good performance by testing some values for a key parameter and choosing one of them based on cross-validation score.",
"_____no_output_____"
]
],
[
[
"def cv_function (model, param, list):\n \n rp_st_kfold = RepeatedStratifiedKFold(n_splits=10, n_repeats = 3, random_state = 42)\n search_model = model\n print ('Hyperparameter: ', param)\n \n for i in list:\n param_dict = {param : i}\n search_model.set_params(**param_dict) \n cv_score = cross_val_score(search_model, X_train, Y_train, cv=rp_st_kfold, scoring='roc_auc')\n print(\"Parameter: {0:0.2f} - AUC(SD): {1:0.4f} ({2:0.4f})\". format(i, cv_score.mean(), cv_score.std()))\n \nparams_lr_list = [0.01,0.1,0.2,0.3,0.5,0.7,1,2,3,5]\nparam_lr = 'LR__C'\ncv_function(lr_pipe, param_lr, params_lr_list)",
"_____no_output_____"
]
],
[
[
"After some point, there is barely an improvement. Choice: C = 3.0",
"_____no_output_____"
]
],
[
[
"lr_param = {'LR__C': 3.0}\nlr_pipe.set_params(**lr_param) \nlr_pipe",
"_____no_output_____"
]
],
[
[
"Now, let’s fit this model and predict.",
"_____no_output_____"
]
],
[
[
"lr_pipe.fit(X_train, Y_train)\npred_lr = lr_pipe.predict(X_test)\n\nprint(\"Test Accuracy: \",metrics.accuracy_score(Y_test, pred_lr))",
"_____no_output_____"
],
[
"lr_confusion_matrix = metrics.confusion_matrix(Y_test, pred_lr)\nsns.heatmap(lr_confusion_matrix, annot=True, fmt=\"d\")\n\nplt.xlabel(\"Predicted Label\", fontsize= 12)\nplt.ylabel(\"True Label\", fontsize= 12)\n\nplt.show()",
"_____no_output_____"
],
[
"print(metrics.classification_report(Y_test, pred_lr, labels = [0, 1]))",
"_____no_output_____"
],
[
"lr_pred_proba = lr_pipe.predict_proba(X_test)[:,1]\n\nlr_roc_auc = metrics.roc_auc_score(Y_test, lr_pred_proba)\nprint('ROC_AUC: ', lr_roc_auc)\n\nlr_fpr, lr_tpr, thresholds = metrics.roc_curve(Y_test, lr_pred_proba)\n\nplt.plot(lr_fpr,lr_tpr, label = 'ROC_AUC = %0.3f' % lr_roc_auc)\n\nplt.xlabel(\"False Positive Rate\", fontsize= 12)\nplt.ylabel(\"True Positive Rate\", fontsize= 12)\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## <a id=\"42\">Random Forest</a> ",
"_____no_output_____"
],
[
"For every model, we’re going to follow the same steps that we made with Logistic Regression, with the exception of using a pipeline for preprocessing.",
"_____no_output_____"
]
],
[
[
"rf_model = RandomForestClassifier(random_state = 42)\n\nparams_rf_list = [100,150,200,250,300,400,500]\nparam_rf = 'n_estimators'\ncv_function(rf_model, param_rf, params_rf_list)",
"_____no_output_____"
],
[
"rf_param = {'n_estimators': 500}\nrf_model.set_params(**rf_param) \nrf_model",
"_____no_output_____"
],
[
"rf_model.fit(X_train, Y_train)\npred_rf = rf_model.predict(X_test)\n\nprint(\"Test Accuracy: \",metrics.accuracy_score(Y_test, pred_rf))",
"_____no_output_____"
],
[
"rf_confusion_matrix = metrics.confusion_matrix(Y_test, pred_rf)\nsns.heatmap(rf_confusion_matrix, annot=True, fmt=\"d\")\n\nplt.xlabel(\"Predicted Label\", fontsize= 12)\nplt.ylabel(\"True Label\", fontsize= 12)\n\nplt.show()",
"_____no_output_____"
],
[
"print(metrics.classification_report(Y_test, pred_rf, labels = [0, 1]))",
"_____no_output_____"
],
[
"rf_pred_proba = rf_model.predict_proba(X_test)[:,1]\n\nrf_roc_auc = metrics.roc_auc_score(Y_test, rf_pred_proba)\nprint('ROC_AUC: ', rf_roc_auc)\n\nrf_fpr, rf_tpr, thresholds = metrics.roc_curve(Y_test, rf_pred_proba)\n\nplt.plot(rf_fpr,rf_tpr, label = 'ROC_AUC = %0.3f' % rf_roc_auc)\n\nplt.xlabel(\"False Positive Rate\", fontsize= 12)\nplt.ylabel(\"True Positive Rate\", fontsize= 12)\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The results we found with Random Forest were quite disappointing. Although feature scaling and one hot encoding aren’t necessary, we can use them just for testing purposes.",
"_____no_output_____"
],
[
"## <a id=\"43\">Random Forest with Preprocessing</a> ",
"_____no_output_____"
]
],
[
[
"rf_pipe = Pipeline([('Transformers', preprocessor)\n ,('RF', RandomForestClassifier(n_estimators = 500, random_state = 42))])\n\nrf_pipe.fit(X_train, Y_train)\npred_rf_pipe = rf_pipe.predict(X_test)\n\nprint(\"Test Accuracy: \",metrics.accuracy_score(Y_test, pred_rf_pipe))",
"_____no_output_____"
],
[
"rf_pipe_confusion_matrix = metrics.confusion_matrix(Y_test, pred_rf_pipe)\nsns.heatmap(rf_pipe_confusion_matrix, annot=True, fmt=\"d\")\n\nplt.xlabel(\"Predicted Label\", fontsize= 12)\nplt.ylabel(\"True Label\", fontsize= 12)\n\nplt.show()",
"_____no_output_____"
],
[
"print(metrics.classification_report(Y_test, pred_rf_pipe, labels = [0, 1]))",
"_____no_output_____"
],
[
"rf_pipe_pred_proba = rf_pipe.predict_proba(X_test)[:,1]\n\nrf_pipe_roc_auc = metrics.roc_auc_score(Y_test, rf_pipe_pred_proba)\nprint('ROC_AUC: ', rf_pipe_roc_auc)\n\nrf_pipe_fpr, rf_pipe_tpr, thresholds = metrics.roc_curve(Y_test, rf_pipe_pred_proba)\n\nplt.plot(rf_pipe_fpr,rf_pipe_tpr, label = 'ROC_AUC = %0.3f' % rf_pipe_roc_auc)\n\nplt.xlabel(\"False Positive Rate\", fontsize= 12)\nplt.ylabel(\"True Positive Rate\", fontsize= 12)\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"It did not go too well either. Let’s move on to the boosting models.",
"_____no_output_____"
],
[
"## <a id=\"44\">XGBoost</a> ",
"_____no_output_____"
]
],
[
[
"xgb_model = XGBClassifier(learning_rate = 0.05 ,random_state = 42, eval_metric = 'logloss')\n\nparams_xgb_list = [50,75,100,150,200,250,300]\nparam_xgb = 'n_estimators'\ncv_function(xgb_model, param_xgb, params_xgb_list)",
"_____no_output_____"
],
[
"xgb_param = {'n_estimators': 75}\nxgb_model.set_params(**xgb_param) \nxgb_model",
"_____no_output_____"
],
[
"xgb_model.fit(X_train, Y_train, eval_set = [(X_test,Y_test)])\n\npred_xgb = xgb_model.predict(X_test)\n\nprint(\"Test Accuracy: \",metrics.accuracy_score(Y_test, pred_xgb))",
"_____no_output_____"
],
[
"xgb_confusion_matrix = metrics.confusion_matrix(Y_test, pred_xgb)\nsns.heatmap(xgb_confusion_matrix, annot=True, fmt=\"d\")\n\nplt.xlabel(\"Predicted Label\", fontsize= 12)\nplt.ylabel(\"True Label\", fontsize= 12)\n\nplt.show()",
"_____no_output_____"
],
[
"print(metrics.classification_report(Y_test, pred_xgb, labels = [0, 1]))",
"_____no_output_____"
],
[
"xgb_pred_proba = xgb_model.predict_proba(X_test)[:,1]\n\nxgb_roc_auc = metrics.roc_auc_score(Y_test, xgb_pred_proba)\nprint('ROC_AUC: ', xgb_roc_auc)\n\nxgb_fpr, xgb_tpr, thresholds = metrics.roc_curve(Y_test, xgb_pred_proba)\n\nplt.plot(xgb_fpr,xgb_tpr, label = 'ROC_AUC = %0.3f' % xgb_roc_auc)\n\nplt.xlabel(\"False Positive Rate\", fontsize= 12)\nplt.ylabel(\"True Positive Rate\", fontsize= 12)\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## <a id=\"45\">Catboost</a> ",
"_____no_output_____"
]
],
[
[
"categorical_ft = [x for x in X.columns if x not in num_features]\nprint(categorical_ft)",
"_____no_output_____"
],
[
"cat_model = CatBoostClassifier (random_state = 42, eval_metric = 'AUC', cat_features = categorical_ft, verbose = 0)\n#cat_model.get_params()\n\nparams_cat_list = [50,75,100,150,200,250,300]\nparam_cat = 'n_estimators'\ncv_function(cat_model, param_cat, params_cat_list)",
"_____no_output_____"
],
[
"cat_param = {'n_estimators':100}\ncat_model.set_params(**cat_param) \n#cat_model",
"_____no_output_____"
],
[
"cat_model.fit(X_train, Y_train, eval_set = [(X_test,Y_test)], cat_features = categorical_ft)\n\n#xgb_model.fit(X_train, Y_train, early_stopping_rounds = 100, eval_set = [(X_test,Y_test)])\n#cat_model.fit(X_train, Y_train)\n\npred_cat = cat_model.predict(X_test)\n\nprint(\"Test Accuracy: \",metrics.accuracy_score(Y_test, pred_cat))",
"_____no_output_____"
],
[
"cat_confusion_matrix = metrics.confusion_matrix(Y_test, pred_cat)\nsns.heatmap(cat_confusion_matrix, annot=True, fmt=\"d\")\n\nplt.xlabel(\"Predicted Label\", fontsize= 12)\nplt.ylabel(\"True Label\", fontsize= 12)\n\nplt.show()",
"_____no_output_____"
],
[
"print(metrics.classification_report(Y_test, pred_cat, labels = [0, 1]))",
"_____no_output_____"
],
[
"cat_pred_proba = cat_model.predict_proba(X_test)[:,1]\n\ncat_roc_auc = metrics.roc_auc_score(Y_test, cat_pred_proba)\nprint('ROC_AUC: ', cat_roc_auc)\n\ncat_fpr, cat_tpr, thresholds = metrics.roc_curve(Y_test, cat_pred_proba)\n\nplt.plot(cat_fpr,cat_tpr, label = 'ROC_AUC = %0.3f' % cat_roc_auc)\n\nplt.xlabel(\"False Positive Rate\", fontsize= 12)\nplt.ylabel(\"True Positive Rate\", fontsize= 12)\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Results (AUC/accuracy):\n- Logistic Regression: 0.842/0.807\n- Random Forest: 0.825/0.788\n- Random Forest w/preprocessing: 0.823/0.780\n- XGBoost: 0.846/0.806\n- Catboost: 0.849/0.813",
"_____no_output_____"
],
[
"The Catboost yielded the best results, although they were quite close from those obtained with XGBoost and Logistic Regression.",
"_____no_output_____"
],
[
"## <a id=\"46\">Feature Importance and SHAP Plot</a>",
"_____no_output_____"
],
[
"Let’s see what features have more importance for the Catboost’s predictions.",
"_____no_output_____"
]
],
[
[
"pool = Pool(X_train, Y_train, cat_features=categorical_ft)\n\nFeature_importance = pd.DataFrame({'feature_importance': cat_model.get_feature_importance(pool), \n 'feature_names': X_train.columns}).sort_values(by=['feature_importance'], \n ascending=False)\n\nFeature_importance",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\n\nsns.barplot(x=Feature_importance['feature_importance'], y=Feature_importance['feature_names'], palette = 'rocket')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"To better interpret the model’s results, and maybe gain some insights, we can use the SHAP package [(link)](https://shap.readthedocs.io/en/latest/example_notebooks/tabular_examples/tree_based_models/Catboost%20tutorial.html).",
"_____no_output_____"
]
],
[
[
"explainer = shap.TreeExplainer(cat_model)\nshap_values = explainer.shap_values(pool)\n\nshap.summary_plot(shap_values, X_train)",
"_____no_output_____"
]
],
[
[
"Since we manually encoded the categorical features, it becomes easier to understand what’s been represented in each category. For instance, the feature ‘contract’ has 3 categories. ‘Month-to-month’ was encoded with the lowest value and it’s represented by the blue color. ‘One year’ is the mid value and it’s represented in purple. ‘Two years’ is the highest value and is represented in red. We can clearly see that the ‘month-to-month’ category impacts the prediction towards the positive value (churn), while the other types of contracts push the prediction into the opposite direction (no churn).",
"_____no_output_____"
],
[
"# <a id=\"5\">References</a>",
"_____no_output_____"
],
[
"- https://www.analyticsvidhya.com/blog/2020/06/auc-roc-curve-machine-learning/\n- https://shap.readthedocs.io/en/latest/example_notebooks/tabular_examples/tree_based_models/Catboost%20tutorial.html",
"_____no_output_____"
],
[
"## <center> If you find this notebook useful, support with an upvote! <center>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a9cccc6ffa14be52ec3a1e96ee2d494ce43ddbf
| 3,900 |
ipynb
|
Jupyter Notebook
|
lab7.ipynb
|
owainscholbe/ia241
|
b880351b5d765dbc1a5b9e3a4ca7c68442c029e2
|
[
"MIT"
] | null | null | null |
lab7.ipynb
|
owainscholbe/ia241
|
b880351b5d765dbc1a5b9e3a4ca7c68442c029e2
|
[
"MIT"
] | null | null | null |
lab7.ipynb
|
owainscholbe/ia241
|
b880351b5d765dbc1a5b9e3a4ca7c68442c029e2
|
[
"MIT"
] | 1 |
2019-03-19T12:37:24.000Z
|
2019-03-19T12:37:24.000Z
| 19.796954 | 132 | 0.452821 |
[
[
[
"# Lab 7",
"_____no_output_____"
],
[
"#### 1) \n\nCompany: Leidos\nDecription: Entry Level Intelligence Analyst \n[website](https://careers.leidos.com/jobs/3674591-intelligence-analyst?tm_job=R-00007987&tm_event=view&tm_company=2502&bid=32)",
"_____no_output_____"
],
[
"#### 2)",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\nwith open('job.txt', 'r') as job:\n job_list = job.read().split()\n \n count_result = Counter(job_list)\n \n for word, count in count_result.most_common(20):\n print(word, count)",
"- 9\nand 8\nMust 6\nin 5\nto 4\nhave 4\nan 3\nfor 3\nlanguage 3\nExperience 3\nusing 3\nIndeed 3\nat 2\nArabic 2\nas 2\ndata 2\nreports 2\nQualifications 2\nbe 2\na 2\n"
]
],
[
[
"#### 3)",
"_____no_output_____"
]
],
[
[
"import xlwt\nfrom collections import Counter\n\nbook = xlwt.Workbook()\nsheet = book.add_sheet('word_count')\ni = 0\nsheet.write(0, 0, 'word')\nsheet.write(0, 1, 'count')\n\nwith open('job.txt', 'r') as job:\n job_list = job.read().split()\n count_result = Counter(job_list)\n \n for result in count_result.most_common(20):\n i += 1\n sheet.write(i, 0, result[0])\n sheet.write(i, 1, result[1])\n \n \nbook.save('realjobs.xls')",
"_____no_output_____"
]
],
[
[
"#### 4)",
"_____no_output_____"
]
],
[
[
"import xlrd\n\nbook = xlrd.open_workbook('realjobs.xls')\nsheet = book.sheet_by_name('word_count')\nnum_record = sheet.nrows\n\nfor i in range(num_record):\n row = sheet.row_values(i)\n word, count = row\n print(word, count)",
"word count\n- 9.0\nand 8.0\nMust 6.0\nin 5.0\nto 4.0\nhave 4.0\nan 3.0\nfor 3.0\nlanguage 3.0\nExperience 3.0\nusing 3.0\nIndeed 3.0\nat 2.0\nArabic 2.0\nas 2.0\ndata 2.0\nreports 2.0\nQualifications 2.0\nbe 2.0\na 2.0\n"
]
],
[
[
"#### 5)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a9ce47bd7c3b7bc0cac5d273111b2c7ce64b9aa
| 65,996 |
ipynb
|
Jupyter Notebook
|
Part-1_Drug_Discovery.ipynb
|
Shraeyas/Drug-Discovery-Using-Python
|
c795fd3be1d5b978f2ab401a8cf5ad6571bfb246
|
[
"CC0-1.0"
] | 1 |
2021-11-05T05:46:21.000Z
|
2021-11-05T05:46:21.000Z
|
Part-1_Drug_Discovery.ipynb
|
Shraeyas/Drug-Discovery-Using-Python
|
c795fd3be1d5b978f2ab401a8cf5ad6571bfb246
|
[
"CC0-1.0"
] | null | null | null |
Part-1_Drug_Discovery.ipynb
|
Shraeyas/Drug-Discovery-Using-Python
|
c795fd3be1d5b978f2ab401a8cf5ad6571bfb246
|
[
"CC0-1.0"
] | null | null | null | 37.097246 | 183 | 0.397994 |
[
[
[
"### **Install ChEMBL client for getting the dataset**\n\n#### **https://www.ebi.ac.uk/chembl/**",
"_____no_output_____"
]
],
[
[
"!pip install chembl_webresource_client",
"_____no_output_____"
]
],
[
[
"### **Import Libraries**",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom chembl_webresource_client.new_client import new_client",
"_____no_output_____"
]
],
[
[
"### **Find Coronavirus Dataset**",
"_____no_output_____"
],
[
"#### **Search Target**",
"_____no_output_____"
]
],
[
[
"target = new_client.target\ntarget_query = target.search ('acetylcholinesterase')\ntargets = pd.DataFrame.from_dict (target_query)\ntargets",
"_____no_output_____"
]
],
[
[
"#### **Fetch Bio-Activity data for the target**",
"_____no_output_____"
]
],
[
[
"selected_target = targets.target_chembl_id [0]\nselected_target",
"_____no_output_____"
],
[
"activity = new_client.activity\nres = activity.filter (target_chembl_id = selected_target).filter (standard_type = \"IC50\")",
"_____no_output_____"
]
],
[
[
"#### **A Higher Standard Value means we'll require more amount of the drug for same inhibition**",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame.from_dict (res)\ndf.head (3)",
"_____no_output_____"
],
[
"df.standard_type.unique ()",
"_____no_output_____"
]
],
[
[
"##### **Save the resulting Bio-Activity data to a CSV file**",
"_____no_output_____"
]
],
[
[
"import os\ndf.to_csv (os.path.join ('Datasets', 'Part-1_Bioactivity_Data.csv'), index = False)",
"_____no_output_____"
]
],
[
[
"### **Pre-Processing Data**",
"_____no_output_____"
],
[
"#### **Ignore values with Missing Standard Value data**",
"_____no_output_____"
]
],
[
[
"df2 = df [df.standard_value.notna ()]\ndf2 = df2 [df.canonical_smiles.notna ()]\ndf2",
"/home/shraeyas/miniconda3/envs/drug/lib/python3.7/site-packages/ipykernel_launcher.py:2: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n \n"
]
],
[
[
"#### **Label Compounds as active or inactive**\n##### Compounds with IC50 less than 1000nM are considered active, greater than 10000nM are considered to be inactive, in between 1000nM to 10000nM are considered intermediate\n##### 1. IC50 value of the drug indicates the toxicity of the drug to other disease causing organisms.\n##### 2. IC50 is a quantitative measure that shows how much a particular inhibitory drug/substance/extract/fraction is needed to inhibit a biological component by 50%.\n###### Above Definition taken from https://www.researchgate.net/post/What-is-the-significance-of-IC50-value-when-the-drug-is-exogenously-administered-to-an-animal-tissue",
"_____no_output_____"
]
],
[
[
"bioactivity_class = []\nfor i in df2.standard_value :\n if float (i) >= 10000 :\n bioactivity_class.append (\"inactive\")\n elif float (i) <= 1000 :\n bioactivity_class.append (\"active\")\n else :\n bioactivity_class.append (\"intermediate\")\n \nprint (len (bioactivity_class))",
"6340\n"
]
],
[
[
"#### **Append Chembl ID, Canonical Smiles and Standard Value to a list**\n##### Canonical Smiles :-\n##### 1. Simplified Molecular Input Line Entry Specification\n##### 2. They can represent a Molecular Compound in a Single Line",
"_____no_output_____"
]
],
[
[
"selection = ['molecule_chembl_id', 'canonical_smiles', 'standard_value']\ndf3 = df2 [selection]\nprint (len (df3))\ndf3",
"6340\n"
],
[
"import numpy as np\n\n#print (df3.values.shape)\n#print (np.array (bioactivity_class).shape)\ndf4 = df3.values\ndf4\nbioactivity_class = np.matrix (bioactivity_class).T\n#bioactivity_class\ncolumns = list (df3.columns)\ncolumns.append ('bioactivity_class')\nprint (columns)\nprint (bioactivity_class.shape)\nprint (df4.shape)\n#df3 = pd.concat ([df3, pd.Series (np.array (bioactivity_class))], axis = 1)\n#print (len (df3))\n#df3",
"['molecule_chembl_id', 'canonical_smiles', 'standard_value', 'bioactivity_class']\n(6340, 1)\n(6340, 3)\n"
],
[
"df4",
"_____no_output_____"
],
[
"#df3 = df3.rename (columns = {0 : 'bioactivity_class'})\n\ndf_final = np.concatenate ((df4, bioactivity_class), axis = 1)\n#df_final = pd.DataFrame (df_final, columns)\n\ndf_final\n#df3.head (3)\n#print (len (df3))",
"_____no_output_____"
],
[
"df_final = pd.DataFrame (df_final, columns = columns)",
"_____no_output_____"
],
[
"df_final",
"_____no_output_____"
]
],
[
[
"#### **Save Pre-Processed data to a CSV file**",
"_____no_output_____"
]
],
[
[
"df_final.to_csv (os.path.join ('Datasets', 'Part-1_Bioactivity_Preprocessed_Data.csv'), index = False)",
"_____no_output_____"
],
[
"!dir",
"Datasets\t\t Part-2_Exploratory_Drug_Analysis.ipynb README.md\nLICENSE\t\t\t Part-3_Descriptor_Calculation.ipynb\nPart-1_Drug_Discovery.ipynb Part-4_Model_Building.ipynb\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9ce4edea20f7fdac6dcf4597ab2eafbaac8dc4
| 220,886 |
ipynb
|
Jupyter Notebook
|
pydrot/examples/notebook.ipynb
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null |
pydrot/examples/notebook.ipynb
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null |
pydrot/examples/notebook.ipynb
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null | 280.312183 | 141,210 | 0.915287 |
[
[
[
"%matplotlib inline\nimport sys, os\nsys.path.append(\"../\")\nimport numpy as np\nimport scipy as sp\nimport numpy.linalg as nla\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom timeit import timeit\nimport ot\nimport ot.plot\nfrom ot.datasets import make_1D_gauss as gauss\nfrom drot.solver import drot, sinkhorn\nfrom drot.proximal import *\nimport csv\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"# Optimal transport",
"_____no_output_____"
]
],
[
[
"def save(C, nrows, ncols, filename):\n assert C.flags['F_CONTIGUOUS'] \n output_file = open(filename, 'wb')\n C.tofile(output_file)\n output_file.close()\n\ndef two_dimensional_gaussian_ot(m, n):\n d = 2\n mu_s = np.random.normal(0.0, 1.0, (d,)) # Gaussian mean\n A_s = np.random.rand(d, d)\n cov_s = np.dot(A_s, A_s.transpose()) # Gaussian covariance matrix\n mu_t = np.random.normal(5.0, 5.0, (d,))\n A_t = np.random.rand(d, d)\n cov_t = np.dot(A_t, A_t.transpose())\n xs = ot.datasets.make_2D_samples_gauss(m, mu_s, cov_s)\n xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)\n p, q = np.ones((m,)) / m, np.ones((n,)) / n \n C = np.array(ot.dist(xs, xt), order='F')\n C /= C.max()\n return m, n, C, p, q",
"_____no_output_____"
],
[
"def multi_experiment(m, n, max_iters, accuracies, skregs, alpha=2.0, ntests=10):\n num_accuracies = accuracies.shape[0]\n num_algs = skregs.shape[0] + 1\n outs = np.zeros([num_algs, 1, num_accuracies, ntests])\n \n for test_idx in range(ntests):\n print(\"\\n *** Experiment\", test_idx+1, \"of\", ntests, \"***\")\n \n m, n, C, p, q = two_dimensional_gaussian_ot(m, n)\n x0 = np.array(np.outer(p, q), order = 'F')\n step = alpha / (m+n)\n \n C_ = C.copy()\n optval = ot.emd2(p, q, C_, numItermax=1_000_000)\n \n drout = drot(x0, C, p, q, max_iters=max_iters, step=step, compute_r_primal=True, \n compute_r_dual=False, eps_abs=1e-4, eps_rel=0.0)\n \n skout = [] \n for reg in skregs:\n skout.append(ot.sinkhorn(p, q, C_, reg, numItermax=max_iters, stopThr=7e-5))\n \n outs[0, 0, :, test_idx] = abs(np.sum(drout['sol']*C) - optval) / optval\n for sk_idx in range(skregs.shape[0]):\n outs[sk_idx+1, 0, :, test_idx] = abs(np.sum(skout[sk_idx]*C_) - optval) / optval\n\n file_name = 'Dims_' + str(m) + '_test_' + str(ntests)\n np.save('output/'+file_name + '.npy', outs)\n return file_name\n\ndef profile(dir, accuracies, labels, colors): \n outs = np.load(dir)\n (num_algs, num_objs_computed, num_accuracies, ntests) = outs.shape\n performance_ratio = np.zeros((num_algs, num_accuracies))\n \n for alg_idx in range(num_algs):\n for acc_idx in range(num_accuracies):\n performance_ratio[alg_idx, acc_idx] = np.sum((outs[alg_idx, 0, acc_idx, :] <= accuracies[acc_idx])) / ntests\n\n fig = plt.figure() \n for alg_idx in range(num_algs):\n plt.plot(accuracies, performance_ratio[alg_idx, :], color=colors[alg_idx], label=labels[alg_idx], linewidth=2.5)\n \n ylabel = r'Performance ratio'\n plt.xlabel(r'Accuracy')\n plt.ylabel(ylabel)\n plt.xscale('log')\n # plt.xlim(1e-4, 1e-1)\n plt.legend()\n \n return fig",
"_____no_output_____"
],
[
"m, n = 512, 512\nmax_iters = 1000\naccuracies = np.logspace(-4.5, -1, num=15)\nskregs = np.array([1e-4, 1e-3, 5e-3, 1e-2, 5e-2, 1e-1])\n\nfile_name = multi_experiment(m, n, max_iters, accuracies, skregs, ntests=100)",
"\n *** Experiment 1 of 100 ***\nDrot terminated at iteration 999\nWarning: numerical errors at iteration 0\n\n *** Experiment 2 of 100 ***\n"
],
[
"labels = ['DROT', 'SK1', 'SK2', 'SK3', 'SK4', 'Sk5', 'SK6']\ncolors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']\ndir = \"output/\" + file_name + '.npy'\nfig = profile(dir, accuracies, labels, colors)\n\n# fig.savefig('figures/'+ file_name + '_mean_1_f64.eps', format='eps')",
"_____no_output_____"
]
],
[
[
"## Single problem",
"_____no_output_____"
]
],
[
[
"\nm, n, C, p, q = two_dimensional_gaussian_ot(512, 512)\nC_ = C.copy()\nG0 = ot.emd(p, q, C_, numItermax=1_000_000)\nGsk = ot.sinkhorn(p, q, C_, 1e-3, numItermax=1000, stopThr=1e-5)\nGsb = ot.bregman.sinkhorn_stabilized(p, q, C_, 1e-3, umItermax=1000, stopThr=1e-5)\nfemd, fsk, fsb = np.sum(G0*C_), np.sum(Gsk*C_), np.sum(Gsb*C_)",
"_____no_output_____"
],
[
"x0 = np.array(np.outer(p, q), order = 'F')\nmax_iters = 500\nstep = .051 / (m+n)\n\ndrout = drot(x0, C, p, q, max_iters=max_iters, step=step, compute_r_primal=True, \n compute_r_dual=True, eps_abs=1e-5, verbose=False, print_every=100)\nxopt = drout[\"sol\"]\nskout, log = sinkhorn(p, q, C_, 1e-3, numItermax=200, stopThr=1e-15)\noptval = femd\n\nplt.figure(1, figsize=(10,8))\nplt.plot(range(drout[\"num_iters\"]), [ abs(f-optval) for f in drout['dual']], color='C0', label='DROT: Funtion gap', linewidth=2)\nplt.plot(range(drout[\"num_iters\"]), [r for r in drout['primal']], color='C0', marker='o', label='DROT: Residual', linewidth=2)\nplt.plot([k for k in log['iter']], [ abs(f - optval) for f in log['fval']], color='C1', label='SK: Function gap', linewidth=2)\nplt.plot([k for k in log['iter']], [ r for r in log['res']], color='C1', marker='o', label='SK: Residual', linewidth=2)\n\nplt.xlabel(\"Iteration\") \nplt.ylabel(\"Suboptimality\") \nplt.yscale('log')\nplt.legend()",
"Drot terminated at iteration 11\n"
]
],
[
[
"### Sparsity of the approximate solutions",
"_____no_output_____"
]
],
[
[
"np.sum(xopt > 0) / (m*n), np.sum(G0 > 0) / (m*n), np.sum(Gsk > 0) / (m*n), np.sum(Gsb > 0) / (m*n)",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(2, 2, figsize=(15, 10))\n\naxs[0, 0].imshow(xopt, interpolation='nearest')\naxs[0, 0].set_title('OT matrix DR')\n\naxs[0, 1].imshow(G0, interpolation='nearest')\naxs[0, 1].set_title('OT matrix G0')\n\naxs[1, 0].imshow(Gsk, interpolation='nearest')\naxs[1, 0].set_title('OT matrix Sinkhorn')\n\naxs[1, 1].imshow(Gsk, interpolation='nearest')\naxs[1, 1].set_title('OT matrix Sinkhorn')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a9cec6ee235b3e66ed0be3a4a3ff57be14431c5
| 5,463 |
ipynb
|
Jupyter Notebook
|
paper/figure1.ipynb
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null |
paper/figure1.ipynb
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null |
paper/figure1.ipynb
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null | 29.370968 | 188 | 0.517115 |
[
[
[
"import itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import stats\nfrom ebnmpy.estimators import estimators",
"_____no_output_____"
],
[
"def sample_point_normal(n, pi0=.9, mu=0, sigma=2):\n not_delta = stats.bernoulli.rvs(pi0, size=n) == 0\n z = np.full(n, mu, dtype=float)\n z[not_delta] = stats.norm.rvs(mu, sigma, size=not_delta.sum())\n return z\n\ndef sample_point_t(n, pi0=.8, df=5, scale=1.5):\n not_delta = stats.bernoulli.rvs(pi0, size=n) == 0\n z = np.zeros(n)\n z[not_delta] = stats.t.rvs(df=df, scale=scale, size=not_delta.sum())\n return z\n\ndef sample_asymmetric_tophat(n, pi0=.5, a=-5, b=10):\n not_delta = stats.bernoulli.rvs(pi0, size=n) == 0\n z = np.zeros(n)\n z[not_delta] = stats.uniform.rvs(a, b - a, size=not_delta.sum())\n return z\n\ndef get_rmse(theta, theta_hat):\n return np.sqrt(np.mean((theta_hat - theta) ** 2))\n\ndef get_clcov(theta, samples, intervals=(.05, .95)):\n lower = np.quantile(samples, intervals[0], axis=0)\n upper = np.quantile(samples, intervals[1], axis=0)\n return np.mean((theta >= lower) & (theta <= upper))",
"_____no_output_____"
]
],
[
[
"Run simulations",
"_____no_output_____"
]
],
[
[
"np.random.seed(0)\n\ns = 1\nn = 1000\nn_posterior_samples = 1000\nn_simulations = 10\n\nsamplers = {\n \"Point-normal\": sample_point_normal,\n \"Point-t\": sample_point_t,\n \"Asymmetric tophat\": sample_asymmetric_tophat,\n}\n\nresults = []\n\nfor _ in range(n_simulations):\n for sampler_name, sampler in samplers.items():\n theta = sampler(n)\n x = theta + stats.norm.rvs(size=n)\n\n for cls_name, cls in estimators.items():\n # run ebnm\n est = cls(include_posterior_sampler=True).fit(x=x, s=s)\n \n # sample from posterior\n samples = est.sample(n_posterior_samples)\n \n # compute metrics\n loglik = est.log_likelihood_\n rmse = get_rmse(theta, theta_hat=est.posterior_[\"mean\"])\n clcov = get_clcov(theta, samples)\n\n results.append((sampler_name, cls.__name__, loglik, rmse, clcov))",
"_____no_output_____"
]
],
[
[
"Format table",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(results, columns=(\"Distribution\", \"Class\", \"LogLik\", \"RMSE\", \"ClCov\"))\ncolumns = list(itertools.product(list(samplers), (\"LogLik\", \"RMSE\", \"ClCov\")))\ndf_mean = df.groupby([\"Distribution\", \"Class\"]).mean().unstack(0).swaplevel(0, 1, axis=1)[columns].loc[[i.__name__ for i in estimators.values()]]\ndf_mean.index.name = None\ndf_mean.columns.names = [None, None]\n\nformatter = {i: \"{:.1f}\" if \"LogLik\" in i else \"{:.3f}\" for i in columns}\ns = df_mean.style.format(formatter=formatter)\ns = s.background_gradient(cmap=\"Reds_r\", subset=columns[::3]).background_gradient(cmap=\"Reds\", subset=columns[1::3]).background_gradient(cmap=\"Reds_r\", subset=columns[2::3])\ns = s.set_properties(**{'text-align': 'center'})\ns = s.set_table_styles([dict(selector='th', props=[('text-align', 'center')])])\nfor i in (3, 6):\n s = s.set_table_styles({\n columns[i]: [{'selector': 'th', 'props': 'border-left: 1px solid black'},\n {'selector': 'td', 'props': 'border-left: 1px solid #000000'}]\n }, overwrite=False, axis=0)",
"_____no_output_____"
]
],
[
[
"Display table",
"_____no_output_____"
]
],
[
[
"s",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a9cf491ba45ced2b385e5dea31293386bd3fa71
| 20,571 |
ipynb
|
Jupyter Notebook
|
1. Load and Visualize Data.ipynb
|
paluchnuggets/Facial-Keypoint-Detection
|
3c93f6e010ace28736e297db8f12e536d5d9418e
|
[
"MIT"
] | 3 |
2019-04-15T12:48:08.000Z
|
2020-10-19T13:04:25.000Z
|
1. Load and Visualize Data.ipynb
|
paluchnuggets/Facial-Keypoint-Detection
|
3c93f6e010ace28736e297db8f12e536d5d9418e
|
[
"MIT"
] | null | null | null |
1. Load and Visualize Data.ipynb
|
paluchnuggets/Facial-Keypoint-Detection
|
3c93f6e010ace28736e297db8f12e536d5d9418e
|
[
"MIT"
] | null | null | null | 37.266304 | 581 | 0.564921 |
[
[
[
"# Facial Keypoint Detection\n \nThis project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with. \n\nLet's take a look at some examples of images and corresponding facial keypoints.\n\n<img src='images/key_pts_example.png' width=50% height=50%/>\n\nFacial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face.\n\n<img src='images/landmarks_numbered.jpg' width=30% height=30%/>\n\n---",
"_____no_output_____"
],
[
"## Load and Visualize Data\n\nThe first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints.\n\n#### Training and Testing Data\n\nThis facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data.\n\n* 3462 of these images are training images, for you to use as you create a model to predict keypoints.\n* 2308 are test images, which will be used to test the accuracy of your model.\n\nThe information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y).\n\n---",
"_____no_output_____"
]
],
[
[
"# import the required libraries\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2",
"_____no_output_____"
],
[
"key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')\n\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nprint('Image name: ', image_name)\nprint('Landmarks shape: ', key_pts.shape)\nprint('First 4 key pts: {}'.format(key_pts[:4]))",
"_____no_output_____"
],
[
"# print out some stats about the data\nprint('Number of images: ', key_pts_frame.shape[0])",
"_____no_output_____"
]
],
[
[
"## Look at some images\n\nBelow, is a function `show_keypoints` that takes in an image and keypoints and displays them. As you look at this data, **note that these images are not all of the same size**, and neither are the faces! To eventually train a neural network on these images, we'll need to standardize their shape.",
"_____no_output_____"
]
],
[
[
"def show_keypoints(image, key_pts):\n \"\"\"Show image with keypoints\"\"\"\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')\n",
"_____no_output_____"
],
[
"# Display a few different types of images by changing the index n\n\n# select an image by index in our data frame\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nplt.figure(figsize=(5, 5))\nshow_keypoints(mpimg.imread(os.path.join('data/training/', image_name)), key_pts)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Dataset class and Transformations\n\nTo prepare our data for training, we'll be using PyTorch's Dataset class. Much of this this code is a modified version of what can be found in the [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n#### Dataset class\n\n``torch.utils.data.Dataset`` is an abstract class representing a\ndataset. This class will allow us to load batches of image/keypoint data, and uniformly apply transformations to our data, such as rescaling and normalizing images for training a neural network.\n\n\nYour custom dataset should inherit ``Dataset`` and override the following\nmethods:\n\n- ``__len__`` so that ``len(dataset)`` returns the size of the dataset.\n- ``__getitem__`` to support the indexing such that ``dataset[i]`` can\n be used to get the i-th sample of image/keypoint data.\n\nLet's create a dataset class for our face keypoints dataset. We will\nread the CSV file in ``__init__`` but leave the reading of images to\n``__getitem__``. This is memory efficient because all the images are not\nstored in the memory at once but read as required.\n\nA sample of our dataset will be a dictionary\n``{'image': image, 'keypoints': key_pts}``. Our dataset will take an\noptional argument ``transform`` so that any required processing can be\napplied on the sample. We will see the usefulness of ``transform`` in the\nnext section.\n",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import Dataset, DataLoader\n\nclass FacialKeypointsDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.key_pts_frame = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.key_pts_frame)\n\n def __getitem__(self, idx):\n image_name = os.path.join(self.root_dir,\n self.key_pts_frame.iloc[idx, 0])\n \n image = mpimg.imread(image_name)\n \n # if image has an alpha color channel, get rid of it\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n key_pts = key_pts.astype('float').reshape(-1, 2)\n sample = {'image': image, 'keypoints': key_pts}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample",
"_____no_output_____"
]
],
[
[
"Now that we've defined this class, let's instantiate the dataset and display some images.",
"_____no_output_____"
]
],
[
[
"# Construct the dataset\nface_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/')\n\n# print some stats about the dataset\nprint('Length of dataset: ', len(face_dataset))",
"_____no_output_____"
],
[
"# Display a few of the images from the dataset\nnum_to_display = 3\n\nfor i in range(num_to_display):\n \n # define the size of images\n fig = plt.figure(figsize=(20,10))\n \n # randomly select a sample\n rand_i = np.random.randint(0, len(face_dataset))\n sample = face_dataset[rand_i]\n\n # print the shape of the image and keypoints\n print(i, sample['image'].shape, sample['keypoints'].shape)\n\n ax = plt.subplot(1, num_to_display, i + 1)\n ax.set_title('Sample #{}'.format(i))\n \n # Using the same display function, defined earlier\n show_keypoints(sample['image'], sample['keypoints'])\n",
"_____no_output_____"
]
],
[
[
"## Transforms\n\nNow, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors.\n\nTherefore, we will need to write some pre-processing code.\nLet's create four transforms:\n\n- ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1]\n- ``Rescale``: to rescale an image to a desired size.\n- ``RandomCrop``: to crop an image randomly.\n- ``ToTensor``: to convert numpy images to torch images.\n\n\nWe will write them as callable classes instead of simple functions so\nthat parameters of the transform need not be passed everytime it's\ncalled. For this, we just need to implement ``__call__`` method and \n(if we require parameters to be passed in), the ``__init__`` method. \nWe can then use a transform like this:\n\n tx = Transform(params)\n transformed_sample = tx(sample)\n\nObserve below how these transforms are generally applied to both the image and its keypoints.\n\n",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import transforms, utils\n# tranforms\n\nclass Normalize(object):\n \"\"\"Convert a color image to grayscale and normalize the color range to [0,1].\"\"\" \n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n image_copy = np.copy(image)\n key_pts_copy = np.copy(key_pts)\n\n # convert image to grayscale\n image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n # scale color range from [0, 255] to [0, 1]\n image_copy= image_copy/255.0\n \n # scale keypoints to be centered around 0 with a range of [-1, 1]\n # mean = 100, sqrt = 50, so, pts should be (pts - 100)/50\n key_pts_copy = (key_pts_copy - 100)/50.0\n\n\n return {'image': image_copy, 'keypoints': key_pts_copy}\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = cv2.resize(image, (new_w, new_h))\n \n # scale the pts, too\n key_pts = key_pts * [new_w / w, new_h / h]\n\n return {'image': img, 'keypoints': key_pts}\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. 
If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n key_pts = key_pts - [left, top]\n\n return {'image': image, 'keypoints': key_pts}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n # if image has no grayscale color channel, add one\n if(len(image.shape) == 2):\n # add that third color dim\n image = image.reshape(image.shape[0], image.shape[1], 1)\n \n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n \n return {'image': torch.from_numpy(image),\n 'keypoints': torch.from_numpy(key_pts)}",
"_____no_output_____"
]
],
[
[
"## Test out the transforms\n\nLet's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. For example, you cannot crop a image using a value smaller than the original image (and the orginal images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size.",
"_____no_output_____"
]
],
[
[
"# test out some of these transforms\nrescale = Rescale(100)\ncrop = RandomCrop(50)\ncomposed = transforms.Compose([Rescale(250),\n RandomCrop(224)])\n\n# apply the transforms to a sample image\ntest_num = 500\nsample = face_dataset[test_num]\n\nfig = plt.figure()\nfor i, tx in enumerate([rescale, crop, composed]):\n transformed_sample = tx(sample)\n\n ax = plt.subplot(1, 3, i + 1)\n plt.tight_layout()\n ax.set_title(type(tx).__name__)\n show_keypoints(transformed_sample['image'], transformed_sample['keypoints'])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Create the transformed dataset\n\nApply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size).",
"_____no_output_____"
]
],
[
[
"# define the data tranform\n# order matters! i.e. rescaling should come before a smaller crop\ndata_transform = transforms.Compose([Rescale(250),\n RandomCrop(224),\n Normalize(),\n ToTensor()])\n\n# create the transformed dataset\ntransformed_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/',\n transform=data_transform)\n",
"_____no_output_____"
],
[
"# print some stats about the transformed data\nprint('Number of images: ', len(transformed_dataset))\n\n# make sure the sample tensors are the expected size\nfor i in range(5):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['keypoints'].size())\n",
"_____no_output_____"
]
],
[
[
"## Data Iteration and Batching\n\nRight now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to:\n\n- Batch the data\n- Shuffle the data\n- Load the data in parallel using ``multiprocessing`` workers.\n\n``torch.utils.data.DataLoader`` is an iterator which provides all these\nfeatures, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network!\n\n---\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a9d19c8f3655232a40a97d68da9cdb6059af8e6
| 84,761 |
ipynb
|
Jupyter Notebook
|
doc/doc.ipynb
|
Felihong/wikidata-sequence-analysis
|
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
|
[
"MIT"
] | null | null | null |
doc/doc.ipynb
|
Felihong/wikidata-sequence-analysis
|
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
|
[
"MIT"
] | 1 |
2019-11-04T12:45:02.000Z
|
2019-11-04T12:45:02.000Z
|
doc/doc.ipynb
|
Felihong/wikidata-sequence-analysis
|
1d86ad9812c90864eb2c9ab72e5e61474d439f1e
|
[
"MIT"
] | null | null | null | 35.450021 | 193 | 0.353795 |
[
[
[
"import os\nimport sys\n\nmodule_path = os.path.abspath(os.path.join('../src'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n \nfrom prefix_span import PrefixSpan\nfrom js_distance import JS\nfrom sequence_generator import SequenceGenerator",
"_____no_output_____"
]
],
[
[
"# Descriptive Database\nThe tabel `data` contains the following content: \n\n| column | content explaination |\n|:----------------: | :----------------------------------------------------------: |\n| item_id | edited item page ID |\n| item_name | respective item page name |\n| label | English label of the item page |\n| category | classified content category based on label and description |\n| user_id | editor ID |\n| user_name | editer name |\n| user_group | editor's user group and their corresponding user rights |\n| user_editcount | rough number of edits and edit-like actions the user has performed |\n| user_registration | editor registration timestamp |\n| rev_id | revision(edit) ID | \n| rev_timestamp | revision timestamp |\n| comment | original comment information for this edit |\n| edit_summary | comment information simplified with regular expression |\n| edit_type | schematized and classified edit summary for ease of use |\n| paraphrase | paraphrase of edit summary according to Wikibase API |\n| prediction | quality prediction of this revision ID, chosen as the one with the biggest probability |\n|itemquality_A, itemquality_B, itemquality_C, itemquality_D, itemquality_E | concrete quality level probability distribution of this revision |\n| js_distance | Jensen-Shannon divergence value based on given quality distribution |",
"_____no_output_____"
],
[
"# Sequence Analysis\n## Generate Sequence Database\n\nAn event is a list of continuous activities contributed by the same editor. (list of strings)\n\nA sequence is a list of events occurred on the same article. (list)\n\nA sequence database is a list of sequences. (list)\n\nThus, a sequence database is a list of lists of lists of strings.\n\nA sequence database ready to be mined is determined by setting up the js-distance constraint.",
"_____no_output_____"
]
],
[
[
"seq = SequenceGenerator(csvfile='../db/data.csv', jsThreshold=0.8)\nseq_db = seq.generate_sequence()\nfor sequence in seq_db:\n print(sequence)",
"[['set reference', 'remove reference']]\n[['set item', 'set item', 'set item', 'set item', 'set item'], ['revert edits', 'revert edits', 'revert edits', 'revert edits', 'revert edits']]\n[['set reference']]\n[['set reference']]\n[['revert edits'], ['add reference']]\n[['add reference']]\n[['set claim', 'set reference']]\n[['set reference']]\n[['add reference']]\n[['set reference']]\n[['set description']]\n[['set label'], ['set claim']]\n[['set reference'], ['set reference'], ['remove claim']]\n[['set reference']]\n[['set reference']]\n[['set reference']]\n[['revert edits']]\n[['set reference']]\n[['set reference']]\n[['update item']]\n[['set reference']]\n[['set reference', 'revert edits'], ['revert edits'], ['revert edits'], ['add reference', 'add reference'], ['revert edits'], ['revert edits'], ['revert edits'], ['remove claim']]\n[['add reference']]\n[['set reference']]\n[['set reference']]\n[['add reference']]\n[['set reference'], ['update item']]\n[['add reference'], ['revert edits']]\n[['set claim']]\n[['set reference']]\n[['add description']]\n[['set description']]\n[['set reference']]\n[['revert edits'], ['add description']]\n[['add sitelink'], ['set claim', 'add reference']]\n[['set label']]\n[['set reference']]\n[['add reference'], ['set term']]\n[['set reference']]\n[['set reference'], ['set claim']]\n[['add description']]\n[['set reference']]\n[['add description']]\n[['set reference']]\n[['remove claim'], ['set claim'], ['set claim'], ['set label'], ['remove sitelink']]\n[['set reference']]\n[['set claim']]\n[['set sitelink']]\n[['add description']]\n[['add reference']]\n[['set reference']]\n[['set claim']]\n[['set reference']]\n[['set reference'], ['set description']]\n[['set claim']]\n[['set claim']]\n[['set reference']]\n[['set reference']]\n[['set reference']]\n[['set claim']]\n[['set reference']]\n[['set claim']]\n[['set claim']]\n[['set reference'], ['set claim']]\n[['revert edits', 'merge item'], ['merge item', 'merge item']]\n[['add 
reference']]\n[['set reference']]\n[['set reference'], ['set claim']]\n[['add reference'], ['set claim']]\n[['set reference']]\n[['set reference']]\n[['set description'], ['set reference']]\n[['set reference']]\n[['set reference']]\n[['set reference']]\n[['set reference']]\n[['add reference']]\n[['set reference']]\n[['set description']]\n[['add reference']]\n[['set reference']]\n[['set reference']]\n[['set claim']]\n[['set reference']]\n[['set sitelink']]\n[['set claim']]\n[['set reference']]\n[['set claim']]\n[['set reference']]\n[['add description'], ['set reference']]\n[['set claim']]\n[['set reference']]\n[['set claim']]\n[['add reference']]\n[['set reference']]\n[['set claim']]\n[['set claim']]\n[['set label']]\n[['set label'], ['set label']]\n[['set claim']]\n[['add reference'], ['remove claim', 'set claim']]\n[['set reference']]\n[['add reference']]\n[['set description'], ['set reference']]\n[['set claim', 'set sitelink']]\n[['set claim', 'add reference']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set reference'], ['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim'], ['set sitelink']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim', 'add reference']]\n[['set claim']]\n[['set claim']]\n[['set description']]\n[['add reference']]\n[['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['set claim']]\n[['update item'], ['update item']]\n[['update item']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['set claim']]\n[['set claim']]\n[['add description'], ['set claim']]\n[['set claim']]\n[['set reference']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['update 
item']]\n[['update item']]\n[['set claim']]\n[['set claim']]\n[['update item'], ['update item']]\n[['update item'], ['update item']]\n[['set claim']]\n[['add description']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['add reference'], ['set claim']]\n[['set claim']]\n[['add description'], ['set claim']]\n[['set claim'], ['remove claim', 'set claim']]\n[['set claim'], ['add reference']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim', 'remove claim', 'set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set label']]\n[['set claim']]\n[['set claim'], ['update item'], ['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['update claim']]\n[['set description']]\n[['add label']]\n[['add description']]\n[['set claim']]\n[['set claim', 'add reference']]\n[['add description']]\n[['set claim']]\n[['set claim']]\n[['set claim', 'add reference']]\n[['set claim'], ['add reference']]\n[['set claim']]\n[['add description']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['add description']]\n[['remove description']]\n[['set claim']]\n[['set claim']]\n[['set claim']]\n[['add description'], ['add description']]\n[['add description']]\n[['add description']]\n[['add description'], ['add description']]\n[['add description'], ['add description']]\n[['add description']]\n[['add description']]\n[['add description']]\n[['add description']]\n[['add description']]\n[['set claim']]\n[['add alias']]\n[['set claim']]\n[['set claim']]\n[['set reference']]\n[['update item']]\n[['add description'], ['add label']]\n[['set description']]\n[['merge item'], ['set claim']]\n[['update item'], ['merge item']]\n[['merge item']]\n[['merge item'], ['set claim']]\n[['merge item'], ['add description'], ['add description']]\n[['merge item'], ['update item']]\n[['merge item'], ['update item']]\n"
]
],
[
[
"## Mine Sequential Patterns\n\nThe sequential patterns within the sequence database are discovered with PrefixSpan algorithm by setting up the minimum support threshold.",
"_____no_output_____"
]
],
[
[
"prex = PrefixSpan()\nresult = prex.prefix_span(dataset=seq_db, minSupport=0.1)\ndf = prex.display(result)\nprint(df)",
" 0 1\n2 [[set claim]] 99\n3 [[set reference]] 56\n0 [[add description]] 25\n1 [[add reference]] 25\n4 [[update item]] 25\n"
]
],
[
[
"## Representative Patterns \n\nFollowing metrics are used for mining patterns from different perspectives, this can be archieved by adjusting the jsThreshold and minSupport constraints:\n\n* high quality + high frequency \n* high quality + middle frequency\n* high quality + low frequency \n____________________________________\n* middle quality + high frequency\n* middle quality + middle frequency\n* middle quality + low frequency\n____________________________________\n\n* low quality + high frequency\n* low quality + middle frequency\n* low quality + low frequency\n____________________________________\n\n* no quality constraint + high frequency\n* no quality constraint + middle frequency\n* no quality constraint + low frequency \n____________________________________\n\n\n| Grading | Range |\n| :-------------| :-------------|\n| Q_high | \\[0.7, 1) |\n| Q_middle | \\[0.3, 0.7) |\n| Q_low | (0, 0.3) |\n| F_high | \\[0.2, 1) |\n| F_middle | \\[0.05, 0.2) |\n| F_low | (0, 0.05) |",
"_____no_output_____"
],
[
"### High Quality Constraint ",
"_____no_output_____"
]
],
[
[
"seq_high = SequenceGenerator(csvfile='../db/data.csv', jsThreshold=0.75)\ndb = seq_high.generate_sequence()\nhighF = prex.prefix_span(dataset=db, minSupport=0.25)\nmidF = prex.prefix_span(dataset=db, minSupport=0.1)\nlowF = prex.prefix_span(dataset=db, minSupport=0.01)\nprex.display(highF)",
"_____no_output_____"
],
[
"prex.display(midF)",
"_____no_output_____"
],
[
"prex.display(lowF)",
"_____no_output_____"
]
],
[
[
"### Middle Quality Constraint",
"_____no_output_____"
]
],
[
[
"seq_mid = SequenceGenerator(csvfile='../db/data.csv', jsThreshold=0.35)\ndb = seq_mid.generate_sequence()\nhighF = prex.prefix_span(dataset=db, minSupport=0.25)\nmidF = prex.prefix_span(dataset=db, minSupport=0.1)\nlowF = prex.prefix_span(dataset=db, minSupport=0.01)\nprex.display(highF)",
"_____no_output_____"
],
[
"prex.display(midF)",
"_____no_output_____"
],
[
"prex.display(lowF)",
"_____no_output_____"
]
],
[
[
"### Low Quality Constraint",
"_____no_output_____"
]
],
[
[
"seq_low = SequenceGenerator(csvfile='../db/data.csv', jsThreshold=0.05)\ndb = seq_low.generate_sequence()\nhighF = prex.prefix_span(dataset=db, minSupport=0.25)\nmidF = prex.prefix_span(dataset=db, minSupport=0.1)\nlowF = prex.prefix_span(dataset=db, minSupport=0.01)\nprex.display(highF)",
"_____no_output_____"
],
[
"prex.display(midF)",
"_____no_output_____"
],
[
"prex.display(lowF)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a9d2491baff134d448891e6c60b8bee594ae891
| 8,718 |
ipynb
|
Jupyter Notebook
|
Ariticial_Platoon/Artificial Platoon.ipynb
|
MatthewNice/I24_processed_data
|
91f5208fef6d7df3804b6f0cad7021b2c683aa35
|
[
"MIT"
] | 1 |
2021-06-10T14:16:37.000Z
|
2021-06-10T14:16:37.000Z
|
Ariticial_Platoon/Artificial Platoon.ipynb
|
MatthewNice/I24_processed_data
|
91f5208fef6d7df3804b6f0cad7021b2c683aa35
|
[
"MIT"
] | null | null | null |
Ariticial_Platoon/Artificial Platoon.ipynb
|
MatthewNice/I24_processed_data
|
91f5208fef6d7df3804b6f0cad7021b2c683aa35
|
[
"MIT"
] | null | null | null | 27.501577 | 90 | 0.372448 |
[
[
[
"import pandas as pd\nGPSdata = pd.read_csv(\"2021-03-09-13-35-04_2T3MWRFVXLW056972_GPS_Messages.csv\")\nCANdata = pd.read_csv(\"2021-03-09-13-35-04_2T3MWRFVXLW056972_CAN_Messages.csv\")",
"_____no_output_____"
],
[
"GPSdata.head()",
"_____no_output_____"
],
[
"CANdata.head()",
"_____no_output_____"
],
[
"n = 15\nm = 40\n\nGPSlist = [GPSdata]\nCANlist = [CANdata]",
"_____no_output_____"
],
[
"for x in range (1,n):\n tempCAN = CANlist[x-1]\n tempGPS = GPSlist[x-1]\n tempCAN['Time'] += m\n tempGPS['Systime'] += m\n CANlist.append(tempCAN)\n GPSlist.append(tempGPS)",
"_____no_output_____"
],
[
"for x in range (n):\n CANlist[x].to_csv('Platoon/platoonCAN'+ str(x) + '.csv')\n GPSlist[x].to_csv('Platoon/platoonGPS'+ str(x) + '.csv')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a9d3a819bbf2839f96473d8903f86edfd2952c9
| 518,391 |
ipynb
|
Jupyter Notebook
|
content/labs/lab04/notebook/cs109b_lab04_bayes.ipynb
|
simonwarchol/2020-CS109B
|
e3ab6307ca7701beee44c5436deb68010b5a2bb6
|
[
"MIT"
] | null | null | null |
content/labs/lab04/notebook/cs109b_lab04_bayes.ipynb
|
simonwarchol/2020-CS109B
|
e3ab6307ca7701beee44c5436deb68010b5a2bb6
|
[
"MIT"
] | null | null | null |
content/labs/lab04/notebook/cs109b_lab04_bayes.ipynb
|
simonwarchol/2020-CS109B
|
e3ab6307ca7701beee44c5436deb68010b5a2bb6
|
[
"MIT"
] | null | null | null | 385.134473 | 219,076 | 0.927811 |
[
[
[
"# <img style=\"float: left; padding-right: 10px; width: 45px\" src=\"https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png\"> CS109B Data Science 2: Advanced Topics in Data Science \n\n## Lab 4 - Bayesian Analysis\n\n**Harvard University**<br>\n**Spring 2020**<br>\n**Instructors:** Mark Glickman, Pavlos Protopapas, and Chris Tanner<br>\n**Lab Instructors:** Chris Tanner and Eleni Angelaki Kaxiras<br>\n**Content:** Eleni Angelaki Kaxiras\n\n---",
"_____no_output_____"
]
],
[
[
"## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES\nimport requests\nfrom IPython.core.display import HTML\nstyles = requests.get(\"https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css\").text\nHTML(styles)",
"_____no_output_____"
],
[
"import pymc3 as pm\nfrom pymc3 import summary",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport pandas as pd\n%matplotlib inline \n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"print('Running on PyMC3 v{}'.format(pm.__version__))",
"Running on PyMC3 v3.8\n"
],
[
"%%javascript\nIPython.OutputArea.auto_scroll_threshold = 20000;",
"_____no_output_____"
]
],
[
[
"<a id=top></a>\n\n## Learning Objectives\n\nBy the end of this lab, you should be able to:\n* Understand how probability distributions work.\n* Apply Bayes Rule in calculating probabilities.\n* Understand how to apply Bayesian analysis using PyMC3\n* Avoid getting fired when talking to your Bayesian employer.\n\n**This lab corresponds to Lectures 6, 7, and 8, and maps to Homework 3.**",
"_____no_output_____"
],
[
"## Table of Contents\n\n1. The Bayesian Way of Thinking or Is this a Fair Coin?\n2. [Intro to `pyMC3`](#pymc3). \n3. [Bayesian Linear Regression](#blr).\n4. [Try this at Home: Example on Mining Disasters](#no4).",
"_____no_output_____"
],
[
"## 1. The Bayesian way of Thinking\n\n```\nHere is my state of knowledge about the situation. Here is some data, I am now going to revise my state of knowledge.\n```",
"_____no_output_____"
],
[
"<div class=\"exercise\" style=\"background-color:#b3e6ff\"><b>Table Exercise</b>: Discuss the statement above with your table mates and make sure everyone understands what it means and what constitutes Bayesian way of thinking. Finally, count the Bayesians among you. </div>",
"_____no_output_____"
],
[
"### A. Bayes Rule\n\n\\begin{equation}\n\\label{eq:bayes} \nP(A|\\textbf{B}) = \\frac{P(\\textbf{B} |A) P(A) }{P(\\textbf{B})} \n\\end{equation}\n\n$P(A|\\textbf{B})$ is the **posterior** distribution, prob(hypothesis | data) \n\n$P(\\textbf{B} |A)$ is the **likelihood** function, how probable is my data **B** for different values of the parameters\n\n$P(A)$ is the marginal probability to observe the data, called the **prior**, this captures our belief about the data before observing it.\n\n$P(\\textbf{B})$ is the marginal distribution (sometimes called marginal likelihood)",
"_____no_output_____"
],
[
"<BR>\n<div class=\"exercise\" style=\"background-color:#b3e6ff\"><b>Table Exercise</b>: Solve the Monty Hall Paradox using Bayes Rule.</div> \n\n\n\nYou are invited to play a game. There are 3 doors behind **one** of which are the keys to a brand new red Tesla. There is a goat behind each of the other two. \n\nYou are asked to pick one door, and let's say you pick **Door1**. The host knows where the keys are. Of the two remaining closed doors, he will always open the door that has a goat behind it. He'll say \"I will do you a favor and open **Door2**\". So he opens Door2 inside which there is, of course, a goat. He now asks you, do you want to open the initial Door you chose or change to **Door3**? Generally, in this game, when you are presented with this choice should you swap the doors?\n\n**Initial Steps:**\n- Start by defining the `events` of this probabilities game. One definition is:\n \n - $A_i$: car is behind door $i$ \n \n - $B_i$ host opens door $i$\n \n$i\\in[1,2,3]$\n \n- In more math terms, the question is: is the probability that the price is behind **Door 1** higher than the probability that the price is behind **Door2**, given that an event **has occured**?",
"_____no_output_____"
],
[
"### B. Bayes Rule written with Probability Distributions\n\nWe have data that we believe come from an underlying distribution of unknown parameters. If we find those parameters, we know everything about the process that generated this data and we can make inferences (create new data).\n\n\\begin{equation}\n\\label{eq:bayes} \nP(\\theta|\\textbf{D}) = \\frac{P(\\textbf{D} |\\theta) P(\\theta) }{P(\\textbf{D})} \n\\end{equation}",
"_____no_output_____"
],
[
"#### But what is $\\theta \\;$?\n\n$\\theta$ is an unknown yet fixed set of parameters. In Bayesian inference we express our belief about what $\\theta$ might be and instead of trying to guess $\\theta$ exactly, we look for its **probability distribution**. What that means is that we are looking for the **parameters** of that distribution. For example, for a Poisson distribution our $\\theta$ is only $\\lambda$. In a normal distribution, our $\\theta$ is often just $\\mu$ and $\\sigma$.",
"_____no_output_____"
],
[
"### C. A review of Common Probability Distributions\n\n#### Discrete Distributions\n\nThe random variable has a **probability mass function (pmf)** which measures the probability that our random variable will take a specific value $y$, denoted $P(Y=y)$.\n\n- **Bernoulli** (binary outcome, success has probability $\\theta$, $one$ trial):\n$\nP(Y=k) = \\theta^k(1-\\theta)^{1-k}\n$\n<HR>\n- **Binomial** (binary outcome, success has probability $\\theta$, $n$ trials):\n\\begin{equation}\nP(Y=k) = {{n}\\choose{k}} \\cdot \\theta^k(1-\\theta)^{n-k}\n\\end{equation}\n\n*Note*: Binomial(1,$p$) = Bernouli($p$)\n<HR>\n- **Negative Binomial**\n<HR>\n- **Poisson** (counts independent events occurring at a rate)\n\\begin{equation}\nP\\left( Y=y|\\lambda \\right) = \\frac{{e^{ - \\lambda } \\lambda ^y }}{{y!}}\n\\end{equation}\ny = 0,1,2,...\n<HR>\n- **Discrete Uniform** \n<HR>\n- **Categorical, or Multinulli** (random variables can take any of K possible categories, each having its own probability; this is a generalization of the Bernoulli distribution for a discrete variable with more than two possible outcomes, such as the roll of a die)\n<HR>\n- **Dirichlet-multinomial** (a generalization of the beta distribution for many variables)",
"_____no_output_____"
],
[
"#### Continuous Distributions\n\nThe random variable has a **probability density function (pdf)**.\n- **Uniform** (variable equally likely to be near each value in interval $(a,b)$)\n\\begin{equation}\nP(X = x) = \\frac{1}{b - a}\n\\end{equation}\nanywhere within the interval $(a, b)$, and zero elsewhere.\n<HR>\n- **Normal** (a.k.a. Gaussian)\n\\begin{equation}\nX \\sim \\mathcal{N}(\\mu,\\,\\sigma^{2})\n\\end{equation} \n\n A Normal distribution can be parameterized either in terms of precision $\\tau$ or standard deviation ($\\sigma^{2}$. The link between the two is given by\n\\begin{equation}\n\\tau = \\frac{1}{\\sigma^{2}}\n\\end{equation}\n - Mean $\\mu$\n - Variance $\\frac{1}{\\tau}$ or $\\sigma^{2}$\n - Parameters: `mu: float`, `sigma: float` or `tau: float`\n<HR>\n- **Beta** (variable ($\\theta$) taking on values in the interval $[0,1]$, and parametrized by two positive parameters, $\\alpha$ and $\\beta$ that control the shape of the distribution. \n \n*Note:*Beta is a good distribution to use for priors (beliefs) because its range is $[0,1]$ which is the natural range for a probability and because we can model a wide range of functions by changing the $\\alpha$ and $\\beta$ parameters.\n\n\\begin{equation}\n\\label{eq:beta} \nP(\\theta) = \\frac{1}{B(\\alpha, \\beta)} {\\theta}^{\\alpha - 1} (1 - \\theta)^{\\beta - 1} \\propto {\\theta}^{\\alpha - 1} (1 - \\theta)^{\\beta - 1}\n\\end{equation}\n\n\nwhere the normalisation constant, $B$, is a beta function of $\\alpha$ and $\\beta$,\n\n\n\\begin{equation}\nB(\\alpha, \\beta) = \\int_{t=0}^1 t^{\\alpha - 1} (1 - t)^{\\beta - 1} dt.\n\\end{equation}\n<HR>\n- **Exponential**\n<HR>\n- **Gamma**\n\n",
"_____no_output_____"
],
[
" #### Code Resources:\n - Statistical Distributions in numpy/scipy: [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html)\n - Statistical Distributions in pyMC3: [distributions in PyMC3](https://docs.pymc.io/api/distributions.html) (we will see those below).",
"_____no_output_____"
],
[
"<div class=\"discussion\"><b>Exercise: Plot a Discrete variable</b></div>\n\nChange the value of $\\mu$ in the Poisson PMF and see how the plot changes. Remember that the y-axis in a discrete probability distribution shows the probability of the random variable having a specific value in the x-axis.\n\n\\begin{equation}\nP\\left( X=k \\right) = \\frac{{e^{ - \\mu } \\mu ^k }}{{k!}}\n\\end{equation}\n\n**stats.poisson.pmf(x, mu)** $\\mu$(mu) is our $\\theta$ in this case.",
"_____no_output_____"
]
],
[
[
"plt.style.use('seaborn-darkgrid')\nx = np.arange(0, 30)\nfor m in [0.5, 3, 8]:\n pmf = stats.poisson.pmf(x, m)\n plt.plot(x, pmf, 'o', alpha=0.5, label='$\\mu$ = {}'.format(m))\nplt.xlabel('random variable', fontsize=12)\nplt.ylabel('probability', fontsize=12)\nplt.legend(loc=1)\nplt.ylim=(-0.1)\nplt.show()",
"_____no_output_____"
],
[
"# same for binomial\nplt.style.use('seaborn-darkgrid')\nx = np.arange(0, 22)\nns = [10, 17]\nps = [0.5, 0.7]\nfor n, p in zip(ns, ps):\n pmf = stats.binom.pmf(x, n, p)\n plt.plot(x, pmf, 'o', alpha=0.5, label='n = {}, p = {}'.format(n, p))\nplt.xlabel('x', fontsize=14)\nplt.ylabel('f(x)', fontsize=14)\nplt.legend(loc=1)\nplt.show()",
"_____no_output_____"
],
[
"# discrete uniform\nplt.style.use('seaborn-darkgrid')\nls = [0]\nus = [3] # watch out, this number can only be integer!\nfor l, u in zip(ls, us):\n x = np.arange(l, u+1)\n pmf = [1.0 / (u - l + 1)] * len(x)\n plt.plot(x, pmf, '-o', label='lower = {}, upper = {}'.format(l, u))\nplt.xlabel('x', fontsize=12)\nplt.ylabel('probability P(x)', fontsize=12)\nplt.legend(loc=1)\nplt.show()",
"_____no_output_____"
]
],
[
[
"<div class=\"discussion\"><b>Exercise: Plot a continuous variable<br></div>\n\nChange the value of $\\mu$ in the Uniform PDF and see how the plot changes.\n \nRemember that the y-axis in a continuous probability distribution does not shows the actual probability of the random variable having a specific value in the x-axis because that probability is zero!. Instead, to see the probability that the variable is within a small margin we look at the integral below the curve of the PDF.\n\nThe uniform is often used as a noninformative prior.",
"_____no_output_____"
],
[
"```\nUniform - numpy.random.uniform(a=0.0, b=1.0, size)\n```\n\n$\\alpha$ and $\\beta$ are our parameters. `size` is how many tries to perform.\nOur $\\theta$ is basically the combination of the parameters a,b. We can also call it \n\\begin{equation}\n\\mu = (a+b)/2\n\\end{equation}",
"_____no_output_____"
]
],
[
[
"from scipy.stats import uniform\n\nr = uniform.rvs(size=1000)\nplt.plot(r, uniform.pdf(r),'r-', lw=5, alpha=0.6, label='uniform pdf')\nplt.hist(r, density=True, histtype='stepfilled', alpha=0.2)\nplt.ylabel(r'probability density')\nplt.xlabel(f'random variable')\nplt.legend(loc='best', frameon=False)\nplt.show()",
"_____no_output_____"
],
[
"from scipy.stats import beta\n\nalphas = [0.5, 1.5, 3.0]\nbetas = [0.5, 1.5, 3.0]\nx = np.linspace(0, 1, 1000) \ncolors = ['red', 'green', 'blue']\n\nfig, ax = plt.subplots(figsize=(8, 5))\n\nfor a, b, colors in zip(alphas, betas, colors):\n dist = beta(a, b)\n plt.plot(x, dist.pdf(x), c=colors,\n label=f'a={a}, b={b}')\n\nax.set_ylim(0, 3)\n\nax.set_xlabel(r'$\\theta$')\nax.set_ylabel(r'$p(\\theta|\\alpha,\\beta)$')\nax.set_title('Beta Distribution')\n\nax.legend(loc='best')\nfig.show();",
"_____no_output_____"
],
[
"plt.style.use('seaborn-darkgrid')\nx = np.linspace(-5, 5, 1000)\nmus = [0., 0., 0., -2.]\nsigmas = [0.4, 1., 2., 0.4]\nfor mu, sigma in zip(mus, sigmas):\n pdf = stats.norm.pdf(x, mu, sigma)\n plt.plot(x, pdf, label=r'$\\mu$ = '+ f'{mu},' + r'$\\sigma$ = ' + f'{sigma}') \nplt.xlabel('random variable', fontsize=12)\nplt.ylabel('probability density', fontsize=12)\nplt.legend(loc=1)\nplt.show()",
"_____no_output_____"
],
[
"plt.style.use('seaborn-darkgrid')\nx = np.linspace(-5, 5, 1000)\nmus = [0., 0., 0., -2.] # mean\nsigmas = [0.4, 1., 2., 0.4] # std\nfor mu, sigma in zip(mus, sigmas):\n plt.plot(x, uniform.pdf(x, mu, sigma), lw=5, alpha=0.4, \\\n label=r'$\\mu$ = '+ f'{mu},' + r'$\\sigma$ = ' + f'{sigma}')\nplt.xlabel('random variable', fontsize=12)\nplt.ylabel('probability density', fontsize=12)\nplt.legend(loc=1)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### D. Is this a Fair Coin?\n\nWe do not want to promote gambling but let's say you visit the casino in **Monte Carlo**. You want to test your theory that casinos are dubious places where coins have been manipulated to have a larger probability for tails. So you will try to estimate how fair a coin is based on 100 flips. <BR>\nYou begin by flipping the coin. You get either Heads ($H$) or Tails ($T$) as our observed data and want to see if your posterior probabilities change as you obtain more data, that is, more coin flips. A nice way to visualize this is to plot the posterior probabilities as we observe more flips (data). ",
"_____no_output_____"
],
[
"We will be using Bayes rule. $\\textbf{D}$ is our data.\n\n\\begin{equation}\n\\label{eq:bayes} \nP(\\theta|\\textbf{D}) = \\frac{P(\\textbf{D} |\\theta) P(\\theta) }{P(\\textbf{D})} \n\\end{equation}",
"_____no_output_____"
],
[
"In the case of a coin toss when we observe $k$ heads in $n$ tosses:\n\\begin{equation}\n\\label{eq:bayes} \nP(\\theta|\\textbf{k}) = Beta(\\alpha + \\textbf{k}, \\beta + n - \\textbf{k}) \n\\end{equation}\n\nwe can say that $\\alpha$ and $\\beta$ play the roles of a \"prior number of heads\" and \"prior number of tails\".",
"_____no_output_____"
]
],
[
[
"# play with the priors - here we manually set them but we could be sampling from a separate Beta\ntrials = np.array([0, 1, 3, 5, 10, 15, 20, 100, 200, 300])\nheads = np.array([0, 1, 2, 4, 8, 10, 10, 50, 180, 150])\nx = np.linspace(0, 1, 100)\n\n# for simplicity we set a,b=1\n\nplt.figure(figsize=(10,8))\nfor k, N in enumerate(trials):\n sx = plt.subplot(len(trials)/2, 2, k+1)\n posterior = stats.beta.pdf(x, 1 + heads[k], 1 + trials[k] - heads[k]) \n plt.plot(x, posterior, alpha = 0.5, label=f'{trials[k]} tosses\\n {heads[k]} heads');\n plt.fill_between(x, 0, posterior, color=\"#348ABD\", alpha=0.4) \n plt.legend(loc='upper left', fontsize=10)\n plt.legend()\n plt.autoscale(tight=True)\n \nplt.suptitle(\"Posterior probabilities for coin flips\", fontsize=15);\nplt.tight_layout()\nplt.subplots_adjust(top=0.88)",
"_____no_output_____"
]
],
[
[
"<a id=pymc3></a> [Top](#top)\n\n## 2. Introduction to `pyMC3`\n \nPyMC3 is a Python library for programming Bayesian analysis, and more specifically, data creation, model definition, model fitting, and posterior analysis. It uses the concept of a `model` which contains assigned parametric statistical distributions to unknown quantities in the model. Within models we define random variables and their distributions. A distribution requires at least a `name` argument, and other `parameters` that define it. You may also use the `logp()` method in the model to build the model log-likelihood function. We define and fit the model.\n\nPyMC3 includes a comprehensive set of pre-defined statistical distributions that can be used as model building blocks. Although they are not meant to be used outside of a `model`, you can invoke them by using the prefix `pm`, as in `pm.Normal`. \n\n#### Markov Chain Monte Carlo (MCMC) Simulations\n\nPyMC3 uses the **No-U-Turn Sampler (NUTS)** and the **Random Walk Metropolis**, two Markov chain Monte Carlo (MCMC) algorithms for sampling in posterior space. Monte Carlo gets into the name because when we sample in posterior space, we choose our next move via a pseudo-random process. NUTS is a sophisticated algorithm that can handle a large number of unknown (albeit continuous) variables.",
"_____no_output_____"
]
],
[
[
"with pm.Model() as model:\n z = pm.Normal('z', mu=0., sigma=5.) \n x = pm.Normal('x', mu=z, sigma=1., observed=5.) \nprint(x.logp({'z': 2.5})) \nprint(z.random(10, 100)[:10]) ",
"-4.043938533204672\n[ 2.49487126 -0.51068451 -3.65709456 0.43477628 10.22323769 1.22927069\n 0.01286762 -4.87182774 -2.60094774 -0.72939632]\n"
]
],
[
[
"**References**:\n\n- *Salvatier J, Wiecki TV, Fonnesbeck C. 2016. Probabilistic programming in Python using PyMC3. PeerJ Computer Science 2:e55* [(https://doi.org/10.7717/peerj-cs.55)](https://doi.org/10.7717/peerj-cs.55)\n- [Distributions in PyMC3](https://docs.pymc.io/api/distributions.html)\n- [More Details on Distributions](https://docs.pymc.io/developer_guide.html)\n\nInformation about PyMC3 functions including descriptions of distributions, sampling methods, and other functions, is available via the `help` command.",
"_____no_output_____"
]
],
[
[
"#help(pm.Poisson)",
"_____no_output_____"
]
],
[
[
"<a id=blr></a> [Top](#top)\n\n## 3. Bayesian Linear Regression",
"_____no_output_____"
],
[
"Let's say we want to predict outcomes Y as normally distributed observations with an expected value $mu$ that is a linear function of two predictor variables, $\\bf{x}_1$ and $\\bf{x}_2$.\n\n\\begin{equation}\n\\mu = \\alpha + \\beta_1 \\bf{x}_1 + \\beta_2 x_2 \n\\end{equation}\n\n\\begin{equation}\nY \\sim \\mathcal{N}(\\mu,\\,\\sigma^{2})\n\\end{equation} \n\nwhere $\\sigma^2$ represents the measurement error. \n\nIn this example, we will use $\\sigma^2 = 10$\n\nWe also choose the parameters as normal distributions:\n\n\\begin{eqnarray}\n\\alpha \\sim \\mathcal{N}(0,\\,10) \\\\\n\\beta_i \\sim \\mathcal{N}(0,\\,10) \\\\\n\\sigma^2 \\sim |\\mathcal{N}(0,\\,10)|\n\\end{eqnarray} \n\nWe will artificially create the data to predict on. We will then see if our model predicts them correctly.",
"_____no_output_____"
]
],
[
[
"# Initialize random number generator\nnp.random.seed(123)\n\n# True parameter values\nalpha, sigma = 1, 1\nbeta = [1, 2.5]\n\n# Size of dataset\nsize = 100\n\n# Predictor variable\nX1 = np.linspace(0, 1, size)\nX2 = np.linspace(0,.2, size)\n\n# Simulate outcome variable\nY = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma\n\nfig, ax = plt.subplots(1,2, figsize=(10,6), sharex=True)\nax[0].scatter(X1,Y)\nax[1].scatter(X2,Y)\nax[0].set_xlabel(r'$x_1$', fontsize=14) \nax[0].set_ylabel(r'$Y$', fontsize=14)\nax[1].set_xlabel(r'$x_2$', fontsize=14) \nax[1].set_ylabel(r'$Y$', fontsize=14)",
"_____no_output_____"
],
[
"from pymc3 import Model, Normal, HalfNormal\n\nbasic_model = Model()\n\nwith basic_model:\n\n # Priors for unknown model parameters, specifically create stochastic random variables \n # with Normal prior distributions for the regression coefficients,\n # and a half-normal distribution for the standard deviation of the observations, σ.\n alpha = Normal('alpha', mu=0, sd=10)\n beta = Normal('beta', mu=0, sd=10, shape=2)\n sigma = HalfNormal('sigma', sd=1)\n\n # Expected value of outcome - posterior\n mu = alpha + beta[0]*X1 + beta[1]*X2\n\n # Likelihood (sampling distribution) of observations\n Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)",
"_____no_output_____"
],
[
"# model fitting with sampling\nfrom pymc3 import NUTS, sample, find_MAP\nfrom scipy import optimize\n\nwith basic_model:\n\n # obtain starting values via MAP\n start = find_MAP(fmin=optimize.fmin_powell)\n\n # instantiate sampler\n step = NUTS(scaling=start)\n\n # draw 2000 posterior samples\n trace = sample(2000, step, start=start)",
"logp = -164.5: 5%|▌ | 270/5000 [00:00<00:01, 4530.74it/s] "
],
[
"from pymc3 import traceplot\n\ntraceplot(trace);",
"_____no_output_____"
],
[
"results = pm.summary(trace, \n var_names=['alpha', 'beta', 'sigma'])\nresults",
"_____no_output_____"
]
],
[
[
"This linear regression example is from the original paper on PyMC3: *Salvatier J, Wiecki TV, Fonnesbeck C. 2016. Probabilistic programming in Python using PyMC3. PeerJ Computer Science 2:e55 https://doi.org/10.7717/peerj-cs.55*",
"_____no_output_____"
],
[
"<a id=no4></a> [Top](#top)\n\n## 4. Try this at Home: Example on Mining Disasters\nWe will go over the classical `mining disasters from 1851 to 1962` dataset. \n\nThis example is from the [pyMC3 Docs](https://docs.pymc.io/notebooks/getting_started.html).",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndisaster_data = pd.Series([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,\n 3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,\n 2, 2, 3, 4, 2, 1, 3, np.nan, 2, 1, 1, 1, 1, 3, 0, 0,\n 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,\n 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,\n 3, 3, 1, np.nan, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\nfontsize = 12\nyears = np.arange(1851, 1962)\nplt.figure(figsize=(10,5))\n#plt.scatter(years, disaster_data); \nplt.bar(years, disaster_data)\nplt.ylabel('Disaster count', size=fontsize)\nplt.xlabel('Year', size=fontsize);\nplt.title('Was there a Turning Point in Mining disasters from 1851 to 1962?', size=15);",
"_____no_output_____"
]
],
[
[
"#### Building the model\n\n**Step1:** We choose the probability model for our experiment. Occurrences of disasters in the time series is thought to follow a **Poisson** process with a large **rate** parameter in the early part of the time series, and from one with a smaller **rate** in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations. \n\n```\ndisasters = pm.Poisson('disasters', rate, observed=disaster_data)\n```\n\nWe have two rates, `early_rate` if $t<=s$, and `late_rate` if $t>s$, where $s$ is the year the switch was made (a.k.a. the `switchpoint`). \n\n**Step2:** Choose a prior distributions of the two rates, what we believe the rates were before we observed the data, and the switchpoint. We choose Exponential.\n```\nearly_rate = pm.Exponential('early_rate', 1)\n```\n\nThe parameters of this model are: \n\n\n**Note:** Watch for missing values. Missing values are handled transparently by passing a MaskedArray or a pandas.DataFrame. Behind the scenes, another random variable, disasters.missing_values is created to model the missing values. If you pass a np.array with missing values you will get an error.",
"_____no_output_____"
]
],
[
[
"with pm.Model() as disaster_model:\n\n # discrete\n switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)\n\n # Priors for pre- and post-switch rates number of disasters\n early_rate = pm.Exponential('early_rate', 1)\n late_rate = pm.Exponential('late_rate', 1)\n\n # our theta - allocate appropriate Poisson rates to years before and after current\n # switch is an `if` statement in puMC3\n rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)\n\n # our observed data as a likelihood function of the `rate` parameters\n # shows how we think our data is distributed\n disasters = pm.Poisson('disasters', rate, observed=disaster_data)",
"_____no_output_____"
]
],
[
[
"#### Model Fitting",
"_____no_output_____"
]
],
[
[
"# there are defaults but we can also more explicitly set the sampling algorithms\nwith disaster_model:\n \n # for continuous variables\n step1 = pm.NUTS([early_rate, late_rate])\n \n # for discrete variables\n step2 = pm.Metropolis([switchpoint, disasters.missing_values[0]] )\n\n trace = pm.sample(10000, step=[step1, step2])\n # try different number of samples\n #trace = pm.sample(5000, step=[step1, step2])",
"Multiprocess sampling (4 chains in 4 jobs)\nCompoundStep\n>NUTS: [late_rate, early_rate]\n>CompoundStep\n>>Metropolis: [disasters_missing]\n>>Metropolis: [switchpoint]\nSampling 4 chains, 0 divergences: 75%|███████▌ | 31500/42000 [00:30<00:04, 2506.23draws/s]"
]
],
[
[
"#### Posterior Analysis",
"_____no_output_____"
],
[
"On the left side plots we notice that our early rate is between 2.5 and 3.5 disasters a year. In the late period it seems to be between 0.6 and 1.2 so definitely lower.\n\nThe right side plots show the samples we drew to come to our conclusion.",
"_____no_output_____"
]
],
[
[
"pm.traceplot(trace, ['early_rate', 'late_rate', 'switchpoint'], figsize=(20,10));",
"_____no_output_____"
],
[
"results = pm.summary(trace, \n var_names=['early_rate', 'late_rate', 'switchpoint'])\nresults",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
4a9d3bdcf7155e84593a89408d837d02efe04a43
| 37,508 |
ipynb
|
Jupyter Notebook
|
Simulador_Interativo_Regressão_Linear.ipynb
|
Daniel-ASG/Aulas_de_cursos
|
9e10ff1373dc3f0351a94f5cd94d96aac20052fc
|
[
"MIT"
] | null | null | null |
Simulador_Interativo_Regressão_Linear.ipynb
|
Daniel-ASG/Aulas_de_cursos
|
9e10ff1373dc3f0351a94f5cd94d96aac20052fc
|
[
"MIT"
] | null | null | null |
Simulador_Interativo_Regressão_Linear.ipynb
|
Daniel-ASG/Aulas_de_cursos
|
9e10ff1373dc3f0351a94f5cd94d96aac20052fc
|
[
"MIT"
] | null | null | null | 34.50598 | 265 | 0.491069 |
[
[
[
"<a href=\"https://colab.research.google.com/github/Daniel-ASG/Aulas_de_cursos/blob/main/Simulador_Interativo_Regress%C3%A3o_Linear.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"<h1 style='color: green; font-size: 36px; font-weight: bold;'>Data Science - Regressão Linear</h1>",
"_____no_output_____"
],
[
"# <font color='red' style='font-size: 30px;'>Bônus</font>\n<hr style='border: 2px solid red;'>",
"_____no_output_____"
],
[
"## Importando nosso modelo",
"_____no_output_____"
]
],
[
[
"import pickle\n\nmodelo = open('/content/drive/MyDrive/Analise_de_dados/modelo_preco_imovel','rb')\nlm_new = pickle.load(modelo)\nmodelo.close()\n\narea = 38\ngaragem = 2\nbanheiros = 4\nlareira = 4\nmarmore = 0\nandares = 1\n\nentrada = [[area, garagem, banheiros, lareira, marmore, andares]]\n\nprint('$ {0:.2f}'.format(lm_new.predict(entrada)[0]))",
"$ 46389.80\n"
]
],
[
[
"## Exemplo de um simulador interativo para Jupyter\n\nhttps://ipywidgets.readthedocs.io/en/stable/index.html\n\nhttps://github.com/jupyter-widgets/ipywidgets",
"_____no_output_____"
]
],
[
[
"# Importando bibliotecas\nfrom ipywidgets import widgets, HBox, VBox\nfrom IPython.display import display\n\n# Criando os controles do formulário\narea = widgets.Text(description=\"Área\")\ngaragem = widgets.Text(description=\"Garagem\")\nbanheiros = widgets.Text(description=\"Banheiros\")\nlareira = widgets.Text(description=\"Lareira\")\nmarmore = widgets.Text(description=\"Mármore?\")\nandares = widgets.Text(description=\"Andares?\")\n\nbotao = widgets.Button(description=\"Simular\")\n\n# Posicionando os controles\nleft = VBox([area, banheiros, marmore])\nright = VBox([garagem, lareira, andares])\ninputs = HBox([left, right])\n\n# Função de simulação\ndef simulador(sender):\n entrada=[[\n float(area.value if area.value else 0), \n float(garagem.value if garagem.value else 0), \n float(banheiros.value if banheiros.value else 0), \n float(lareira.value if lareira.value else 0), \n float(marmore.value if marmore.value else 0), \n float(andares.value if andares.value else 0)\n ]]\n print('$ {0:.2f}'.format(lm_new.predict(entrada)[0]))\n \n# Atribuindo a função \"simulador\" ao evento click do botão\nbotao.on_click(simulador) ",
"_____no_output_____"
],
[
"display(inputs, botao)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a9d501668fc7f3335c241309d63634cd49f349e
| 3,259 |
ipynb
|
Jupyter Notebook
|
notebook/index.ipynb
|
osscar-org/Quantum-Mechanics
|
5c3bfb8d890eb7a80e3ea2eaa33778712f397022
|
[
"MIT"
] | null | null | null |
notebook/index.ipynb
|
osscar-org/Quantum-Mechanics
|
5c3bfb8d890eb7a80e3ea2eaa33778712f397022
|
[
"MIT"
] | null | null | null |
notebook/index.ipynb
|
osscar-org/Quantum-Mechanics
|
5c3bfb8d890eb7a80e3ea2eaa33778712f397022
|
[
"MIT"
] | null | null | null | 36.617978 | 269 | 0.612458 |
[
[
[
"# **Quantum Mechanics and Computational Materials Science**\n\n<hr style=\"height:1px;border:none;color:#cccccc;background-color:#cccccc;\" />",
"_____no_output_____"
],
[
"## Section 1: **Quantum Mechanics**\n\n<ol style=\"font-size:18px\">\n <li><a href=\"./quantum-mechanics/1quantumwell.ipynb\">Numerical Solution of the Schrödinger Equation for 1D Quantum Well</a></li>\n <li><a href=\"./quantum-mechanics/2quantumwells.ipynb\">Numerical Solution of the Schrödinger Equation for the Double Square Well Potential</a></li>\n <li><a href=\"./quantum-mechanics/asymmetricwell.ipynb\">Avoided Crossing in 1D Asymmetric Quantum Well</a></li>\n <li><a href=\"./quantum-mechanics/shooting_method.ipynb\">Shooting Method with Numerov Algorithm to Solve the Time Independent Schrödinger Equation for 1D Quantum Well</a></li>\n <li><a href=\"./quantum-mechanics/soft.ipynb\">Numerical Solution of 1D Time-Dependent Schrödinger Equation by Split Operator Fourier Transform (SOFT) Method</a></li>\n <li><a href=\"./quantum-mechanics/msoft.ipynb\">Numerical Solution of 1D Time-Dependent Schrödinger Equation For Nuclear Evolution On Multiple Electronic Potential Energy Surfaces Via The Multiple Split Operator Fourier Transform (MSOFT) Method.</a></li>\n</ol>",
"_____no_output_____"
],
[
"## Section 2: **Band Theory of Crystals**\n\n<ol style=\"font-size:18px\">\n <li><a href=\"./band-theory/FFT_and_planewaves.ipynb\">Fourier Transforms and Plane-Wave Expansions</a></li>\n <li><a href=\"./band-theory/free_electron.ipynb\">Free Eelectron Bandstructure</a></li>\n <li><a href=\"./band-theory/pseudopotential.ipynb\">Norm-Conserving Pseudopotentials</a></li>\n</ol>",
"_____no_output_____"
],
[
"## Section 3: **Statistical Mechanics**\n\n<ol style=\"font-size:18px\">\n <li><a href=\"./statistical-mechanics/monte_carlo_pi.ipynb\">Monte Carlo Calculations of Pi</a></li>\n <li><a href=\"./statistical-mechanics/monte_carlo_parabolic.ipynb\">Monte Carlo Simulation to Obtain Global Minimum</a></li>\n <li><a href=\"./statistical-mechanics/ising_model.ipynb\">Ising Model in 2D</a></li>\n <li><a href=\"./statistical-mechanics/diffusion_2d.ipynb\">Random Diffusion in 2D</a></li>\n</ol>",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.