markdown | code | output | license | path | repo_name
---|---|---|---|---|---
Create a symlink to the hidden folder that holds the spider's temporary files and settings, in case we need to edit or pull something out of it on the fly; otherwise it will not be visible in the file browser on the left.
|
!ln -s ~/.ScreamingFrogSEOSpider ~/ScreamingFrogSEOSpider
|
_____no_output_____
|
MIT
|
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
|
danzerzine/seospider-colab
|
Run the spider in headless mode, passing all the flags needed for exports, settings, reports, bulk exports and so on.
|
#@title Crawl settings { vertical-output: true }
url_start = "" #@param {type:"string"}
use_gcs = "" #@param ["", "--use-google-search-console \"account \""] {allow-input: true}
config_path = "" #@param {type:"string"}
output_folder = "" #@param {type:"string"}
!screamingfrogseospider --crawl "$url_start" $use_gcs --headless --config "$config_path" --output-folder "$output_folder" --timestamped-output --save-crawl --export-tabs "Internal:All,Response Codes:All,Response Codes:Blocked by Robots.txt,Response Codes:Blocked Resource,Response Codes:No Response,Response Codes:Redirection (3xx),Response Codes:Redirection (JavaScript),Response Codes:Redirection (Meta Refresh),Response Codes:Client Error (4xx),Response Codes:Server Error (5xx),Page Titles:All,Page Titles:Missing,Page Titles:Duplicate,Page Titles:Over X Characters,Page Titles:Below X Characters,Page Titles:Over X Pixels,Page Titles:Below X Pixels,Page Titles:Same as H1,Page Titles:Multiple,Meta Description:All,Meta Description:Missing,Meta Description:Duplicate,Meta Description:Over X Characters,Meta Description:Below X Characters,Meta Description:Over X Pixels,Meta Description:Below X Pixels,Meta Description:Multiple,Meta Keywords:All,Meta Keywords:Missing,Meta Keywords:Duplicate,Meta Keywords:Multiple,Canonicals:All,Canonicals:Contains Canonical,Canonicals:Self Referencing,Canonicals:Canonicalised,Canonicals:Missing,Canonicals:Multiple,Canonicals:Non-Indexable Canonical,Directives:All,Directives:Index,Directives:Noindex,Directives:Follow,Directives:Nofollow,Directives:None,Directives:NoArchive,Directives:NoSnippet,Directives:Max-Snippet,Directives:Max-Image-Preview,Directives:Max-Video-Preview,Directives:NoODP,Directives:NoYDIR,Directives:NoImageIndex,Directives:NoTranslate,Directives:Unavailable_After,Directives:Refresh,AMP:All,AMP:Non-200 Response,AMP:Missing Non-AMP Return Link,AMP:Missing Canonical to Non-AMP,AMP:Non-Indexable Canonical,AMP:Indexable,AMP:Non-Indexable,AMP:Missing <html amp> Tag,AMP:Missing/Invalid <!doctype html> Tag,AMP:Missing <head> Tag,AMP:Missing <body> Tag,AMP:Missing Canonical,AMP:Missing/Invalid <meta charset> Tag,AMP:Missing/Invalid <meta viewport> Tag,AMP:Missing/Invalid AMP Script,AMP:Missing/Invalid AMP Boilerplate,AMP:Contains Disallowed HTML,AMP:Other Validation Errors,Structured Data:All,Structured Data:Contains Structured Data,Structured Data:Missing,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:Parse Errors,Structured Data:Microdata URLs,Structured Data:JSON-LD URLs,Structured Data:RDFa URLs,Sitemaps:All,Sitemaps:URLs in Sitemap,Sitemaps:URLs not in Sitemap,Sitemaps:Orphan URLs,Sitemaps:Non-Indexable URLs in Sitemap,Sitemaps:URLs in Multiple Sitemaps,Sitemaps:XML Sitemap with over 50k URLs,Sitemaps:XML Sitemap over 50MB" --bulk-export "Canonicals:Contains Canonical Inlinks,Canonicals:Self Referencing Inlinks,Canonicals:Canonicalised Inlinks,Canonicals:Missing Inlinks,Canonicals:Multiple Inlinks,Canonicals:Non-Indexable Canonical Inlinks,AMP:All Inlinks,AMP:Non-200 Response Inlinks,AMP:Missing Non-AMP Return Link Inlinks,AMP:Missing Canonical to Non-AMP Inlinks,AMP:Non-Indexable Canonical Inlinks,AMP:Indexable Inlinks,AMP:Non-Indexable Inlinks,Structured Data:Contains Structured Data,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:JSON-LD URLs,Structured Data:Microdata URLs,Structured Data:RDFa URLs,Sitemaps:URLs in Sitemap Inlinks,Sitemaps:Orphan URLs Inlinks,Sitemaps:Non-Indexable URLs in Sitemap Inlinks,Sitemaps:URLs in Multiple Sitemaps Inlinks" --save-report "Crawl Overview,Redirects:All Redirects,Redirects:Redirect Chains,Redirects:Redirect & Canonical Chains,Canonicals:Canonical Chains,Canonicals:Non-Indexable Canonicals,Pagination:Non-200 Pagination 
URLs,Pagination:Unlinked Pagination URLs,Hreflang:All hreflang URLs,Hreflang:Non-200 hreflang URLs,Hreflang:Unlinked hreflang URLs,Hreflang:Missing Return Links,Hreflang:Inconsistent Language & Region Return Links,Hreflang:Non Canonical Return Links,Hreflang:Noindex Return Links,Insecure Content,SERP Summary,Orphan Pages,Structured Data:Validation Errors & Warnings Summary,Structured Data:Validation Errors & Warnings,Structured Data:Google Rich Results Features Summary,Structured Data:Google Rich Results Features,HTTP Headers:HTTP Header Summary,Cookies:Cookie Summary" --export-format xlsx --export-custom-summary "Site Crawled,Date,Time,Total URLs Encountered,Total URLs Crawled,Total Internal blocked by robots.txt,Total External blocked by robots.txt,URLs Displayed,Total Internal URLs,Total External URLs,Total Internal Indexable URLs,Total Internal Non-Indexable URLs,JavaScript:All,JavaScript:Uses Old AJAX Crawling Scheme URLs,JavaScript:Uses Old AJAX Crawling Scheme Meta Fragment Tag,JavaScript:Page Title Only in Rendered HTML,JavaScript:Page Title Updated by JavaScript,JavaScript:H1 Only in Rendered HTML,JavaScript:H1 Updated by JavaScript,JavaScript:Meta Description Only in Rendered HTML,JavaScript:Meta Description Updated by JavaScript,JavaScript:Canonical Only in Rendered HTML,JavaScript:Canonical Mismatch,JavaScript:Noindex Only in Original HTML,JavaScript:Nofollow Only in Original HTML,JavaScript:Contains JavaScript Links,JavaScript:Contains JavaScript Content,JavaScript:Pages with Blocked Resources,H1:All,H1:Missing,H1:Duplicate,H1:Over X Characters,H1:Multiple,H2:All,H2:Missing,H2:Duplicate,H2:Over X Characters,H2:Multiple,Internal:All,Internal:HTML,Internal:JavaScript,Internal:CSS,Internal:Images,Internal:PDF,Internal:Flash,Internal:Other,Internal:Unknown,External:All,External:HTML,External:JavaScript,External:CSS,External:Images,External:PDF,External:Flash,External:Other,External:Unknown,AMP:All,AMP:Non-200 Response,AMP:Missing Non-AMP Return Link,AMP:Missing Canonical to Non-AMP,AMP:Non-Indexable Canonical,AMP:Indexable,AMP:Non-Indexable,AMP:Missing <html amp> Tag,AMP:Missing/Invalid <!doctype html> Tag,AMP:Missing <head> Tag,AMP:Missing <body> Tag,AMP:Missing Canonical,AMP:Missing/Invalid <meta charset> Tag,AMP:Missing/Invalid <meta viewport> Tag,AMP:Missing/Invalid AMP Script,AMP:Missing/Invalid AMP Boilerplate,AMP:Contains Disallowed HTML,AMP:Other Validation Errors,Canonicals:All,Canonicals:Contains Canonical,Canonicals:Self Referencing,Canonicals:Canonicalised,Canonicals:Missing,Canonicals:Multiple,Canonicals:Non-Indexable Canonical,Content:All,Content:Spelling Errors,Content:Grammar Errors,Content:Near Duplicates,Content:Exact Duplicates,Content:Low Content Pages,Custom Extraction:All,Custom Search:All,Directives:All,Directives:Index,Directives:Noindex,Directives:Follow,Directives:Nofollow,Directives:None,Directives:NoArchive,Directives:NoSnippet,Directives:Max-Snippet,Directives:Max-Image-Preview,Directives:Max-Video-Preview,Directives:NoODP,Directives:NoYDIR,Directives:NoImageIndex,Directives:NoTranslate,Directives:Unavailable_After,Directives:Refresh,Analytics:All,Analytics:Sessions Above 0,Analytics:Bounce Rate Above 70%,Analytics:No GA Data,Analytics:Non-Indexable with GA Data,Analytics:Orphan URLs,Search Console:All,Search Console:Clicks Above 0,Search Console:No GSC Data,Search Console:Non-Indexable with GSC Data,Search Console:Orphan URLs,Hreflang:All,Hreflang:Contains hreflang,Hreflang:Non-200 hreflang URLs,Hreflang:Unlinked hreflang URLs,Hreflang:Missing Return 
Links,Hreflang:Inconsistent Language & Region Return Links,Hreflang:Non-Canonical Return Links,Hreflang:Noindex Return Links,Hreflang:Incorrect Language & Region Codes,Hreflang:Multiple Entries,Hreflang:Missing Self Reference,Hreflang:Not Using Canonical,Hreflang:Missing X-Default,Hreflang:Missing,Images:All,Images:Over X KB,Images:Missing Alt Text,Images:Missing Alt Attribute,Images:Alt Text Over X Characters,Link Metrics:All,Meta Description:All,Meta Description:Missing,Meta Description:Duplicate,Meta Description:Over X Characters,Meta Description:Below X Characters,Meta Description:Over X Pixels,Meta Description:Below X Pixels,Meta Description:Multiple,Meta Keywords:All,Meta Keywords:Missing,Meta Keywords:Duplicate,Meta Keywords:Multiple,PageSpeed:All,PageSpeed:Eliminate Render-Blocking Resources,PageSpeed:Defer Offscreen Images,PageSpeed:Efficiently Encode Images,PageSpeed:Properly Size Images,PageSpeed:Minify CSS,PageSpeed:Minify JavaScript,PageSpeed:Reduce Unused CSS,PageSpeed:Reduce Unused JavaScript,PageSpeed:Serve Images in Next-Gen Formats,PageSpeed:Enable Text Compression,PageSpeed:Preconnect to Required Origins,PageSpeed:Reduce Server Response Times (TTFB),PageSpeed:Avoid Multiple Page Redirects,PageSpeed:Preload Key Requests,PageSpeed:Use Video Formats for Animated Content,PageSpeed:Avoid Excessive DOM Size,PageSpeed:Reduce JavaScript Execution Time,PageSpeed:Serve Static Assets with an Efficient Cache Policy,PageSpeed:Minimize Main-Thread Work,PageSpeed:Ensure Text Remains Visible During Webfont Load,PageSpeed:Image Elements Do Not Have Explicit Width & Height,PageSpeed:Avoid Large Layout Shifts,PageSpeed:Avoid Serving Legacy JavaScript to Modern Browsers,PageSpeed:Request Errors,Pagination:All,Pagination:Contains Pagination,Pagination:First Page,Pagination:Paginated 2+ Pages,Pagination:Pagination URL Not in Anchor Tag,Pagination:Non-200 Pagination URLs,Pagination:Unlinked Pagination URLs,Pagination:Non-Indexable,Pagination:Multiple Pagination URLs,Pagination:Pagination Loop,Pagination:Sequence Error,Response Codes:All,Response Codes:Blocked by Robots.txt,Response Codes:Blocked Resource,Response Codes:No Response,Response Codes:Success (2xx),Response Codes:Redirection (3xx),Response Codes:Redirection (JavaScript),Response Codes:Redirection (Meta Refresh),Response Codes:Client Error (4xx),Response Codes:Server Error (5xx),Security:All,Security:HTTP URLs,Security:HTTPS URLs,Security:Mixed Content,Security:Form URL Insecure,Security:Form on HTTP URL,Security:Unsafe Cross-Origin Links,Security:Missing HSTS Header,Security:Bad Content Type,Security:Missing X-Content-Type-Options Header,Security:Missing X-Frame-Options Header,Security:Protocol-Relative Resource Links,Security:Missing Content-Security-Policy Header,Security:Missing Secure Referrer-Policy Header,Sitemaps:All,Sitemaps:URLs in Sitemap,Sitemaps:URLs not in Sitemap,Sitemaps:Orphan URLs,Sitemaps:Non-Indexable URLs in Sitemap,Sitemaps:URLs in Multiple Sitemaps,Sitemaps:XML Sitemap with over 50k URLs,Sitemaps:XML Sitemap over 50MB,Structured Data:All,Structured Data:Contains Structured Data,Structured Data:Missing,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:Parse Errors,Structured Data:Microdata URLs,Structured Data:JSON-LD URLs,Structured Data:RDFa URLs,Page Titles:All,Page Titles:Missing,Page Titles:Duplicate,Page Titles:Over X Characters,Page Titles:Below X Characters,Page Titles:Over X Pixels,Page Titles:Below X Pixels,Page Titles:Same as H1,Page Titles:Multiple,URL:All,URL:Non 
ASCII Characters,URL:Underscores,URL:Uppercase,URL:Parameters,URL:Over X Characters,URL:Multiple Slashes,URL:Repetitive Path,URL:Contains Space,URL:Broken Bookmark,URL:Internal Search,Depth 1,Depth 2,Depth 3,Depth 4,Depth 5,Depth 6,Depth 7,Depth 8,Depth 9,Depth 10+,Top Inlinks 1 URL,Top Inlinks 1 Number of Inlinks,Top Inlinks 2 URL,Top Inlinks 2 Number of Inlinks,Top Inlinks 3 URL,Top Inlinks 3 Number of Inlinks,Top Inlinks 4 URL,Top Inlinks 4 Number of Inlinks,Top Inlinks 5 URL,Top Inlinks 5 Number of Inlinks,Top Inlinks 6 URL,Top Inlinks 6 Number of Inlinks,Top Inlinks 7 URL,Top Inlinks 7 Number of Inlinks,Top Inlinks 8 URL,Top Inlinks 8 Number of Inlinks,Top Inlinks 9 URL,Top Inlinks 9 Number of Inlinks,Top Inlinks 10 URL,Top Inlinks 10 Number of Inlinks,Top Inlinks 11 URL,Top Inlinks 11 Number of Inlinks,Top Inlinks 12 URL,Top Inlinks 12 Number of Inlinks,Top Inlinks 13 URL,Top Inlinks 13 Number of Inlinks,Top Inlinks 14 URL,Top Inlinks 14 Number of Inlinks,Top Inlinks 15 URL,Top Inlinks 15 Number of Inlinks,Top Inlinks 16 URL,Top Inlinks 16 Number of Inlinks,Top Inlinks 17 URL,Top Inlinks 17 Number of Inlinks,Top Inlinks 18 URL,Top Inlinks 18 Number of Inlinks,Top Inlinks 19 URL,Top Inlinks 19 Number of Inlinks,Top Inlinks 20 URL,Top Inlinks 20 Number of Inlinks,Response Times 0s to 1s,Response Times 1s to 2s,Response Times 2s to 3s,Response Times 3s to 4s,Response Times 4s to 5s,Response Times 5s to 6s,Response Times 6s to 7s,Response Times 7s to 8s,Response Times 8s to 9s,Response Times 10s or more"
|
_____no_output_____
|
MIT
|
Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb
|
danzerzine/seospider-colab
|
---- Bayes' theorem - the formula for a conditional probability given the data - $P(A|B) = \frac{P(B|A)P(A)}{P(B)}$ ---- - $P(A|B)$ : the posterior, the probability of event A updated after event B has occurred - $P(A)$ : the prior, the probability of event A held before event B occurs - $P(B|A)$ : the likelihood, the probability of event B given that event A occurred - $P(B)$ : the normalizing constant, or evidence, which rescales the probability --- Extension of Bayes' theorem, part 1 - $P(A_1|B)$ $= \frac{P(B|A_1)P(A_1)}{P(B)}$ $= \frac{P(B|A_1)P(A_1)}{\sum_i P(A_i,B)}$ $= \frac{P(B|A_1)P(A_1)}{\sum_i P(B|A_i)P(A_i)}$ - Since the denominator of $P(A_i|B)$ is the same for every $i$, only the numerators need to be compared --- Pros and cons for classification - Pro: when the top answer is wrong, the 2nd and 3rd candidates are also available. - Con: for a 4-class problem all 4 posteriors must be computed. --- The case $A_1 = A,\ A_2 = A^\complement$ - $P(A|B)$ $= \frac{P(B|A)P(A)}{P(B)}$ $= \frac{P(B|A)P(A)}{P(B,A)+P(B,A^\complement)}$ $= \frac{P(B|A)P(A)}{P(B|A)P(A) + P(B|A^\complement)P(A^\complement)}$ $= \frac{P(B|A)P(A)}{P(B|A)P(A)+P(B|A^\complement)(1-P(A))}$ - this is the binary classification problem --- The diagnostic test problem 1) Events - having the disease: D - testing positive: S - a person with the disease tests positive: S|D - a person who tested positive has the disease: D|S 2) Problem - Given $P(S|D) = 0.99$, find $P(D|S)$. ---- By Bayes' theorem - $P(D|S) = \frac{P(S|D)P(D)}{P(S)}$ -- This cannot be computed yet because $P(S)$ and $P(D)$ are unknown. ---- 3) Additional information - The disease is a rare one that affects 0.2% of the population: $P(D) = 0.002$ - When people without the disease take the test, 5% of them still get a positive result: $P(S|D^\complement) = 0.05$ --- By the extension of Bayes' theorem - $P(D|S)$ $= \frac{P(S|D)P(D)}{P(S)}$ $= \frac{P(S|D)P(D)}{P(S,D)+P(S,D^\complement)}$ $= \frac{P(S|D)P(D)}{P(S|D)P(D)+P(S|D^\complement)P(D^\complement)}$ $= \frac{P(S|D)P(D)}{P(S|D)P(D)+P(S|D^\complement)(1-P(D))}$ $= \frac{0.99\cdot 0.002}{0.99\cdot 0.002+0.05\cdot (1-0.002)}$ $= 0.038$
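As a quick numeric sketch of the derivation above (using only the numbers given in the text; the variable names are illustrative):

```python
# Plugging the stated numbers into the extended Bayes formula.
p_D = 0.002             # prior P(D): 0.2% of the population has the disease
p_S_given_D = 0.99      # P(S|D): positive test given the disease
p_S_given_notD = 0.05   # P(S|D^c): positive test without the disease

# P(S) by the law of total probability
p_S = p_S_given_D * p_D + p_S_given_notD * (1 - p_D)

# Posterior P(D|S)
print(round(p_S_given_D * p_D / p_S, 3))  # 0.038, matching the result above
```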
|
round((0.99*0.002) / (0.99*0.002 + 0.05*(1-0.002)), 3)
|
_____no_output_____
|
MIT
|
MATH/18_Bayesian_rule.ipynb
|
CATERINA-SEUL/Data-Science-School
|
---- TabularCPD(variable, variable_card, values, evidence=None, evidence_card=None) - BayesianModel : the model to which Bayes' theorem is applied - TabularCPD : implements a conditional probability table ---- - variable : name string of the random variable - variable_card : number of values the random variable can take - values : array of conditional probabilities. Each column corresponds to one conditioning case, so every column must sum to 1. - evidence : list of name strings of the conditioning random variables - evidence_card : list of the numbers of values the conditioning random variables can take. To express an unconditional probability, use evidence = None and evidence_card = None. The prior probability of having the disease is $P(D) = P(X=1)$, and of not having it $P(D^\complement) = P(X = 0)$
|
from pgmpy.factors.discrete import TabularCPD
cpd_X = TabularCPD('X', 2, [[1-0.002, 0.002]])
print(cpd_X)
|
+------+-------+
| X(0) | 0.998 |
+------+-------+
| X(1) | 0.002 |
+------+-------+
|
MIT
|
MATH/18_Bayesian_rule.ipynb
|
CATERINA-SEUL/Data-Science-School
|
The probability of a positive test result is $P(S) = P(Y = 1)$ and of a negative result $P(S^\complement) = P(Y=0)$ - When adding the probabilities of the random variable $Y$ to the Bayesian model, they must be specified in the form $P(Y|X)$. - evidence : which random variable acts as the condition - evidence_card : how many states that conditioning variable has
|
import numpy as np

cpd_Y_on_X = TabularCPD('Y', 2, np.array(
[[0.95, 0.01], [0.05, 0.99]]), evidence=['X'], evidence_card=[2])
print(cpd_Y_on_X)
from pgmpy.models import BayesianModel
|
_____no_output_____
|
MIT
|
MATH/18_Bayesian_rule.ipynb
|
CATERINA-SEUL/Data-Science-School
|
BayesianModel(variables) - variables : list of name strings of the random variables the model contains - add_cpds() : attach the conditional probability tables - check_model() : verify that the model is consistent; True means the model is valid
|
model = BayesianModel([('X','Y')])
model.add_cpds(cpd_X,cpd_Y_on_X)
model.check_model()
from pgmpy.inference import VariableElimination
|
_____no_output_____
|
MIT
|
MATH/18_Bayesian_rule.ipynb
|
CATERINA-SEUL/Data-Science-School
|
VariableElimination provides inference based on the variable elimination algorithm. query(variables, evidence) ---- - query() computes the posterior probability - variables : list of names of the random variables whose posterior is computed - evidence : dictionary mapping the conditioning random variables to their observed values
|
inference = VariableElimination(model)
posterior = inference.query(['X'], evidence={'Y':1})
print(posterior)
|
+------+----------+
| X | phi(X) |
+======+==========+
| X(0) | 0.9618 |
+------+----------+
| X(1) | 0.0382 |
+------+----------+
|
MIT
|
MATH/18_Bayesian_rule.ipynb
|
CATERINA-SEUL/Data-Science-School
|
Machine Learning Overview. Machine learning is the ability of computers to take a dataset of objects and learn patterns about them. This dataset is structured as a table, where each row is a vector representing some object by encoding its properties as the values of the vector. The columns represent **features** - properties that all the objects share. There are, broadly speaking, two kinds of machine learning. **Supervised learning** has an extra column at the end of the dataset, and the program learns to predict the value of this based on the input features for some new object. If the output value is continuous, it is **regression**, otherwise it is **classification**. **Unsupervised learning** seeks to find patterns within the data by, for example, clustering. Supervised Learning. One of the most critical concepts in supervised learning is the dataset. This represents the knowledge about the set of objects in question that you wish the machine to learn. It is essentially a table where the rows represent objects, and the columns represent the properties. 'Training' is essentially the creation of an object called a model, which can take a row missing the last column, and predict what its value will be by examining the data in the dataset. For example...
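Before the concrete example below, here is a minimal sketch (the class name and data are made up, not the library used later in this notebook) of what "a model that predicts the last column from the others" means:

```python
import numpy as np

class NearestRowModel:
    """Toy supervised model: predict the last column from the preceding ones."""
    def __init__(self, rows):
        data = np.asarray(rows, dtype=float)
        self.X = data[:, :-1]   # feature columns
        self.y = data[:, -1]    # the column to be predicted

    def predict(self, row):
        # Return the label of the closest training row (1-nearest-neighbour).
        distances = np.linalg.norm(self.X - np.asarray(row, dtype=float), axis=1)
        return self.y[np.argmin(distances)]

# Two features per object, the last column is the class.
model = NearestRowModel([[1.0, 2.0, 0], [5.0, 6.0, 1], [1.2, 2.1, 0]])
print(model.predict([5.1, 5.9]))   # -> 1.0
```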
|
import pandas as pd
iris_dataset = pd.read_csv("../data/iris.csv")
iris_dataset.head()
|
_____no_output_____
|
MIT
|
notebooks/ML.ipynb
|
samirelanduk/numberwang
|
Here a dataset has been loaded from CSV into a pandas dataframe. Each row represents a flower, on which four measurements have been taken, and each flower belongs to one of three classes. A supervised learning model would take this dataset of 150 flowers and train such that any other flower for which the relevant measurements were known could have its class predicted. This would obviously be a classification problem, not regression. A very simple model would take just two features and map them to one of two classes. The dataset can be reduced to this form as follows:
|
simple_iris = iris_dataset.iloc[0:100, [0, 2, 4]]
simple_iris.head()
simple_iris.tail()
|
_____no_output_____
|
MIT
|
notebooks/ML.ipynb
|
samirelanduk/numberwang
|
Because this is just two dimensions, it can be easily visualised as a scatter plot.
|
import sys
sys.path.append("..")
import numerus.learning as ml
ml.plot_dataset(simple_iris)
|
_____no_output_____
|
MIT
|
notebooks/ML.ipynb
|
samirelanduk/numberwang
|
The data can be seen to be **linearly separable** - there is a line that can be drawn between them that would separate them perfectly.One of the simplest classifiers for supervised learning is the perceptron. Perceptrons have a weights vector which they dot with an input vector to get some level of activation. If the activation is above some threshold, one class is predicted - otherwise the other is predicted. Training a perceptron means giving the model training inputs until it has values for the weights and threshold that effectively separate the classes.The data must be split into training and test data, and then a perceptron created from the training data.
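Before that, here is a rough, self-contained sketch of the rule just described (dot the weights with the inputs, threshold the result, nudge the weights on mistakes). It assumes rows of the form [features..., label] with 0/1 labels, and is only an illustration, not the `numerus.learning` implementation used below:

```python
import numpy as np

def train_perceptron(data, epochs=20, lr=0.1):
    """Sketch of perceptron training; data rows are [features..., label(0/1)]."""
    data = np.asarray(data, dtype=float)
    X, y = data[:, :-1], data[:, -1]
    w = np.zeros(X.shape[1])
    b = 0.0  # bias plays the role of the (negative) threshold
    for _ in range(epochs):
        for xi, target in zip(X, y):
            predicted = 1.0 if xi.dot(w) + b > 0 else 0.0
            error = target - predicted
            w += lr * error * xi   # nudge the weights towards the correct side
            b += lr * error
    return w, b

def predict(w, b, xi):
    return 1.0 if np.dot(xi, w) + b > 0 else 0.0

# Tiny linearly separable example: class 0 near the origin, class 1 further out.
w, b = train_perceptron([[0, 0, 0], [0, 1, 0], [2, 2, 1], [3, 2, 1]])
print(predict(w, b, [2.5, 2.5]))  # -> 1.0
```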
|
train_simple_iris, test_simple_iris = ml.split_data(simple_iris)
ml.plot_dataset(train_simple_iris, title="Training Data")
perceptron = ml.Perceptron(train_simple_iris)
print(perceptron)
|
_____no_output_____
|
MIT
|
notebooks/ML.ipynb
|
samirelanduk/numberwang
|
_*Using Qiskit Aqua for clique problems*_This Qiskit Aqua Optimization notebook demonstrates how to use the VQE quantum algorithm to compute the clique of a given graph. The problem is defined as follows. A clique in a graph $G$ is a complete subgraph of $G$. That is, it is a subset $K$ of the vertices such that every two vertices in $K$ are the two endpoints of an edge in $G$. A maximal clique is a clique to which no more vertices can be added. A maximum clique is a clique that includes the largest possible number of vertices. We will go through three examples to show (1) how to run the optimization in the non-programming way, (2) how to run the optimization in the programming way, (3) how to run the optimization with the VQE.We will omit the details for the support of CPLEX, which are explained in other notebooks such as maxcut.Note that the solution may not be unique. The problem and a brute-force method.
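The cells below rely on Aqua's `clique` helpers; as a plain-Python sketch of what "a set of vertices forms a clique of size K" means for a 0/1 assignment over an adjacency matrix (the function name and arguments here are illustrative, not the Aqua API):

```python
import numpy as np

def is_clique_of_size(assignment, adjacency, K):
    """Sketch: check that the chosen vertices form a clique with exactly K members."""
    chosen = [v for v, bit in enumerate(assignment) if bit == 1]
    if len(chosen) != K:
        return False
    # Every pair of chosen vertices must be joined by an edge (nonzero weight).
    return all(adjacency[u, v] != 0 for i, u in enumerate(chosen) for v in chosen[i+1:])

# Usage with a small adjacency matrix: vertices 0, 1 and 2 are mutually connected.
adj = np.array([[0, 1, 1, 0],
                [1, 0, 1, 0],
                [1, 1, 0, 1],
                [0, 0, 1, 0]])
print(is_clique_of_size([1, 1, 1, 0], adj, K=3))  # True
print(is_clique_of_size([1, 0, 1, 1], adj, K=3))  # False (no edge between 0 and 3)
```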
|
import numpy as np
from qiskit import Aer
from qiskit_aqua import run_algorithm
from qiskit_aqua.input import EnergyInput
from qiskit_aqua.translators.ising import clique
from qiskit_aqua.algorithms import ExactEigensolver
|
_____no_output_____
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
First, let us have a look at the graph, which is given in adjacency-matrix form.
|
K = 3 # K means the size of the clique
np.random.seed(100)
num_nodes = 5
w = clique.random_graph(num_nodes, edge_prob=0.8, weight_range=10)
print(w)
|
[[ 0. 4. 5. 3. -5.]
[ 4. 0. 7. 0. 6.]
[ 5. 7. 0. -4. 0.]
[ 3. 0. -4. 0. 8.]
[-5. 6. 0. 8. 0.]]
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
Let us try a brute-force method. Basically, we exhaustively try all the binary assignments. In each binary assignment, the entry of a vertex is either 0 (meaning the vertex is not in the clique) or 1 (meaning the vertex is in the clique). We print the binary assignment that satisfies the definition of the clique (Note the size is specified as K).
|
def brute_force():
# brute-force way: try every possible assignment!
def bitfield(n, L):
result = np.binary_repr(n, L)
return [int(digit) for digit in result]
L = num_nodes # length of the bitstring that represents the assignment
max = 2**L
has_sol = False
for i in range(max):
cur = bitfield(i, L)
cur_v = clique.satisfy_or_not(np.array(cur), w, K)
if cur_v:
has_sol = True
break
return has_sol, cur
has_sol, sol = brute_force()
if has_sol:
print("solution is ", sol)
else:
print("no solution found for K=", K)
|
solution is [1, 0, 0, 1, 1]
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
Part I: run the optimization in the non-programming way
|
qubit_op, offset = clique.get_clique_qubitops(w, K)
algo_input = EnergyInput(qubit_op)
params = {
'problem': {'name': 'ising'},
'algorithm': {'name': 'ExactEigensolver'}
}
result = run_algorithm(params, algo_input)
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
|
solution is [1. 0. 1. 1. 0.]
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
Part II: run the optimization in the programming way
|
algo = ExactEigensolver(algo_input.qubit_op, k=1, aux_operators=[])
result = algo.run()
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
|
solution is [1. 0. 1. 1. 0.]
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
Part III: run the optimization with the VQE
|
algorithm_cfg = {
'name': 'VQE',
'operator_mode': 'matrix'
}
optimizer_cfg = {
'name': 'COBYLA'
}
var_form_cfg = {
'name': 'RY',
'depth': 5,
'entanglement': 'linear'
}
params = {
'problem': {'name': 'ising', 'random_seed': 10598},
'algorithm': algorithm_cfg,
'optimizer': optimizer_cfg,
'variational_form': var_form_cfg
}
backend = Aer.get_backend('statevector_simulator')
result = run_algorithm(params, algo_input, backend=backend)
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
|
solution is [1. 0. 1. 1. 0.]
|
Apache-2.0
|
community/aqua/optimization/clique.ipynb
|
Chibikuri/qiskit-tutorials
|
Test shifting template experiments
|
%load_ext autoreload
%autoreload 2
import os
import sys
import pandas as pd
import numpy as np
import random
import umap
import glob
import pickle
import tensorflow as tf
from keras.models import load_model
from sklearn.decomposition import PCA
from plotnine import (ggplot,
labs,
geom_point,
aes,
ggsave,
theme_bw,
theme,
facet_wrap,
scale_color_manual,
guides,
guide_legend,
element_blank,
element_text,
element_rect,
element_line,
coords)
import warnings
warnings.filterwarnings(action='ignore')
from ponyo import utils, train_vae_modules, simulate_expression_data
# Set seeds to get reproducible VAE trained models
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
os.environ["PYTHONHASHSEED"] = "0"
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
tf.set_random_seed(1234)
# Read in config variables
base_dir = os.path.abspath(os.path.join(os.getcwd(),"../"))
config_filename = os.path.abspath(os.path.join(base_dir,
"human_tests",
"config_test_human.tsv"))
params = utils.read_config(config_filename)
# Load parameters
local_dir = params["local_dir"]
dataset_name = params['dataset_name']
analysis_name = params["simulation_type"]
rpkm_data_filename = params["raw_data_filename"]
normalized_data_filename = params["normalized_data_filename"]
metadata_filename = params["metadata_filename"]
NN_architecture = params['NN_architecture']
scaler_filename = params['scaler_transform_filename']
num_runs = params['num_simulated']
metadata_delimiter = params["metadata_delimiter"]
experiment_id_colname = params['metadata_experiment_colname']
sample_id_colname = params['metadata_sample_colname']
project_id = params['project_id']
NN_dir = os.path.join(
base_dir,
dataset_name,
"models",
NN_architecture)
assert os.path.exists(rpkm_data_filename)
|
_____no_output_____
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Setup directories
|
utils.setup_dir(config_filename)
|
_____no_output_____
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Pre-process data
|
train_vae_modules.normalize_expression_data(base_dir,
config_filename,
rpkm_data_filename,
normalized_data_filename)
|
input: dataset contains 50 samples and 5000 genes
Output: normalized dataset contains 50 samples and 5000 genes
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Train VAE
|
# Directory containing log information from VAE training
vae_log_dir = os.path.join(
base_dir,
dataset_name,
"logs",
NN_architecture)
# Train VAE
train_vae_modules.train_vae(config_filename,
normalized_data_filename)
|
input dataset contains 50 samples and 5000 genes
WARNING:tensorflow:From /home/alexandra/anaconda3/envs/test_ponyo/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
tracking <tf.Variable 'Variable:0' shape=() dtype=float32> beta
WARNING:tensorflow:From /home/alexandra/anaconda3/envs/test_ponyo/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /home/alexandra/anaconda3/envs/test_ponyo/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
Train on 45 samples, validate on 5 samples
Epoch 1/10
45/45 [==============================] - 4s 88ms/step - loss: 2511.2365 - val_loss: 2078.2676
Epoch 2/10
45/45 [==============================] - 4s 79ms/step - loss: 1688.8236 - val_loss: 2374.3589
Epoch 3/10
45/45 [==============================] - 4s 79ms/step - loss: 1664.0755 - val_loss: 1454.6667
Epoch 4/10
45/45 [==============================] - 4s 79ms/step - loss: 1509.4538 - val_loss: 1387.5260
Epoch 5/10
45/45 [==============================] - 4s 79ms/step - loss: 1474.1985 - val_loss: 1371.2039
Epoch 6/10
45/45 [==============================] - 4s 79ms/step - loss: 1489.1452 - val_loss: 1350.6823
Epoch 7/10
45/45 [==============================] - 4s 79ms/step - loss: 1502.0319 - val_loss: 1949.6031
Epoch 8/10
45/45 [==============================] - 4s 79ms/step - loss: 1381.4732 - val_loss: 1232.3323
Epoch 9/10
45/45 [==============================] - 4s 79ms/step - loss: 1419.9623 - val_loss: 1151.1223
Epoch 10/10
45/45 [==============================] - 4s 79ms/step - loss: 1384.7468 - val_loss: 1161.4500
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Shift template experiment
|
#tmp result dir
tmp = os.path.join(local_dir, "pseudo_experiment")
os.makedirs(tmp, exist_ok=True)
# Load pickled file
scaler = pickle.load(open(scaler_filename, "rb"))
# Run simulation
normalized_data = pd.read_csv(
normalized_data_filename, header=0, sep="\t", index_col=0
)
for run in range(num_runs):
simulate_expression_data.shift_template_experiment(
normalized_data,
NN_architecture,
dataset_name,
scaler,
metadata_filename,
metadata_delimiter,
experiment_id_colname,
sample_id_colname,
project_id,
local_dir,
base_dir,
run)
|
_____no_output_____
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Visualize latent transform compendium
|
# Load VAE models
model_encoder_filename = glob.glob(os.path.join(
NN_dir,
"*_encoder_model.h5"))[0]
weights_encoder_filename = glob.glob(os.path.join(
NN_dir,
"*_encoder_weights.h5"))[0]
model_decoder_filename = glob.glob(os.path.join(
NN_dir,
"*_decoder_model.h5"))[0]
weights_decoder_filename = glob.glob(os.path.join(
NN_dir,
"*_decoder_weights.h5"))[0]
# Load saved models
loaded_model = load_model(model_encoder_filename)
loaded_decode_model = load_model(model_decoder_filename)
loaded_model.load_weights(weights_encoder_filename)
loaded_decode_model.load_weights(weights_decoder_filename)
pca = PCA(n_components=2)
# Read data
normalized_compendium = pd.read_csv(normalized_data_filename, header=0, sep="\t", index_col=0)
# Encode normalized compendium into latent space
compendium_encoded = loaded_model.predict_on_batch(normalized_compendium)
compendium_encoded_df = pd.DataFrame(data=compendium_encoded,
index=normalized_compendium.index)
# Get and save PCA model
model = pca.fit(compendium_encoded_df)
compendium_PCAencoded = model.transform(compendium_encoded_df)
compendium_PCAencoded_df = pd.DataFrame(data=compendium_PCAencoded,
index=compendium_encoded_df.index,
columns=['1','2'])
# Add label
compendium_PCAencoded_df['experiment_id'] = 'background'
# Embedding of real template experiment (encoded)
template_filename = os.path.join(local_dir,
"pseudo_experiment",
"template_normalized_data_"+project_id+"_test.txt")
template_data = pd.read_csv(template_filename, header=0, sep='\t', index_col=0)
# Encode template experiment into latent space
template_encoded = loaded_model.predict_on_batch(template_data)
template_encoded_df = pd.DataFrame(data=template_encoded,
index=template_data.index)
template_PCAencoded = model.transform(template_encoded_df)
template_PCAencoded_df = pd.DataFrame(data=template_PCAencoded,
index=template_encoded_df.index,
columns=['1','2'])
# Add back label column
template_PCAencoded_df['experiment_id'] = 'template_experiment'
# Embedding of simulated experiment (encoded)
encoded_simulated_filename = os.path.join(local_dir,
"pseudo_experiment",
"selected_simulated_encoded_data_"+project_id+"_1.txt")
simulated_encoded_df = pd.read_csv(encoded_simulated_filename,header=0, sep='\t', index_col=0)
simulated_PCAencoded = model.transform(simulated_encoded_df)
simulated_PCAencoded_df = pd.DataFrame(data=simulated_PCAencoded,
index=simulated_encoded_df.index,
columns=['1','2'])
# Add back label column
simulated_PCAencoded_df['experiment_id'] = 'simulated_experiment'
# Concatenate dataframes
combined_PCAencoded_df = pd.concat([compendium_PCAencoded_df,
template_PCAencoded_df,
simulated_PCAencoded_df])
print(combined_PCAencoded_df.shape)
combined_PCAencoded_df.head()
# Plot
fig = ggplot(combined_PCAencoded_df, aes(x='1', y='2'))
fig += geom_point(aes(color='experiment_id'), alpha=0.2)
fig += labs(x ='PCA 1',
y = 'PCA 2',
title = 'PCA original data with experiments (latent space)')
fig += theme_bw()
fig += theme(
legend_title_align = "center",
plot_background=element_rect(fill='white'),
legend_key=element_rect(fill='white', colour='white'),
legend_title=element_text(family='sans-serif', size=15),
legend_text=element_text(family='sans-serif', size=12),
plot_title=element_text(family='sans-serif', size=15),
axis_text=element_text(family='sans-serif', size=12),
axis_title=element_text(family='sans-serif', size=15)
)
fig += guides(colour=guide_legend(override_aes={'alpha': 1}))
fig += scale_color_manual(['#bdbdbd', 'red', 'blue'])
fig += geom_point(data=combined_PCAencoded_df[combined_PCAencoded_df['experiment_id'] == 'template_experiment'],
alpha=0.2,
color='blue')
fig += geom_point(data=combined_PCAencoded_df[combined_PCAencoded_df['experiment_id'] == 'simulated_experiment'],
alpha=0.1,
color='red')
print(fig)
|
_____no_output_____
|
BSD-3-Clause
|
human_tests/Human_template_simulation.ipynb
|
ben-heil/ponyo
|
Selection. Boolean types, numeric values and expressions - Note: the equality comparison operator is two equals signs; a single equals sign means assignment - In Python the integer 0 can stand for False and any other number for True - The use of `is` in conditional statements will be covered later
|
1 == True  # comparison uses ==; a single = would be assignment
while 1:   # 1 is truthy, so this loop prints forever until interrupted
    print('hahaha')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
String comparison uses ASCII values
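A short sketch of that claim: strings are compared character by character using their code points, which coincide with ASCII for these letters.

```python
print(ord('a'), ord('b'))   # 97 98
print('a' < 'b')            # True, because 97 < 98
print('apple' < 'banana')   # True, decided by the first differing character
```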
|
'a' > 'A'  # characters compare by their code points
0<10>100
num=eval(input('>>'))
if num>=90:
print('A')
elif 80<=num<90:
print('B')
else :
print('C')
|
>>80
B
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
Markdown - https://github.com/younghz/Markdown EP: - Input a number and determine whether it is odd or even. Generating random numbers - The function random.randint(a, b) produces a random integer between a and b, including both a and b
|
import random
a=random.randint(1,5)
print(a)
while True:
num=eval(input('>>'))
if num == a:
print('Success')
break
elif num>a:
print('太大了')
elif num<a:
print('太小了')
|
2
>>5
太大了
>>2
Success
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
Other random methods - random.random() returns a random float in the half-open interval [0.0, 1.0) - random.randrange(a, b) is also half-open: it returns an integer in [a, b) EP: - Generate two random integers number1 and number2, display them to the user, have the user enter their sum, and check whether it is correct - Advanced: write a program that calls on students by picking a random roll number
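A quick sketch of the two functions mentioned above, from the standard random module:

```python
import random

print(random.random())         # float in [0.0, 1.0): 1.0 itself is never returned
print(random.randrange(1, 5))  # one of 1, 2, 3, 4 (the upper bound 5 is excluded)
print(random.randint(1, 5))    # one of 1, 2, 3, 4, 5 (both ends included)
```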
|
import random
a=random.randint(1,5)
b=random.randint(2,6)
print(a,b)
# num=eval(input('>>'))
# if num==a+b:
# print('Success')
# else :
# print('失败')
num=a+b
while 1:
    guess=eval(input('>>'))
    if guess == num:
print('Success')
break
else :
print('失败')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
if statements - A one-way if statement runs its body only when the condition is true - Python has several selection statements: > - one-way if - two-way if-else - nested if - multi-way if-elif-else - Note: when a statement contains sub-statements, they must be indented by at least one level; in other words, a statement's children must be indented - Never mix the Tab key and spaces; use only tabs or only spaces - Anything that should be printed regardless of whether the if condition is true must be aligned with the if
|
a=eval(input('>>'))
if a<=30:
b=input('>>')
if b!='丑':
c=input('>>')
if c=='高':
d=input('>>')
if d=='是':
print('见')
else:
print('不见')
else :
print('不见')
else :
print('不见')
else:
print('too old')
|
>>25
>>帅
>>高
>>是
见
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
EP: - The user inputs a number; determine whether it is odd or even - Advanced: see Section 4.5, the birthday-guessing case study. Two-way if-else statement - If the condition is true the if branch runs, otherwise the else branch runs. EP: - Generate two random integers number1 and number2, display them to the user, have the user enter a number and check it: print "you're correct" if it is right, otherwise report that it is wrong. Nested if and multi-way if-elif-else EP: - Prompt the user for a year and display the zodiac animal for that year - A program that computes the body mass index - BMI = weight in kilograms divided by the square of height in meters
|
a=eval(input('>>'))
num=a%12
if num==0:
print('猴')
elif num == 1:
print('鸡')
elif num == 2:
print('狗')
elif num == 3:
print('猪')
elif num== 4:
print('鼠')
elif num== 5:
print('牛')
elif num== 6:
print('虎')
elif num== 7:
print('兔')
elif num== 8:
print('龙')
elif num== 9:
print('蛇')
elif num== 10:
print('马')
else:
print('羊')
w,h=eval(input('>>'))
bmi=w/(h*h)
print(bmi)
if bmi<18.5:
print('超轻')
elif 18.5<=bmi<25.0:
print('标准')
elif 25.0<=bmi<30.0:
print('超重')
else :
print('痴肥')
|
>>60,1.84
17.72211720226843
超轻
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
Logical operators 
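A short sketch of the operators the image refers to (`and`, `or`, `not`, plus the membership tests used in the next cell):

```python
a = [1, 2, 3, 4]
print(1 in a, 5 not in a)                        # True True
print(True and False, True or False, not True)   # False True False
print((3 > 1) and (2 < 5))                       # True: both conditions hold
```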
|
a=[1,2,3,4]
1 not in a
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
EP: - Leap year test: a year is a leap year if it is divisible by 4 but not by 100, or if it is divisible by 400 - Prompt the user for a year and report whether it is a leap year - Prompt the user for a number and determine whether it is a narcissistic (Armstrong) number
|
year=eval(input('>>'))
a=year%4==0
b=year%100!=0
c=year%400==0
if (a or c) and b :
print('闰年')
else :
print('非闰年')
n=eval(input('>>'))
a1=n//100
a2=n//10%10
a3=n%10
s=a1**3+a2**3+a3**3
if s == n:
print('是水仙花数')
else :
print('结束')
|
>>154
结束
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
Case study: lottery
|
import random
a1=random.randint(0,9)
a2=random.randint(0,9)
print(a1,a2)
a=str(a1)+str(a2)
num=input('>>')
if num==a:
print('一等奖')
elif (num[0]==a[1] and (num[1]== a[0])):
print('二等奖')
elif ((num[0]==a[0]) or (num[1]==a[0]) or (num[0]==a[1]) or (num[1]==a[1])):
print('三等奖')
else :
    print('未中奖')
|
8 4
>>48
二等奖
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
Homework- 1
|
import math
a,b,c=eval(input('>>'))
pan=b**2-4*a*c
if pan>0:
    r1=((-b)+math.sqrt(pan))/(2*a)
    r2=((-b)-math.sqrt(pan))/(2*a)
    print(r1,r2)
elif pan==0:
    r1=(-b)/(2*a)
    print(r1)
else :
    print('The equation has no real roots')
|
>>1,3,1
-0.3819660112501051 -2.618033988749895
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 2
|
import random
a1=random.randint(0,99)
a2=random.randint(0,99)
print(a1,a2)
num=eval(input('>>'))
number=a1+a2
if num == number:
print('True')
else :
print('False')
|
93 42
>>12
False
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 3
|
day = eval(input('今天是哪一天(星期天是0,星期一是1,。。。,星期六是6):'))
days = eval(input('今天之后到未来某天的天数:'))
n = day + days
if day==0:
a='星期日'
elif day==1:
a='星期一'
elif day==2:
a='星期二'
elif day==3:
a='星期三'
elif day==4:
a='星期四'
elif day==5:
a='星期五'
elif day==6:
a='星期六'
if n%7 ==0:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期天')
elif n%7 ==1:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期一')
elif n%7 ==2:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期二')
elif n%7 ==3:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期三')
elif n%7 ==4:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期四')
elif n%7 ==5:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期五')
elif n%7 ==6:
print('今天是'+str(a)+'并且'+str(days)+'天之后是星期六')
|
今天是哪一天(星期天是0,星期一是1,。。。,星期六是6):1
今天之后到未来某天的天数:3
今天是星期一并且3天之后是星期四
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 4
|
a,b,c = eval(input('输入三个整数:'))
if a>=b and b>=c:
print(c,b,a)
elif a>=b and b<=c and a>=c:
print(b,c,a)
elif b>=a and a>=c :
print(c,a,b)
elif b>=a and a<=c and b>=c:
print(a,c,b)
elif c>=b and b>=a:
print(a,b,c)
elif c>=b and b<=a and c>=a:
print(b,a,c)
|
输入三个整数:2,1,3
1 2 3
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 5
|
a1,a2=eval(input('输入第一种重量和价钱:'))
b1,b2=eval(input('输入第二种重量和价钱:'))
num1=a2/a1
num2=b2/b1
if num1>num2:
print('购买第二种更加合适')
else :
print('购买第一种更合适')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 6
|
m,year=eval(input('输入月份和年'))
a=year%4==0
b=year%100!=0
c=year%400==0
r=[1,3,5,7,8,10,12]
if (a or c) and b and m==2:
print(str(year)+'年'+str(m)+'月有29天')
elif ((m==1) or (m==3) or (m==5) or (m==7) or (m==8) or (m==10) or (m==12)):
print(str(year)+'年'+str(m)+'月有31天')
elif ((m==4) or (m==6) or (m==9) or (m==11)):
print(str(year)+'年'+str(m)+'月有30天')
else :
print(str(year)+'年'+str(m)+'月有28天')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 7
|
import random
a=random.randint(0,1)
print(a)
num=eval(input('>>'))
if a==num:
print('正确')
else :
print('错误')
|
0
>>1
错误
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 8
|
a=eval(input('输入1,2或0:'))
import random
d=random.randint(0,2)  # 0, 1 or 2: one of the three choices
if d==a:
print('平局')
elif a==0 and d==1:
print('你输了')
elif a==0 and d==2:
print('你赢了')
elif a==1 and d==0:
print('你赢了')
elif a==1 and d==2:
print('你输了')
elif a==2 and d==1:
print('你赢了')
elif a==2 and d==0:
print('你输了')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 9
|
y = eval(input('请输入年份:'))
m = eval(input('请输入月份:'))
q = eval(input('请输入天数:'))
if m == 1:      # in Zeller's congruence, January and February are counted
    m = 13      # as months 13 and 14 of the previous year
    y = y - 1
elif m == 2:
    m = 14
    y = y - 1
j = y//100
k = y%100
h = (q + (26*(m+1))//10 + k + k//4 + j//4 + 5*j) % 7
print(h)
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 10
|
import random
size=['Ace',2,3,4,5,6,7,8,9,10,'Jack','Queen','King']
A=random.randint(0,len(size)-1)
color=['Diamond','Heart','Spade','Club']
B=random.randint(0,len(color)-1)
print('The card you picked is the ' + str(size[A]) + ' of ' + str(color[B]))
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 11
|
x = input('Enter a three-digit integer:')
if x[0] == x[2] :
    print(str(x)+' is a palindrome')
else:
    print(str(x)+' is not a palindrome')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
- 12
|
lenght1,lenght2,lenght3=eval(input('Enter three edges:'))
perimeter = lenght1 + lenght2 + lenght3
if lenght1 + lenght2 > lenght3 and lenght1 + lenght3 > lenght2 and lenght2 + lenght3 > lenght1:
print('The perimeter is',perimeter)
else:
print('The perimeter invalid')
|
_____no_output_____
|
Apache-2.0
|
9.12.ipynb
|
ljmzyl/Work
|
1. Decision trees for classification (continued). Last time we covered the idea behind decision trees. Now let us look at **how the split in each node is chosen**, i.e. how the **model training** step works. There are at least two reasons to understand this: first, it lets us solve classification problems with 3 or more classes; second, it lets us compute feature *importances* in a trained model. First, let us see what kinds of decision trees exist. ---- A decision tree **does not have to be binary** in general; in practice, however, binary trees are used, because for any non-binary decision tree **an equivalent binary one can be built** (at the cost of a deeper tree). 1. Decision trees use a simple one-dimensional predicate to split the objects. This means that in each node the split of the objects (and the creation of two child nodes) is done on **one single** feature: *all objects whose value of some feature is below a threshold go to one node, the rest go to the other:* $$[x_j < t]$$ Generally speaking this is not required; for example, one could fit an arbitrary model in each node (say logistic regression or KNN) that looks at several features at once. 2. Split quality. We discussed a simple quality functional for **choosing the threshold**: the number of errors (1 - accuracy). In practice two criteria are used: Gini impurity and information gain. **Gini index** $$I_{Gini} = 1 - \sum_i^K p_i^2 $$ where $K$ is the number of classes and $p_i = \frac{|n_i|}{n}$ is the fraction of objects of class $i$ in the node. **Entropy** $$H(p) = - \sum_i^K p_i\log(p_i)$$ **Information gain** $$IG(p) = H(\text{parent}) - H(\text{child})$$ The split is made on the feature and threshold for which the weighted average of the quality functional over the two child nodes is smallest (a sketch of this threshold search follows below). 3. Stopping criteria. We have already mentioned decision tree parameters such as the minimum number of objects in a leaf and the minimum number of objects a node must contain to be split. Another criterion is the tree depth. Others are possible as well: * a limit on the number of objects in a leaf * a limit on the number of objects a node must contain to be split * a limit on the tree depth * a limit on the minimum gain in entropy or information criterion at a split * stopping when all objects in a leaf belong to the same class. In the previous lecture we discussed a technique called **pruning**, an alternative to stopping criteria: first an overfitted tree is built and then it is simplified in some way. In practice, for a number of reasons, stopping criteria are used more often than pruning. For details see https://github.com/esokolov/ml-course-hse/blob/master/2018-fall/lecture-notes/lecture07-trees.pdf On splitting continuous features: * http://kevinmeurer.com/a-simple-guide-to-entropy-based-discretization/ * http://clear-lines.com/blog/post/Discretizing-a-continuous-variable-using-Entropy.aspx --- 1.1. Evaluating split quality in a node
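The next cell only plots the two impurity functions; as a sketch of how a split threshold could be chosen with the weighted criterion described above (assuming a single feature vector x and labels y as NumPy arrays; an illustration, not the sklearn internals):

```python
import numpy as np

def gini(y):
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return 1 - (p ** 2).sum()

def best_threshold(x, y):
    """Sketch: pick the threshold minimising the weighted Gini of the two children."""
    best_t, best_score = None, np.inf
    for t in np.unique(x):
        left, right = y[x < t], y[x >= t]
        if len(left) == 0 or len(right) == 0:
            continue
        score = (len(left) * gini(left) + len(right) * gini(right)) / len(y)
        if score < best_score:
            best_t, best_score = t, score
    return best_t, best_score

x = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
y = np.array([0, 0, 0, 1, 1, 1])
print(best_threshold(x, y))  # the split x < 10.0 separates the classes perfectly (weighted Gini 0)
```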
|
import numpy as np
import matplotlib.pyplot as plt

def gini_impurity(y_current):
    n = y_current.shape[0]
    val, count = np.unique(y_current, return_counts=True)
    gini = 1 - ((count/n)**2).sum()
    return gini

def entropy(y_current):
    n = y_current.shape[0]
    val, count = np.unique(y_current, return_counts=True)
    p = count/n
    return -p.dot(np.log(p))

n = 100
Y_example = np.zeros((100,100))
for i in range(100):
    for j in range(i, 100):
        Y_example[i, j] = 1
gini = [gini_impurity(y) for y in Y_example]
ig = [entropy(y) for y in Y_example]
plt.figure(figsize=(7,7))
plt.plot(np.linspace(0,1,100), gini, label='Gini index');
plt.plot(np.linspace(0,1,100), ig, label='Entropy');
plt.legend()
plt.xlabel('Fraction of positive-class examples')
plt.ylabel('Value of the optimized criterion');
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
1.2. A decision tree in action. The **Gini index** and the **information criterion** measure how balanced a vector is (how homogeneous the set of objects is). Heterogeneity is maximal when the classes are represented equally, and homogeneity is maximal when the set contains objects of a single class. By splitting the set of objects into two subsets we try to reduce the heterogeneity within each subset. Let us look at this on the Fisher Iris data. Fisher's Irises
|
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
model = DecisionTreeClassifier()
model = model.fit(iris.data, iris.target)
feature_names = ['sepal length', 'sepal width', 'petal length', 'petal width']
target_names = ['setosa', 'versicolor', 'virginica']
model.feature_importances_
np.array(model.decision_path(iris.data).todense())[0]
np.array(model.decision_path(iris.data).todense())[90]
iris.data[0]
model.predict(iris.data)
model.tree_.node_count
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
Digits. Interpretability
|
from sklearn.datasets import load_digits
X, y = load_digits(n_class=2, return_X_y=True)
plt.figure(figsize=(12,12))
for i in range(9):
ax = plt.subplot(3,3,i+1)
ax.imshow(X[i].reshape(8,8), cmap='gray')
from sklearn.metrics import accuracy_score
model = DecisionTreeClassifier()
model.fit(X, y)
y_pred = model.predict(X)
print(accuracy_score(y, y_pred))
print(X.shape)
np.array(model.decision_path(X).todense())[0]
model.feature_importances_
plt.imshow(model.feature_importances_.reshape(8,8));
from sklearn.tree import export_graphviz
export_graphviz(model, out_file='tree.dot', filled=True)
# #sudo apt-get install graphviz
# !dot -Tpng 'tree.dot' -o 'tree.png'
# 
np.array(model.decision_path(X).todense())[0]
plt.imshow(X[0].reshape(8,8))
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
2.3. Decision trees generalize easily to multiclass classification. An example with handwritten digits
|
X, y = load_digits(n_class=10, return_X_y=True)
plt.figure(figsize=(12,12))
for i in range(9):
ax = plt.subplot(3,3,i+1)
ax.imshow(X[i].reshape(8,8), cmap='gray')
ax.set_title(y[i])
ax.set_xticks([])
ax.set_yticks([])
model = DecisionTreeClassifier()
model.fit(X, y)
y_pred = model.predict(X)
print(accuracy_score(y, y_pred))
plt.imshow(model.feature_importances_.reshape(8,8));
model.feature_importances_
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
Question: where does feature importance come from? 2.4. An example where a decision tree builds a very complicated decision boundary. The example is taken from https://habr.com/ru/company/ods/blog/322534/slozhnyy-sluchay-dlya-derevev-resheniy . As we remember, decision trees use a one-dimensional predicate to split the set of objects. This means that if the data are poorly separable along each individual feature taken on its own, the resulting decision rule can turn out to be very complex.
|
from sklearn.tree import DecisionTreeClassifier
def form_linearly_separable_data(n=500, x1_min=0, x1_max=30, x2_min=0, x2_max=30):
data, target = [], []
for i in range(n):
x1, x2 = np.random.randint(x1_min, x1_max), np.random.randint(x2_min, x2_max)
if np.abs(x1 - x2) > 0.5:
data.append([x1, x2])
target.append(np.sign(x1 - x2))
return np.array(data), np.array(target)
X, y = form_linearly_separable_data()
plt.figure(figsize=(10,10))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='autumn');
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
Let us look at how the data appear when projected onto a single axis
|
plt.figure(figsize=(15,5))
ax1 = plt.subplot(1,2,1)
ax1.set_title('Projection onto the $X_0$ axis')
ax1.hist(X[y==1, 0], alpha=.3);
ax1.hist(X[y==-1, 0], alpha=.6);
ax2 = plt.subplot(1,2,2)
ax2.set_title('Projection onto the $X_1$ axis')
ax2.hist(X[y==1, 1], alpha=.3);
ax2.hist(X[y==-1, 1], alpha=.6);
def get_grid(data, eps=0.01):
x_min, x_max = data[:, 0].min() - 1, data[:, 0].max() + 1
y_min, y_max = data[:, 1].min() - 1, data[:, 1].max() + 1
return np.meshgrid(np.arange(x_min, x_max, eps),
np.arange(y_min, y_max, eps))
tree = DecisionTreeClassifier(random_state=17).fit(X, y)
xx, yy = get_grid(X, eps=.05)
predicted = tree.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.figure(figsize=(10,10))
plt.pcolormesh(xx, yy, predicted, cmap='autumn', alpha=0.3)
plt.scatter(X[y==1, 0], X[y==1, 1], marker='x', s=100, cmap='autumn', linewidth=1.5)
plt.scatter(X[y==-1, 0], X[y==-1, 1], marker='o', s=100, cmap='autumn', edgecolors='k',linewidth=1.5)
plt.title('Easy task. Decision tree complexifies everything');
# export_graphviz(tree, out_file='complex_tree.dot', filled=True)
# !dot -Tpng 'complex_tree.dot' -o 'complex_tree.png'
|
_____no_output_____
|
MIT
|
seminar-5-dt-rf/5_1_dt_2_draft.ipynb
|
kurmukovai/iitp-ml-ds
|
Kernel analysis
|
df = read_ods("./results.ods", "matmul-kernel")
expand_modes(df)
print(df["MODE"].unique())
#############################################
# Disregard the store result for the kernel #
#############################################
df.loc[df["MODE"] == "AD (volatile result)", "MODE"] = "AD"
order = ['DRAM', 'AD', 'AD (in-place FMA)', 'MM (hot)', 'MM (cold)']
hue_order = [7000, 1000]
# Split the two families of experiments
df_rowcol = df[df.MATRIX_SIDE != 0]
df = df[df.MATRIX_SIDE == 0]
sns.barplot(x='MODE', y='TIMING',
data=df[(df.BLOCKSIZE == 1000)],
capsize=0.1,
order=order,
palette=custom_kernel_palette(6))
plt.title("Submatrix size: 1000x1000 (small object)")
plt.xticks(rotation=25, horizontalalignment='right')
plt.show()
sns.barplot(x='MODE', y='TIMING',
data=df[(df.BLOCKSIZE == 7000)],
capsize=0.1,
order=order,
palette=custom_kernel_palette(6))
plt.title("Submatrix size: 7000x7000 (big object)")
plt.xticks(rotation=25, horizontalalignment='right')
plt.show()
###################################
# sns.barplot(x='MODE', y='TIMING',
# data=df_rowcol[(df_rowcol.BLOCKSIZE == 1000)],
# capsize=0.1,
# order=order,
# palette=palette)
# plt.title("BLOCKSIZE: 1k || row x col")
# plt.xticks(rotation=25, horizontalalignment='right')
# plt.show()
# sns.barplot(x='MODE', y='TIMING',
# data=df_rowcol[(df_rowcol.BLOCKSIZE == 7000)],
# capsize=0.1,
# order=order,
# palette=palette)
# plt.title("BLOCKSIZE: 7k || row x col")
# plt.xticks(rotation=25, horizontalalignment='right')
# plt.show()
# Remove MM-NVM as it is outlier-ish
#df = df[df.MODE != 'MM-NVM']
# ... or maybe not? trying set_ylim maybe:
#axes = plt.gca()
#axes.set_ylim([0,1.5])
#plt.title("...")
#plt.show()
df.loc[(df.BLOCKSIZE == 1000), "NORMALIZED"] = df.TIMING
df.loc[(df.BLOCKSIZE == 7000), "NORMALIZED"] = df.TIMING / (7*7*7)
ax = sns.barplot(y='MODE', x='NORMALIZED',
data=df,
capsize=0.1,
order=order,
hue_order=hue_order,
hue="BLOCKSIZE",
palette="muted")
kernel_plot_tweaks(ax, 7*7*7, legend_title="Submatrix blocksize")
plt.savefig("matmul-kernel.pdf", bbox_inches='tight')
plt.show()
kernel_times = df.groupby(["BLOCKSIZE", "MODE"]).min()
kernel_times
#rowcol_times = df_rowcol.groupby(["BLOCKSIZE", "MODE"]).min()
#rowcol_times
|
_____no_output_____
|
CC0-1.0
|
analysis/matmul-analysis.ipynb
|
bsc-dom/optanedc-miniapps
|
Matmul results analysis
|
df = read_ods("./results.ods", "matmul-app")
expand_modes(df)
df
for bs in [1000, 7000]:
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "DRAM"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "DRAM"), "TIMING"]
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "AD (volatile result)"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "AD"), "TIMING"]
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "AD (store result)"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "AD"), "TIMING"]
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "AD (in-place FMA)"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "AD (in-place FMA)"), "TIMING"]
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "DAOS (volatile result)"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "DRAM"), "TIMING"]
df.loc[(df.BLOCKSIZE == bs) & (df.MODE == "DAOS (store result)"), "ATOM_KERNEL"] = \
kernel_times.loc[(bs, "DRAM"), "TIMING"]
df.loc[(df.BLOCKSIZE == 1000)
& (df.MATRIX_SIDE == 42)
& (df.MODE == "MM"),
"ATOM_KERNEL"] = kernel_times.loc[(1000, "MM (hot)"), "TIMING"]
df.loc[(df.BLOCKSIZE == 7000)
& (df.MATRIX_SIDE == 6)
& (df.MODE == "MM"),
"ATOM_KERNEL"] = kernel_times.loc[(7000, "MM (hot)"), "TIMING"]
df.loc[(df.BLOCKSIZE == 1000)
& (df.MATRIX_SIDE == 84)
& (df.MODE == "MM"),
"ATOM_KERNEL"] = kernel_times.loc[(1000, "MM (cold)"), "TIMING"]
df.loc[(df.BLOCKSIZE == 7000)
& (df.MATRIX_SIDE == 12)
& (df.MODE == "MM"),
"ATOM_KERNEL"] = kernel_times.loc[(7000, "MM (cold)"), "TIMING"]
df["KERNEL_TIME"] = df["MATRIX_SIDE"]**3 * df["ATOM_KERNEL"]
# Sanity check
null_values = df[df.isnull().values]
if len(null_values) > 0:
print('There are null values, check null_values variable')
df
|
_____no_output_____
|
CC0-1.0
|
analysis/matmul-analysis.ipynb
|
bsc-dom/optanedc-miniapps
|
Article image generation
|
sns.set(style="whitegrid")
order = ['DRAM', 'AD (volatile result)', 'AD (store result)', 'AD (in-place FMA)',
'MM', 'DAOS (volatile result)', 'DAOS (store result)']
small = (
((df.BLOCKSIZE == 1000) & (df.MATRIX_SIDE == 42)) |
((df.BLOCKSIZE == 7000) & (df.MATRIX_SIDE == 6))
)
big = (
((df.BLOCKSIZE == 1000) & (df.MATRIX_SIDE == 84)) |
((df.BLOCKSIZE == 7000) & (df.MATRIX_SIDE == 12))
)
ax = sns.barplot(y='MODE', x="TIMING",
data=df[small],
capsize=0.1,
order=order,
hue_order=hue_order,
palette="colorblind",
hue=df.BLOCKSIZE)
bottom = sns.barplot(y='MODE', x="KERNEL_TIME",
data=df[small],
capsize=0,
order=order,
hue_order=hue_order,
palette="pastel",
hue=df.BLOCKSIZE)
crop_axis(ax, 800)
ylabel_tweaks(ax, [2, 5], ['non-active', 'active'], 0.40, 0.005)
legend_tweaks(bottom, ["big objects", "small objects", "kernel comp."], placement='upper center')
ax.set_xlabel("execution time (s)")
plt.title("Small dataset")
save_tweaks("matmul-small.pdf", big=True)
plt.show()
ax = sns.barplot(y='MODE', x="TIMING",
data=df[big],
capsize=0.1,
order=order,
hue_order=hue_order,
palette="colorblind",
hue=df.BLOCKSIZE)
annotate_dram(ax)
bottom = sns.barplot(y='MODE', x="KERNEL_TIME",
data=df[big],
capsize=0,
order=order,
hue_order=hue_order,
palette="pastel",
hue=df.BLOCKSIZE)
crop_axis(ax, 6000)
ylabel_tweaks(ax, [2, 5], ['non-active', 'active'], 0.40, 0.005)
legend_tweaks(bottom, ["big objects", "small objects", "kernel comp."], placement='upper center')
ax.set_xlabel("execution time (s)")
plt.title("Big dataset")
save_tweaks("matmul-big.pdf", big=True)
plt.show()
df.groupby(["BLOCKSIZE", "MATRIX_SIDE", "MODE"]).mean()
|
_____no_output_____
|
CC0-1.0
|
analysis/matmul-analysis.ipynb
|
bsc-dom/optanedc-miniapps
|
DASHBOARD LINK: https://public.tableau.com/profile/altaf.lakhi2442!/vizhome/UnbankedExploration/Dashboard1
|
import pandas as pd
import seaborn as sns
CPS_df = pd.read_csv("../data/processed/CPS_2009_2017_clean.csv")
ACS_df = pd.read_csv("../data/processed/ACS_2011_2017_clean.csv")
NFCS_df = pd.read_csv("../data/processed/NFCS_2009_2018_clean.csv")
frames = [CPS_df, ACS_df, NFCS_df]
#declaring STATE list
STATES = ["Alabama","Alaska","Arizona","Arkansas","California","Colorado",
"Connecticut","Delaware","District of Columbia", "Florida","Georgia","Hawaii",
"Idaho","Illinois", "Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine",
"Maryland","Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana",
"Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York",
"North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania",
"Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah",
"Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
#generating state:state_number dictionary
STATE_FIPS = list(frames[0].STATEFIP.unique())
STATE = {}
for state, name in zip(STATE_FIPS, STATES):
STATE[state] = name
#generating STATE column for pertinent dfs
CPS_df["STATE"] = CPS_df.STATEFIP.map(STATE)
ACS_df["STATE"] = ACS_df.STATEFIP.map(STATE)
counties = pd.read_csv("../data/external/county_fips_master.csv", engine='python')
|
_____no_output_____
|
MIT
|
notebooks/Dashboard_Data.ipynb
|
Altaf410/An-Exploration-of-the-Unbanked-in-the-US
|
Aggregating CPS Data
|
pop_prop = pd.read_csv("../data/interim/population_proportions")
pop_prop.head()
pop_prop = pop_prop[["YEAR", "BUNBANKED", "STATEFIP"]]
pop_prop
state_year_agg = []
for year in pop_prop.YEAR.unique():
holder = pop_prop[pop_prop.YEAR == year]
state_year_agg.append(holder)
#national_agg_sums = [pop_prop[pop_prop.STATEFIP == state].BUNBANKED.sum() for state in pop_prop.STATEFIP.unique()]
#print(f"{year}")
#display(holder)
state_survey_pop_agg = pd.concat(state_year_agg)
state_survey_pop_agg["STATE"] = state_survey_pop_agg.STATEFIP.map(STATE)
state_survey_pop_agg
state_survey_pop_agg.rename(columns = {"BUNBANKED": "SURVEY_POP"}, inplace = True)
state_survey_pop_agg
CPS_agg = pd.DataFrame()
CPS_agg["STATE"] = CPS_df.STATE
CPS_agg["UNDERBANKED"] = CPS_df.BUNBANKED
CPS_agg["YEAR"] = CPS_df.YEAR
#copying aggregation before grouping for additional breakdowns
CPS_reason_agg = CPS_agg.copy(deep=True)
CPS_agg = CPS_agg.groupby(["YEAR", "STATE"]).count()
CPS_agg = CPS_agg.reset_index()
CPS_agg
state_survey_pop_agg = state_survey_pop_agg[state_survey_pop_agg.YEAR.isin(CPS_agg.YEAR.unique())].reset_index()
state_survey_pop_agg
CPS_agg["SURVEY_POP"] = state_survey_pop_agg.SURVEY_POP
CPS_agg
CPS_agg.to_csv("../data/processed/Dashboard_Data/CPS_STATE_Aggregate.csv")
# Isolating the Pacific Northwest states of interest
PNW = ["Washington", "Oregon", "Wyoming", "Montana", "Idaho"]
PNW_CPS_agg = CPS_agg[CPS_agg.STATE.isin(PNW)]
PNW_CPS_agg
PNW_CPS_agg.to_csv("../data/processed/Dashboard_Data/CPS_PNW_STATE_Aggregate.csv")
|
_____no_output_____
|
MIT
|
notebooks/Dashboard_Data.ipynb
|
Altaf410/An-Exploration-of-the-Unbanked-in-the-US
|
---------------------------------------------------------------------------------------------- Aggregating ACS Data
|
#ACS_df = pd.read_csv("../data/processed/ACS_2011_2017_clean")
#ACS_df["STATE"] = ACS_df.STATEFIP.map(STATE)
ACS_df.head()
ACS_df.HHWT
ACS_df = ACS_df.drop(columns = ['Unnamed: 0'])
filtering_columns = ACS_df.columns
filtering_columns = filtering_columns.drop(["STATE", "YEAR", "SAMPLE", "REGION", 'STATEFIP'])
filtering_columns
pivot_df = ACS_df.copy(deep=True)
# Using each filter column to generate multiple pivot tables for data visualization
for _filter in filtering_columns:
pivot_df[f"{_filter}_COUNTS"] = pivot_df[_filter]
pivot_df_final = pivot_df[["YEAR", "REGION", "STATE", _filter, f"{_filter}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", _filter]).count()
#display(pivot_df[["YEAR", "REGION", "STATE", _filter, f"{_filter}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", _filter]).count())
#display(pivot_df_final)
pivot_df_final.to_csv(f"../data/processed/Dashboard_Data/{_filter}_ACS_AGG.csv")
ACS_df.groupby(["YEAR", "REGION", "STATE", "CINETHH"]).count()#.value_counts()
ACS_df.columns
|
_____no_output_____
|
MIT
|
notebooks/Dashboard_Data.ipynb
|
Altaf410/An-Exploration-of-the-Unbanked-in-the-US
|
* HHINCOME = Household Income
* MARST = Marital Status
* OCC2010 = Occupation
* CINETHH = Access to Internet
* CILAPTOP = Laptop, desktop, or notebook computer
* CISMRTPHN = Smartphone
* CITABLET = Tablet or other portable wireless computer
* CIHAND = Handheld Computer
* CIHISPEED = Broadband (high speed) Internet service such as cable, fiber optic, or DSL service
* CISAT = Satellite internet service
* CIDIAL = Dial-up Service
* CIOTHSVC = Other Internet Service
|
ACS_agg = pd.DataFrame()
ACS_agg["STATE"] = ACS_df.STATE
ACS_agg["OCC2010"] = ACS_df.OCC2010
ACS_agg["CINETHH"] = ACS_df.CINETHH
ACS_agg["CILAPTOP"] = ACS_df.CILAPTOP
ACS_agg["CISMRTPHN"] = ACS_df.CISMRTPHN
ACS_agg["CITABLET"] = ACS_df.CITABLET
ACS_agg["CIHAND"] = ACS_df.CIHAND
ACS_agg["CIHISPEED"] = ACS_df.CIHISPEED
ACS_agg["CISAT"] = ACS_df.CISAT
ACS_agg["CIDIAL"] = ACS_df.CIDIAL
ACS_agg["CIOTHSVC"] = ACS_df.CIOTHSVC
ACS_agg["YEAR"] = ACS_df.YEAR
ACS_agg = ACS_agg.groupby(["STATE", "YEAR"]).count()
ACS_agg = ACS_agg.reset_index()
ACS_agg
ACS_agg.to_csv("../data/processed/Dashboard_Data/ACS_STATE_Aggregate.csv")
|
_____no_output_____
|
MIT
|
notebooks/Dashboard_Data.ipynb
|
Altaf410/An-Exploration-of-the-Unbanked-in-the-US
|
---------------------------------------------------------------------------------------------- Aggregating NFCS
|
NFCS_df.head()
NFCS_df.drop("Unnamed: 0", axis=1,inplace=True)
#declaring STATE list
STATES = ["Alabama","Alaska","Arizona","Arkansas","California","Colorado",
"Connecticut","Delaware","District of Columbia", "Florida","Georgia","Hawaii",
"Idaho","Illinois", "Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine",
"Maryland","Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana",
"Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York",
"North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania",
"Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah",
"Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
#generating state:state_number dictionary
STATE_NFCS = list(NFCS_df.STATE.unique())
STATE_NFCS.sort()
STATE = {}
for state, name in zip(STATE_NFCS, STATES):
STATE[state] = name
NFCS_df.STATE = NFCS_df.STATE.map(STATE)
NFCS_df.STATE
NFCS_agg = NFCS_df.groupby(["STATE", "YEAR"]).count()
NFCS_agg
factors = list(NFCS_df.columns)
factors.remove("STATE")
factors.remove("YEAR")
# Using each factor column to generate multiple pivot tables for data visualization
pivot_df = NFCS_df.copy(deep=True)
for factor in factors:
pivot_df[f"{factor}_COUNTS"] = pivot_df[factor]
pivot_df_final = pivot_df[["YEAR", "STATE", factor, f"{factor}_COUNTS"]].groupby(["YEAR", "STATE", factor]).count()
#display(pivot_df[["YEAR", "REGION", "STATE", factor, f"{factor}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", factor]).count())
display(pivot_df_final)
pivot_df_final.to_csv(f"../data/processed/Dashboard_Data/{factor}_NFCS_AGG.csv")
NFCS_agg.to_csv("../data/processed/Dashboard_Data/NFCS_STATE_Aggregate.csv")
|
_____no_output_____
|
MIT
|
notebooks/Dashboard_Data.ipynb
|
Altaf410/An-Exploration-of-the-Unbanked-in-the-US
|
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Configuration_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_------ Table of Contents1. [Introduction](Introduction) 1. What is an Azure Machine Learning workspace1. [Setup](Setup) 1. Azure subscription 1. Azure ML SDK and other library installation 1. Azure Container Instance registration1. [Configure your Azure ML Workspace](Configure%20your%20Azure%20ML%20workspace) 1. Workspace parameters 1. Access your workspace 1. Create a new workspace 1. Create compute resources1. [Next steps](Next%20steps)--- IntroductionThis notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.In this notebook you will* Learn about getting an Azure subscription* Specify your workspace parameters* Access or create your workspace* Add a default compute cluster for your workspace What is an Azure Machine Learning workspaceAn Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models. SetupThis section describes activities required before you can access any Azure ML services functionality. 1. Azure SubscriptionIn order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces. 2. Azure ML SDK and other library installationIf you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.Also install following libraries to your environment. Many of the example notebooks depend on them```(myenv) $ conda install -y matplotlib tqdm scikit-learn```Once installation is complete, the following cell checks the Azure ML SDK version:
|
import azureml.core
print("This notebook was created using version 1.0.74.1 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
|
_____no_output_____
|
MIT
|
aml/configuration.ipynb
|
kawo123/azure-e2e-ml
|
Configure your Azure ML workspace Workspace parametersTo use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:* Your subscription id* A resource group name* (optional) The region that will host your workspace* A name for your workspaceYou can get your subscription ID from the [Azure portal](https://portal.azure.com).You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overviewresource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values. If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.Replace the default values in the cell below with your workspace parameters
|
import os
subscription_id = os.getenv("SUBSCRIPTION_ID", default="<my-subscription-id>")
resource_group = os.getenv("RESOURCE_GROUP", default="<my-resource-group>")
workspace_name = os.getenv("WORKSPACE_NAME", default="<my-workspace-name>")
workspace_region = os.getenv("WORKSPACE_REGION", default="eastus2")
|
_____no_output_____
|
MIT
|
aml/configuration.ipynb
|
kawo123/azure-e2e-ml
|
Access your workspaceThe following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it.
|
from azureml.core import Workspace
try:
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
# write the details of the workspace to a configuration file to the notebook library
ws.write_config()
print("Workspace configuration succeeded. Skip the workspace creation steps below")
except:
print("Workspace not accessible. Change your parameters or create a new workspace below")
|
_____no_output_____
|
MIT
|
aml/configuration.ipynb
|
kawo123/azure-e2e-ml
|
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
Challenge Notebook
Problem: Given an array of (unix_timestamp, num_people, EventType.ENTER or EventType.EXIT), find the busiest period.
* [Constraints](Constraints)
* [Test Cases](Test-Cases)
* [Algorithm](Algorithm)
* [Code](Code)
* [Unit Test](Unit-Test)
* [Solution Notebook](Solution-Notebook)
Constraints
* Can we assume the input array is valid?
    * Check for None
* Can we assume the elements of the input array are valid?
    * Yes
* Is the input sorted by time?
    * No
* Can you have enter and exit elements for the same timestamp?
    * Yes you can, order of enter and exit is not guaranteed
* Could we have multiple enter events (or multiple exit events) for the same timestamp?
    * No
* What is the format of the output?
    * An array of timestamps [t1, t2]
* Can we assume the starting number of people is zero?
    * Yes
* Can we assume the inputs are valid?
    * No
* Can we assume this fits memory?
    * Yes
Test Cases
* None -> TypeError
* [] -> None
* General case

timestamp  num_people  event_type
1          2           EventType.ENTER
3          1           EventType.ENTER
3          2           EventType.EXIT
7          3           EventType.ENTER
8          2           EventType.EXIT
9          2           EventType.EXIT
result = Period(7, 8)

Algorithm
Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
Code
|
from enum import Enum
class Data(object):
def __init__(self, timestamp, num_people, event_type):
self.timestamp = timestamp
self.num_people = num_people
self.event_type = event_type
def __lt__(self, other):
return self.timestamp < other.timestamp
class Period(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start == other.start and self.end == other.end
def __repr__(self):
return str(self.start) + ', ' + str(self.end)
class EventType(Enum):
ENTER = 0
EXIT = 1
class Solution(object):
def find_busiest_period(self, data):
# TODO: Implement me
pass
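# --- Added sketch (not the challenge author's reference solution) ---------------------
# One possible approach, kept separate from the Solution stub above: aggregate the net
# ENTER/EXIT change per timestamp, then sweep the timestamps in order and track the
# interval over which the running head-count peaks.
class SolutionSketch(object):

    def find_busiest_period(self, data):
        if data is None:
            raise TypeError('data cannot be None')
        if not data:
            return None
        # Net change in the number of people at each timestamp
        deltas = {}
        for event in data:
            change = event.num_people if event.event_type == EventType.ENTER else -event.num_people
            deltas[event.timestamp] = deltas.get(event.timestamp, 0) + change
        timestamps = sorted(deltas)
        curr_people = 0
        max_people = 0
        busiest = None
        for index, timestamp in enumerate(timestamps):
            curr_people += deltas[timestamp]
            # The count holds until the next timestamp; the last timestamp closes the timeline
            if index < len(timestamps) - 1 and curr_people > max_people:
                max_people = curr_people
                busiest = Period(timestamp, timestamps[index + 1])
        return busiest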
|
_____no_output_____
|
Apache-2.0
|
online_judges/busiest_period/busiest_period_challenge.ipynb
|
benkeesey/interactive-coding-challenges
|
Unit Test **The following unit test is expected to fail until you solve the challenge.**
|
# %load test_find_busiest_period.py
import unittest
class TestSolution(unittest.TestCase):
def test_find_busiest_period(self):
solution = Solution()
self.assertRaises(TypeError, solution.find_busiest_period, None)
self.assertEqual(solution.find_busiest_period([]), None)
data = [
Data(3, 2, EventType.EXIT),
Data(1, 2, EventType.ENTER),
Data(3, 1, EventType.ENTER),
Data(7, 3, EventType.ENTER),
Data(9, 2, EventType.EXIT),
Data(8, 2, EventType.EXIT),
]
self.assertEqual(solution.find_busiest_period(data), Period(7, 8))
print('Success: test_find_busiest_period')
def main():
test = TestSolution()
test.test_find_busiest_period()
if __name__ == '__main__':
main()
|
_____no_output_____
|
Apache-2.0
|
online_judges/busiest_period/busiest_period_challenge.ipynb
|
benkeesey/interactive-coding-challenges
|
$ \newcommand{\bra}[1]{\langle 1|} $$ \newcommand{\ket}[1]{|1\rangle} $$ \newcommand{\braket}[2]{\langle 1|2\rangle} $$ \newcommand{\dot}[2]{ 1 \cdot 2} $$ \newcommand{\biginner}[2]{\left\langle 1,2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{1} 2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{1}} $$ \newcommand{\mypar}[1]{\left( 1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( 1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{1}2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert 1 \right\rVert } $$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} 1 \mspace{-1.5mu} \rfloor } $$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}1}}} $$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}1}}} $$ \newcommand{\redbit}[1] {\mathbf{{\color{red}1}}} $$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}1}}} $$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}1}}} $ Probabilistic States _prepared by Abuzer Yakaryilmaz_[](https://youtu.be/tJjrF7WgT1g) Suppose that Asja tosses a fair coin secretly.As we do not see the result, our information about the outcome will be probabilistic:$\rightarrow$ The outcome is heads with probability $0.5$ and the outcome will be tails with probability $0.5$.If the coin has a bias $ \dfrac{Pr(Head)}{Pr(Tail)} = \dfrac{3}{1}$, then our information about the outcome will be as follows:$\rightarrow$ The outcome will be heads with probability $ 0.75 $ and the outcome will be tails with probability $ 0.25 $. Explanation: The probability of getting heads is three times of the probability of getting tails. The total probability is 1. We divide the whole probability 1 into four parts (three parts are for heads and one part is for tail), one part is $ \dfrac{1}{4} = 0.25$, and then give three parts for heads ($0.75$) and one part for tails ($0.25$). Listing probabilities as a column We have two different outcomes: heads (0) and tails (1).We use a column of size 2 to show the probabilities of getting heads and getting tails.For the fair coin, our information after the coin-flip will be $ \myvector{0.5 \\ 0.5} $. For the biased coin, it will be $ \myvector{0.75 \\ 0.25} $.The first entry shows the probability of getting heads, and the second entry shows the probability of getting tails. $ \myvector{0.5 \\ 0.5} $ and $ \myvector{0.75 \\ 0.25} $ are two examples of 2-dimensional (column) vectors. 
Task 1 Suppose that Balvis secretly flips a coin having the bias $ \dfrac{Pr(Heads)}{Pr(Tails)} = \dfrac{1}{4}$.Represent your information about the outcome as a column vector. Task 2 Suppose that Fyodor secretly rolls a loaded (tricky) dice with the bias $$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$Represent your information about the result as a column vector. Remark that the size of your column vector should be 6.You may use python for your calculations.
|
#
# your code is here
#
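# --- Added sketch (one possible answer, not the course's official solution) -----------
# Task 1: Pr(Heads)/Pr(Tails) = 1/4, i.e. one part heads and four parts tails out of five.
balvis_state = [1/5, 4/5]          # [Pr(Heads), Pr(Tails)]
print("Task 1:", balvis_state)

# Task 2: bias 7:5:4:2:6:1 over the six faces; normalize by the total number of parts (25).
weights = [7, 5, 4, 2, 6, 1]
total = sum(weights)
dice_state = [w / total for w in weights]
print("Task 2:", dice_state, "sum =", sum(dice_state))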
|
_____no_output_____
|
Apache-2.0
|
classical-systems/CS16_Probabilistic_States.ipynb
|
dev-aditya/QWorld_Summer_School_2021
|
click for our solution Vector representation Suppose that we have a system with 4 distiguishable states: $ s_1 $, $s_2 $, $s_3$, and $s_4$. We expect the system to be in one of them at any moment. By speaking with probabilities, we say that the system is in one of the states with probability 1, and in any other state with probability 0. By using our column representation, we can show each state as a column vector (by using the vectors in standard basis of $ \mathbb{R}^4 $):$ e_1 = \myvector{1\\ 0 \\ 0 \\ 0}, e_2 = \myvector{0 \\ 1 \\ 0 \\ 0}, e_3 = \myvector{0 \\ 0 \\ 1 \\ 0}, \mbox{ and } e_4 = \myvector{0 \\ 0 \\ 0 \\ 1}.$ This representation helps us to represent our information on a system when it is in more than one state with certain probabilities. Remember the case in which the coins are tossed secretly. For example, suppose that the system is in states $ s_1 $, $ s_2 $, $ s_3 $, and $ s_4 $ with probabilities $ 0.20 $, $ 0.25 $, $ 0.40 $, and $ 0.15 $, respectively. (The total probability must be 1, i.e., $ 0.20+0.25+0.40+0.15 = 1.00 $)Then, we can say that the system is in the following probabilistic state:$ 0.20 \cdot e_1 + 0.25 \cdot e2 + 0.40 \cdot e_3 + 0.15 \cdot e4 $$ = 0.20 \cdot \myvector{1\\ 0 \\ 0 \\ 0} + 0.25 \cdot \myvector{0\\ 1 \\ 0 \\ 0} + 0.40 \cdot \myvector{0\\ 0 \\ 1 \\ 0} + 0.15 \cdot \myvector{0\\ 0 \\ 0 \\ 1} $$ = \myvector{0.20\\ 0 \\ 0 \\ 0} + \myvector{0\\ 0.25 \\ 0 \\ 0} + \myvector{0\\ 0 \\0.40 \\ 0} + \myvector{0\\ 0 \\ 0 \\ 0.15 } = \myvector{ 0.20 \\ 0.25 \\ 0.40 \\ 0.15 }, $where the summation of entries must be 1. Probabilistic state A probabilistic state is a linear combination of the vectors in the standard basis. Here coefficients (scalars) must satisfy certain properties: Each coefficient is non-negative The summation of coefficients is 1 Alternatively, we can say that a probabilistic state is a probability distribution over deterministic states.We can show all information as a single mathematical object, which is called as a stochastic vector. Remark that the state of any linear system is a linear combination of the vectors in the basis. Task 3 For a system with 4 states, randomly create a probabilistic state, and print its entries, e.g., $ 0.16~~0.17~~0.02~~0.65 $.Hint: You may pick your random numbers between 0 and 100 (or 1000), and then normalize each value by dividing the summation of all numbers.
|
#
# your solution is here
#
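# --- Added sketch (one possible answer, not the course's official solution) -----------
# Pick four random integers and normalize them so that the entries sum to 1.
from random import randrange

picks = [randrange(100) + 1 for _ in range(4)]   # +1 avoids an all-zero draw
total = sum(picks)
state = [p / total for p in picks]
print(" ".join("{:.2f}".format(p) for p in state), " sum =", sum(state))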
|
_____no_output_____
|
Apache-2.0
|
classical-systems/CS16_Probabilistic_States.ipynb
|
dev-aditya/QWorld_Summer_School_2021
|
click for our solution Task 4 [extra] As given in the hint for Task 3, you may pick your random numbers between 0 and $ 10^k $. For better precision, you may take bigger values of $ k $.Write a function that randomly creates a probabilisitic state of size $ n $ with a precision up to $ k $ digits. Test your function.
|
#
# your solution is here
#
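# --- Added sketch (one possible answer, not the course's official solution) -----------
# Random probabilistic state of size n with entries rounded to k digits; note that the
# rounding can make the printed sum deviate very slightly from 1.
from random import randrange

def random_probabilistic_state(n, k):
    picks = [randrange(10**k) + 1 for _ in range(n)]
    total = sum(picks)
    return [round(p / total, k) for p in picks]

for n, k in [(4, 2), (6, 4)]:
    print(n, k, random_probabilistic_state(n, k))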
|
_____no_output_____
|
Apache-2.0
|
classical-systems/CS16_Probabilistic_States.ipynb
|
dev-aditya/QWorld_Summer_School_2021
|
Sentiment Analysis: Data Gathering 1 (VADER)
The original sentiments of the domain dataset are unclean, especially for the neutral sentiment. Instead of manually going through and correcting every sentiment by hand, certain techniques are employed to assist this process. This notebook implements the first data annotation pipeline for the sentiment analysis task, which uses NLTK's VADER sentiment classifier to quickly obtain a different baseline sentiment to compare with the original. This process has been performed iteratively by manually inspecting the results and modifying VADER's internal lexicon, which contains pre-defined weights towards certain sentiments. The data used here are texts that have been cleaned of stopwords (see ma_eda_all.ipynb), since certain words/phrases, e.g. "kind regards", "good day", etc., affect the results negatively. The normalized version of the texts is used in order to better target certain words and to update the weights within VADER's vocabulary, since some words, e.g. "worn", "hole", etc., are considered more negative in this domain than VADER would normally rate them.
Notes
* Data: feedback_39k
* Texts have been cleaned of certain stopwords that might skew the results of VADER
* Using normalized words to better target terms
* Tuned by updating the vocabulary of VADER
Goal
* Add an additional column for VADER sentiments pos/neu/neg
Results
* Passable results to help with the manual annotation tasks
* Very different sentiment distributions than the original sentiments
* Not reliable when texts contain too few words
|
import nltk
nltk.download('vader_lexicon')
nltk.download('punkt')
import re
import pandas as pd
import seaborn as sns; sns.set()
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sns.set(style='white', context='notebook', palette='deep')
from google.colab import drive
drive.mount('/content/drive')
PROJECT_PATH = '/content/drive/MyDrive/Colab/data/ma_data/'
DATA = PROJECT_PATH + 'feedback_all_normalized.csv'
DATA_EXPORT = PROJECT_PATH + 'feedback_all_vader_1.csv'
sia = SentimentIntensityAnalyzer()
print(sia.lexicon)
domain_words = {"bruise": -3.0, "pity": -3.0, "thanks": 0.0, "glue": -2.0, "shortcoming": -3.0, "break": -3.0, "inflamed": -2.0, "reminder": -1.0, "reliable": 3.0, "uncomplicated": 2.0, "fast": 2.0, "kindly": 0.0, "confuse": -2.0, "blister": -3.0, "flaw": -3.0, "stain": -3.0, "complain": -2.0, "dissolve": -3.0, "apalled": -4.0, "discolor": -3.0, "spot": -2.0, "big": -1.5, "small": -1.5, "broken": -3.0, "worn": -3.0, "torn": -3.0, "hole": -3.0, "dirt": -3.0}
sia.lexicon.update(domain_words)
df_raw = pd.read_csv(DATA)
df_raw[6:11]
df = df_raw.copy()
%%time
pos_treshold = 0.8
neg_treshold = -0.25
df['vader'] = df['normalized_with_stopwords'].apply(lambda x: 'POSITIVE' if sia.polarity_scores(str(x))['compound'] >= pos_treshold
else ('NEGATIVE' if sia.polarity_scores(str(x))['compound'] <= neg_treshold
else 'NONE'))
df['vader score'] = df['normalized_with_stopwords'].apply(lambda x: sia.polarity_scores(str(x))['compound'])
# df.iloc[idx, 8]  # 'idx' was defined interactively in the original session; left commented out here
# Original sentiment distribution
df["sentiment"].value_counts(normalize=True)
# Vader initial predictions
df["vader"].value_counts(normalize=True)
# No including stopwords
df["vader"].value_counts(normalize=True)
# With more stopwords v2
df["vader"].value_counts(normalize=True)
# With more stopwords v3
df["vader"].value_counts(normalize=True)
test_sia = "material error on the belt loop leather color flake off"
sia.polarity_scores(test_sia)
df_export = df[["feedback_text_en", "sentiment", "vader", "vader score", "delivery", "feedback_return", "product", "monetary", "one_hot_labels", "feedback_normalized", "normalized_with_stopwords"]]
df_export.to_csv(DATA_EXPORT)
|
_____no_output_____
|
MIT
|
notebooks/ma02_data_sa_vader.ipynb
|
CouchCat/ma-zdash-nlp
|
Linear independence
|
import numpy as np
from sympy.solvers import solve
from sympy import Symbol
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
|
_____no_output_____
|
MIT
|
juypter/notebooks/linear-algebra/3_linear_independence.ipynb
|
JamesMcGuigan/ecosystem-research
|
A set of vectors {V0, V1, …, Vn−1} is called linearly independent if none of the vectors in the set can be written as a linear combination of the others. Linearly Independent Arrays
|
A = np.array([1,1,1])
B = np.array([0,1,1])
C = np.array([0,0,1])
Z = np.array([0,0,0])
np.array_equal(
Z,
0*A + 0*B + 0*C
)
solve(x*A + y*B + z*C)
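# Added check (illustrative alternative): the rank of the stacked vectors equals the
# number of vectors exactly when the set is linearly independent.
M = np.vstack([A, B, C])
print(np.linalg.matrix_rank(M) == len(M))   # True -> linearly independent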
|
_____no_output_____
|
MIT
|
juypter/notebooks/linear-algebra/3_linear_independence.ipynb
|
JamesMcGuigan/ecosystem-research
|
Linearly Dependent Arrays
|
A = np.array([1,1,1])
B = np.array([0,0,1])
C = np.array([1,1,0])
1*A + -1*B + -1*C
solve(x*A + y*B + z*C)
A = np.array([1,2,3])
B = np.array([1,-4,-4])
C = np.array([3,0,2])
2*A + 1*B + -C
solve(x*A + y*B + z*C)
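# Added check (illustrative alternative): here the rank is smaller than the number of
# vectors, confirming that the set is linearly dependent (C = 2*A + B).
M = np.vstack([A, B, C])
print(np.linalg.matrix_rank(M), "<", len(M))   # 2 < 3 -> linearly dependent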
|
_____no_output_____
|
MIT
|
juypter/notebooks/linear-algebra/3_linear_independence.ipynb
|
JamesMcGuigan/ecosystem-research
|
Datasets and Neural NetworksThis notebook will step through the process of loading an arbitrary dataset in PyTorch, and creating a simple neural network for regression. DatasetsWe will first work through loading an arbitrary dataset in PyTorch. For this project, we chose the delve abalone dataset. First, download and unzip the dataset from the link above, then unzip `Dataset.data.gz` and move `Dataset.data` into `hackpack-ml/models/data`.We are given the following attribute information in the spec:```Attributes: 1 sex u M F I Gender or Infant (I) 2 length u (0,Inf] Longest shell measurement (mm) 3 diameter u (0,Inf] perpendicular to length (mm) 4 height u (0,Inf] with meat in shell (mm) 5 whole_weight u (0,Inf] whole abalone (gr) 6 shucked_weight u (0,Inf] weight of meat (gr) 7 viscera_weight u (0,Inf] gut weight (after bleeding) (gr) 8 shell_weight u (0,Inf] after being dried (gr) 9 rings u 0..29 +1.5 gives the age in years```
|
import math
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
import pandas as pd
from torch.utils.data import Dataset, DataLoader
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
Pandas is a data manipulation library that works really well with structured data. We can use Pandas DataFrames to load the dataset.
|
col_names = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight', 'rings']
abalone_df = pd.read_csv('../data/Dataset.data', sep=' ', names=col_names)
abalone_df.head(n=3)
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
We define a subclass of PyTorch Dataset for our Abalone dataset.
|
class AbaloneDataset(data.Dataset):
"""Abalone dataset. Provides quick iteration over rows of data."""
def __init__(self, csv):
"""
Args: csv (string): Path to the Abalone dataset.
"""
self.features = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight']
self.y = ['rings']
self.abalone_df = pd.read_csv(csv, sep=' ', names=(self.features + self.y))
# Turn categorical data into machine interpretable format (one hot)
self.abalone_df['sex'] = pd.get_dummies(self.abalone_df['sex'])
def __len__(self):
return len(self.abalone_df)
def __getitem__(self, idx):
"""Return (x,y) pair where x are abalone features and y is age."""
features = self.abalone_df.iloc[idx][self.features].values
y = self.abalone_df.iloc[idx][self.y]
return torch.Tensor(features).float(), torch.Tensor(y).float()
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
Neural NetworksThe task is to predict the age (number of rings) of abalone from physical measurements. We build a simple neural network with one hidden layer to model the regression.
|
class Net(nn.Module):
def __init__(self, feature_size):
super(Net, self).__init__()
# feature_size (8) input features -> 4 hidden units -> 1 output
self.fc1 = nn.Linear(feature_size, 4)
self.fc2 = nn.Linear(4, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
We instantiate an Abalone dataset instance and create DataLoaders for train and test sets.
|
dataset = AbaloneDataset('../data/Dataset.data')
train_split, test_split = math.floor(len(dataset) * 0.8), math.ceil(len(dataset) * 0.2)
trainset = [dataset[i] for i in range(train_split)]
testset = [dataset[train_split + j] for j in range(test_split)]
batch_sz = len(trainset) # Compact data allows for big batch size
trainloader = data.DataLoader(trainset, batch_size=batch_sz, shuffle=True, num_workers=4)
testloader = data.DataLoader(testset, batch_size=batch_sz, shuffle=False, num_workers=4)
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
Now, we can initialize our network and define train and test functions
|
net = Net(len(dataset.features))
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.1)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
gpu_ids = [0] # On Colab, we have access to one GPU. Change this value as you see fit
def train(epoch):
"""
Trains our net on data from the trainloader for a single epoch
"""
net.train()
with tqdm(total=len(trainloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad() # Clear any stored gradients for new step
outputs = net(inputs.float())
loss = loss_fn(outputs, targets) # Calculate loss between prediction and label
loss.backward() # Backpropagate gradient updates through net based on loss
optimizer.step() # Update net weights based on gradients
progress_bar.set_postfix(loss=loss.item())
progress_bar.update(inputs.size(0))
def test(epoch):
"""
Run net in inference mode on test data.
"""
net.eval()
# Ensures the net will not update weights
with torch.no_grad():
with tqdm(total=len(testloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device).float(), targets.to(device).float()
outputs = net(inputs)
loss = loss_fn(outputs, targets)
progress_bar.set_postfix(testloss=loss.item())
progress_bar.update(inputs.size(0))
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
Now that everything is prepared, it's time to train!
|
test_freq = 5 # Frequency to run model on validation data
for epoch in range(0, 200):
train(epoch)
if epoch % test_freq == 0:
test(epoch)
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
We use the network's eval mode to do a sample prediction to see how well it does.
|
net.eval()
sample = testset[0]
predicted_age = net(sample[0])
true_age = sample[1]
print(f'Input features: {sample[0]}')
print(f'Predicted age: {predicted_age.item()}, True age: {true_age[0]}')
|
_____no_output_____
|
MIT
|
models/dataset_nn/dataset_neural_nets.ipynb
|
TreeHacks/hackpack-ml
|
Optimization with equality constraints
|
import math
import numpy as np
from scipy import optimize as opt
|
_____no_output_____
|
MIT
|
00-pre-requisitos/2-math/otimização-II.ipynb
|
sn3fru/datascience_course
|
maximize $.4\,\log(x_1)+.6\,\log(x_2)$ s.t. $x_1+3\,x_2=50$.
|
I = 50
p = np.array([1, 3])
U = lambda x: (.4*math.log(x[0])+.6*math.log(x[1]))
x0 = (I/len(p))/np.array(p)
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
def consumer(U, p, I):
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
x0 = (I/len(p))/np.array(p)
sol = opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': False, 'ftol': 1e-08})
if sol.status == 0:
return {'x': sol.x, 'V': -sol.fun, 'MgU': -sol.jac, 'mult': -sol.jac[0]/p[0]}
else:
return 0
consumer(U, p, I)
delta=.01
(consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
delta=.001
numerador = (consumer(U,p+np.array([delta, 0]), I)['V']-consumer(U,p+np.array([-delta, 0]), I)['V'])/(2*delta)
denominador = (consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
-numerador/denominador
|
_____no_output_____
|
MIT
|
00-pre-requisitos/2-math/otimização-II.ipynb
|
sn3fru/datascience_course
|
Cost function
|
# Production function
F = lambda x: (x[0]**.8)*(x[1]**.2)
w = np.array([5, 4])
y = 1
constraint = ({'type': 'eq', 'fun': lambda x: y-F(x)})
x0 = np.array([.5, .5])
cost = opt.minimize(lambda x: w@x, x0, method='SLSQP', constraints=constraint, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
F(cost.x)
cost
|
_____no_output_____
|
MIT
|
00-pre-requisitos/2-math/otimização-II.ipynb
|
sn3fru/datascience_course
|
Exercise
|
a = 2
u = lambda c: -np.exp(-a*c)
R = 2
Z2 = np.array([.72, .92, 1.12, 1.32])
Z3 = np.array([.86, .96, 1.06, 1.16])
def U(x):
states = len(Z2)*len(Z3)
U = u(x[0])
for z2 in Z2:
for z3 in Z3:
U += (1/states)*u(x[1]*R+x[2]*z2+x[3]*z3)
return U
p = np.array([1, 1, .5, .5])
I = 4
# a=1
consumer(U, p, I)
# a=5
consumer(U, p, I)
# a=2
consumer(U, p, I)
import matplotlib.pyplot as plt
x = np.arange(0.0, 2.0, 0.01)
a = 2
u = lambda c: -np.exp(-a*c)
plt.plot(x, u(x))
a = -2
plt.plot(x, u(x))
|
_____no_output_____
|
MIT
|
00-pre-requisitos/2-math/otimização-II.ipynb
|
sn3fru/datascience_course
|
Optimization with inequality constraints
|
f = lambda x: -x[0]**3+x[1]**2-2*x[0]*(x[2]**2)
constraints =({'type': 'eq', 'fun': lambda x: 2*x[0]+x[1]**2+x[2]-5},
{'type': 'ineq', 'fun': lambda x: 5*x[0]**2-x[1]**2-x[2]-2})
constraints =({'type': 'eq', 'fun': lambda x: x[0]**3-x[1]})
x0 = np.array([.5, .5, 2])
opt.minimize(f, x0, method='SLSQP', constraints=constraints, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
|
Optimization terminated successfully. (Exit mode 0)
Current function value: -19.000000000000256
Iterations: 11
Function evaluations: 56
Gradient evaluations: 11
|
MIT
|
00-pre-requisitos/2-math/otimização-II.ipynb
|
sn3fru/datascience_course
|
Params:
|
aggregate_by_state = False
outcome_type = 'cases'
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Basic Data Visualization
|
# Just something to quickly summarize the number of cases and distributions each day
# 'deaths' and 'cases' contain the time-series of the outbreak
df = load_data.load_county_level(data_dir = '../data/')
df = df.sort_values('#Deaths_3/30/2020', ascending=False)
# outcome_cases = load_data.outcome_cases # most recent day
# outcome_deaths = load_data.outcome_deaths
important_vars = load_data.important_keys(df)
very_important_vars = ['PopulationDensityperSqMile2010',
# 'MedicareEnrollment,AgedTot2017',
'PopulationEstimate2018',
'#ICU_beds',
'MedianAge2010',
'Smokers_Percentage',
'DiabetesPercentage',
'HeartDiseaseMortality',
'#Hospitals'
# 'PopMale60-642010',
# 'PopFmle60-642010',
# 'PopMale65-742010',
# 'PopFmle65-742010',
# 'PopMale75-842010',
# 'PopFmle75-842010',
# 'PopMale>842010',
# 'PopFmle>842010'
]
def sum_lists(list_of_lists):
arr = np.array(list(list_of_lists))
sum_arr = np.sum(arr,0)
return list(sum_arr)
if aggregate_by_state:
# Aggregate by State
state_deaths_df = df.groupby('StateNameAbbreviation').deaths.agg(sum_lists).to_frame()
state_cases_df = df.groupby('StateNameAbbreviation').cases.agg(sum_lists).to_frame()
df = pd.concat([state_cases_df,state_deaths_df],axis =1 )
# Distribution of the maximum number of cases
_cases = list(df['cases'])
max_cases = []
for i in range(len(df)):
max_cases.append(max(_cases[i]))
print('Number of counties with non-zero cases')
print(sum([v >0 for v in max_cases]))
# cases truncated below 20 and above 1000 for plot readability
plt.hist([v for v in max_cases if v > 20 and v < 1000],bins = 100)
sum(max_cases)
print(sum([v > 50 for v in max_cases]))
np.quantile(max_cases,.5)
# Distribution of the maximum number of deaths
_deaths = list(df['deaths'])
max_deaths = []
for i in range(len(df)):
max_deaths.append(max(_deaths[i]))
print('Number of counties with non-zero deaths')
print(sum([v > 0 for v in max_deaths]))
# plt.hist(max_cases)
# print(sum([v >0 for v in max_cases]))
plt.hist([v for v in max_deaths if v > 5],bins=30)
sum(max_deaths)
max(max_deaths)
np.quantile(max_deaths,.7)
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Clean data
|
# Remove counties with zero cases
max_cases = [max(v) for v in df['cases']]
df['max_cases'] = max_cases
max_deaths = [max(v) for v in df['deaths']]
df['max_deaths'] = max_deaths
df = df[df['max_cases'] > 0]
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Predict data from model:
|
method_keys = []
# clear predictions
for m in method_keys:
del df[m]
# target_day = np.array([1])
# # Trains model on train_df and produces predictions for the final day for test_df and writes prediction
# # to a new column for test_df
# # fit_and_predict(df, method='exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(train_df, test_df,'shared_exponential', mode='eval_mode',demographic_vars=important_vars)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',demographic_vars=very_important_vars,target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=np.array([1,2,3]))
# # fit_and_predict(train_df, test_d f,method='exponential',mode='eval_mode',target_day = np.array([1,2]))
# # Finds the names of all the methods
# method_keys = [c for c in df if 'predicted' in c]
# method_keys
# for days_ahead in [1, 2, 3]:
# for method in ['exponential', 'shared_exponential', 'ensemble']:
# fit_and_predict(df, method=method, outcome=outcome_type, mode='eval_mode',target_day=np.array([days_ahead]))
# if method == 'shared_exponential':
# fit_and_predict(df,method='shared_exponential',
# outcome=outcome_type,
# mode='eval_mode',
# demographic_vars=very_important_vars,
# target_day=np.array([days_ahead]))
# method_keys = [c for c in df if 'predicted' in c]
# geo = ['countyFIPS', 'CountyNamew/StateAbbrev']
# method_keys = [c for c in df if 'predicted' in c]
# df_preds = df[method_keys + geo + ['deaths']]
# df_preds.to_pickle("multi_day_6.pkl")
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Ensemble predictions
|
exponential = {'model_type':'exponential'}
shared_exponential = {'model_type':'shared_exponential'}
demographics = {'model_type':'shared_exponential', 'demographic_vars':very_important_vars}
linear = {'model_type':'linear'}
# import fit_and_predict
# for d in [1, 2, 3]:
# df = fit_and_predict.fit_and_predict_ensemble(df,
# target_day=np.array([d]),
# mode='eval_mode',
# outcome=outcome_type,
# output_key=f'predicted_{outcome_type}_ensemble_{d}'
# )
import fit_and_predict
for d in [1, 3, 5, 7]:
df = fit_and_predict.fit_and_predict_ensemble(df,
target_day=np.array(range(1, d+1)),
mode='eval_mode',
outcome=outcome_type,
methods=[exponential,
shared_exponential,
demographics,
linear
],
output_key=f'predicted_{outcome_type}_ensemble_{d}_with_exponential'
)
method_keys = [c for c in df if 'predicted' in c]
# df = fit_and_predict.fit_and_predict_ensemble(df)
method_keys
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Evaluate and visualize models Compute MSE and log MSE on relevant cases
|
# TODO: add average rank as metric
# Computes the mse in log space and non-log space for all columns
def l1(arr1,arr2,norm=True):
"""
arr2 ground truth
arr1 predictions
"""
if norm:
sum_percent_dif = 0
for i in range(len(arr1)):
sum_percent_dif += np.abs(arr2[i]-arr1[i])/arr1[i]
return sum_percent_dif/len(arr1)
return sum([np.abs(a1-a2) for (a1,a2) in zip(arr1,arr2)])/len(arr1)
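# Worked example (illustrative): l1([10, 20], [12, 18]) == (|12-10|/10 + |18-20|/20) / 2 == 0.15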
mse = sklearn.metrics.mean_squared_error
# Only evaluate points that exceed this number of deaths
# lower_threshold, upper_threshold = 10, 100000
lower_threshold, upper_threshold = 10, np.inf
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][(outcome > lower_threshold)]] # * (outcome < upper_threshold)]]
print('Log scale MSE for '+key)
print(mse(np.log(outcome[(outcome > lower_threshold) * (outcome < upper_threshold)] + 1),preds))
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][outcome > lower_threshold]]
print('Log scale l1 for '+key)
print(l1(np.log(outcome[outcome > lower_threshold] + 1),preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw MSE for '+key)
print(mse(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds,norm=False))
|
Raw l1 for predicted_cases_ensemble_1
15.702192279696032
Raw l1 for predicted_cases_ensemble_3
56.27341453693248
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Plot residuals
|
# TODO: Create bounds automatically, create a plot function and call it instead of copying code, figure out way
# to plot more than two things at once cleanly
# Creates residual plots log scaled and raw
# We only look at cases with number of deaths greater than 5
def method_name_to_pretty_name(key):
# TODO: hacky, fix
words = key.split('_')
words2 = []
for w in words:
if not w.isnumeric():
words2.append(w)
else:
num = w
model_name = ' '.join(words2[2:])
# model_name = 'model'
if num == '1':
model_name += ' predicting 1 day ahead'
else:
model_name += ' predicting ' + num + ' days ahead'
return model_name
# Make log plots:
bounds = [1.5, 7]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make log plots zoomed in for the counties that have a smaller number of deaths
bounds = [1.5, 4]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make non-log plots zoomed in for the counties that have a smaller number of deaths. We set bounds
bounds = [10,400]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > 5]]
plt.scatter(outcome[outcome > 5],preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
Graph Visualizations
|
# Here we visualize predictions on a per county level.
# The blue lines are the true number of deaths, and the dots are our predictions for each model for those days.
def plot_prediction(row):
"""
Plots model predictions vs actual
row: dataframe row
window: autoregressive window size
"""
gold_key = outcome_type
for i,val in enumerate(row[gold_key]):
if val > 0:
start_point = i
break
# plt.plot(row[gold_key][start_point:], label=gold_key)
if len(row[gold_key][start_point:]) < 3:
return
sns.lineplot(list(range(len(row[gold_key][start_point:]))),row[gold_key][start_point:], label=gold_key)
for key in method_keys:
preds = row[key]
sns.scatterplot(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=method_name_to_pretty_name(key))
# plt.scatter(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=key)
# plt.legend()
# plt.show()
# sns.legend()
plt.title(row['CountyName']+' in '+row['StateNameAbbreviation'])
plt.ylabel(outcome_type)
plt.xlabel('Days since first death')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure(dpi=500)
plt.show()
# feature_vals = {
# 'PopulationDensityperSqMile2010' : 1.1525491065255939e-05,
# "MedicareEnrollment,AgedTot2017" : -2.119520577282583e-06,
# 'PopulationEstimate2018' : 2.8898343032154275e-07,
# '#ICU_beds' : -0.000647030727828718,
# 'MedianAge2010' : 0.05032666600339253,
# 'Smokers_Percentage' : -0.013410742818946319,
# 'DiabetesPercentage' : 0.04395318355581005,
# 'HeartDiseaseMortality' : 0.0015473771787186525,
# '#Hospitals': 0.019248102357644396,
# 'log(deaths)' : 0.8805209010821442,
# 'bias' : -1.871552103871495
# }
df = df.sort_values(by='max_deaths',ascending=False)
for i in range(len(df)):
row = df.iloc[i]
# If number of deaths greater than 10
if max(row['deaths']) > 10:
print(row['CountyName']+' in '+row['StateNameAbbreviation'])
plot_prediction(row)
for v in very_important_vars:
print(v+ ': '+str(row[v])) #+';\t contrib: '+ str(feature_vals[v]*float(row[v])))
print('\n')
|
_____no_output_____
|
MIT
|
modeling/basic_model_framework.ipynb
|
rahul263-stack/covid19-severity-prediction
|
0) Loading the libraries
|
# Show multiple results in a single output:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython.display import Math
import pandas as pd
import numpy as np
import geopandas as gpd
import os
import pysal
from pyproj import CRS
from shapely.geometry import Point, MultiPoint, Polygon, mapping
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pickle
|
C:\Users\Jorge\Anaconda3\lib\site-packages\pysal\explore\segregation\network\network.py:16: UserWarning: You need pandana and urbanaccess to work with segregation's network module
You can install them with `pip install urbanaccess pandana` or `conda install -c udst pandana urbanaccess`
"You need pandana and urbanaccess to work with segregation's network module\n"
C:\Users\Jorge\Anaconda3\lib\site-packages\pysal\model\spvcm\abstracts.py:10: UserWarning: The `dill` module is required to use the sqlite backend fully.
from .sqlite import head_to_sql, start_sql
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
1) Reading the databases: **(a) SIH 2019 data:**
|
df = pd.read_csv("NT02 - Bahia/SIH/sih_17-19.csv")
#pickle.dump(df, open('sih_2019', 'wb'))
#df = pickle.load(open('sih_2019','rb'))
df.info()
df.head()
df.rename(columns={'MES_CMPT':'Mes','DT_INTER':'DT_Inter','DT_SAIDA':'DT_Saida','MUNIC_RES':'Cod_Municipio_Res',
'MUNIC_MOV':'Cod_Municipio','DIAG_PRINC':'Diagnostico','PROC_REA':'Procedimento','COMPLEX':'Complexidade',
'QT_DIARIAS':'Quantidade Diarias'}, inplace=True)
df = df.astype({'Cod_Municipio_Res': 'str','Cod_Municipio':'str','DT_Inter':'str','DT_Saida':'str',
'Complexidade':'str','Procedimento':'str'})
df.info()
df['Complexidade'] = df['Complexidade'].replace(['2','3'],['Média','Alta'])
df.head()
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
* **Formatting the date columns:**
|
from datetime import datetime
df['DT_Inter'] = df['DT_Inter'].apply(lambda x: pd.to_datetime(x, format = '%Y%m%d'))
df['DT_Saida'] = df['DT_Saida'].apply(lambda x: pd.to_datetime(x, format = '%Y%m%d'))
pickle.dump(df, open('sih', 'wb'))
df = pickle.load(open('sih','rb'))
df2 = df.drop_duplicates(subset ="N_AIH",keep = 'last')
len(df2) # Total admissions in Bahia hospitals
len(df2[df2['Cod_Municipio_Res'].str.startswith('29')]) # Admissions in Bahia hospitals of individuals who live in Bahia
2550223/2579967
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
**(b) Municipalities shapefile:**
|
mun = gpd.read_file("NT02 - Bahia/mun_br.shp")
mun = mun.to_crs(CRS("WGS84"));
mun.crs
mun.info()
mun.head()
mun.plot();
plt.show();
mun_ba = mun[mun['GEOCODIGO'].str.startswith('29')].copy()
mun_ba.head()
mun_ba[mun_ba['GEOCODIGO'].str.startswith('290160')]
mun_ba[mun_ba['NOME']=='Sítio do Quinto']
mun_ba[mun_ba['NOME']=='Antas']
mun_ba.plot();
plt.show();
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
**Adding the 2019 population (IBGE):**
|
pop = gpd.read_file('NT02 - Bahia/IBGE - Estimativa popul 2019.shp')
pop.head()
mun_ba['Pop'] = 0
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'Pop'] = pop[pop['Codigo']==row['GEOCODIGO']]['p_pop_2019'].values[0]
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
**Adding cases up to 24/04:**
|
casos = gpd.read_file('NT02 - Bahia/Evolução/data_shape_ba_mod(1).shp')
casos.info()
mun_ba['c20200424'] = 0
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'c20200424'] = casos[casos['Codigo']==row['GEOCODIGO']]['2020-04-24'].values[0]
mun_ba['c20200424'] = mun_ba['c20200424'].fillna(0)
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
**Calculating prevalence rates (based on 24/04):**
|
mun_ba['prev'] = (mun_ba['c20200424']/mun_ba['Pop'])*100000
mun_ba.sort_values(by='prev', ascending = False)
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
(2) Admissions in Bahia (BA) hospitals **(a) Number of individuals:**
|
mun_ba['Qtd_Tot'] = 0
mun_ba['Qtd_Fora'] = 0
mun_ba['Qtd_CplxM'] = 0
mun_ba['Qtd_CplxA'] = 0
mun_ba['Dia_Tot'] = 0
mun_ba['Dia_CplxM'] = 0
mun_ba['Dia_CplxA'] = 0
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|
**Period from 01/07/2018 to 30/06/2019:**
|
from datetime import date
per = pd.date_range(date(2018,7,1), periods=365).tolist()
per[0]
per[-1]
# Admitted on any date up to 30/06/2019 and discharged between 01/07/2018 and 30/06/2019
df_BA = df2[(df2['DT_Inter'] <= per[-1]) & (df2['DT_Saida'] >= per[0]) & (df2['DT_Saida'] <= per[-1])]
#df_BA = df2[(df2['Cod_Municipio'].str.startswith('29')) & (df2['Cod_Municipio_Res'].str.startswith('29'))].copy()
df_BA.head()
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'Qtd_Tot'] = len(df_BA[df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]])
mun_ba.loc[i,'Qtd_Fora'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) & (df_BA['Cod_Municipio_Res']!=row['GEOCODIGO'][:-1])])
mun_ba.loc[i,'Qtd_CplxM'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Média')])
mun_ba.loc[i,'Qtd_CplxA'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Alta')])
mun_ba.loc[i,'Dia_Tot'] = df_BA[df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]]['Quantidade Diarias'].sum()
mun_ba.loc[i,'Dia_CplxM'] = df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Média')]['Quantidade Diarias'].sum()
mun_ba.loc[i,'Dia_CplxA'] = df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Alta')]['Quantidade Diarias'].sum()
fig, ax = plt.subplots(figsize=(15,15));
mun_ba.plot(ax = ax, column = 'Qtd_Tot');
mun_ba.to_file('NT02 - Bahia/intern_ba.shp')
mun_ba = gpd.read_file('NT02 - Bahia/intern_ba.shp')
|
_____no_output_____
|
MIT
|
NT02-Bahia (NRS Sul).ipynb
|
pedreirajr/GeoCombatCOVID19
|