| Column | Type | Range / Values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 6–14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6–260 |
| max_stars_repo_name | stringlengths | 6–119 |
| max_stars_repo_head_hexsha | stringlengths | 40–41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 6–260 |
| max_issues_repo_name | stringlengths | 6–119 |
| max_issues_repo_head_hexsha | stringlengths | 40–41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 6–260 |
| max_forks_repo_name | stringlengths | 6–119 |
| max_forks_repo_head_hexsha | stringlengths | 40–41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| avg_line_length | float64 | 2–1.04M |
| max_line_length | int64 | 2–11.2M |
| alphanum_fraction | float64 | 0–1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
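Each record below follows this schema: one row per Jupyter notebook file, with the repository path, name, head commit, and licenses repeated for the stars, issues, and forks views, followed by line-length statistics and the parsed `cells`, `cell_types`, and `cell_type_groups`. As a minimal sketch of how rows with this schema could be loaded and queried (the JSON Lines file name `notebooks.jsonl` is a hypothetical placeholder, not part of this dump):

```python
import json

import pandas as pd

# Hypothetical dump file; adjust the path and format to the actual export.
with open("notebooks.jsonl", encoding="utf8") as f:
    df = pd.DataFrame([json.loads(line) for line in f])

# Example query: starred, MIT-licensed notebooks with modest max line lengths.
mask = (
    df["max_stars_count"].notna()
    & df["max_stars_repo_licenses"].apply(lambda lics: lics is not None and "MIT" in lics)
    & (df["max_line_length"] < 2000)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```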
4a6bc0435d6634f68084df6a680e94538eddd631
11,016
ipynb
Jupyter Notebook
5_run_ldsc.ipynb
ch6845/regional_heritability_analysis
215c04748e41b85c8533f6d5baa505d3712b20e7
[ "MIT" ]
1
2020-07-15T12:40:55.000Z
2020-07-15T12:40:55.000Z
5_run_ldsc.ipynb
ch6845/regional_heritability_analysis
215c04748e41b85c8533f6d5baa505d3712b20e7
[ "MIT" ]
null
null
null
5_run_ldsc.ipynb
ch6845/regional_heritability_analysis
215c04748e41b85c8533f6d5baa505d3712b20e7
[ "MIT" ]
null
null
null
41.885932
1,708
0.598402
[ [ [ "import sys\nimport os\nimport glob\nimport subprocess as sp\nimport multiprocessing as mp\nimport pandas as pd\nimport numpy as np\n\nfrom basic_tools import *\n\ndebug=False", "_____no_output_____" ], [ "def run_ldsc(pheno_code,ld,output,mode='original',samp_prev=np.nan,pop_prev=np.nan):\n if os.path.exists(ldsc_path.format(pheno_code)+'.log'):\n print(\"Congratulations!. ldsc result of\",pheno_code,\"exists. passed.\")\n return\n if mode=='original':\n script=['ldsc.py','--h2',sumstats_path.format(pheno_code)+'.sumstats.gz', \n '--ref-ld-chr',ld_path.format(ld,''),\n '--w-ld-chr',wld_path,\n '--out',ldsc_path.format(output)]\n elif mode=='my':\n script=['ldsc_my.py','--h2',sumstats_path.format(pheno_code)+'.sumstats.gz', \n '--ref-ld-chr',ld_path.format(ld,''),\n '--w-ld-chr',wld_path,\n '--out',ldsc_path.format(output)] \n else:\n print(\"run_ldsc mode Error!!!!!!!\")\n \n if np.isnan(samp_prev)==False and np.isnan(pop_prev)==False:\n script+=['--samp-prev',str(samp_prev),'--pop-prev',str(pop_prev)]\n \n print('Started:',' '.join(script))\n sp.call(script) \n print('Finished:',' '.join(script))", "_____no_output_____" ], [ "def run_ldsc_wrapper(prefix,scale,pheno_code,samp_prev=np.nan,pop_prev=np.nan):\n run_ldsc(pheno_code,prefix,'{}.{}'.format(prefix,pheno_code),mode='original' if mode=='uni' else 'my',samp_prev=samp_prev,pop_prev=pop_prev)\n ", "_____no_output_____" ], [ "sys.argv#uni 0 20 x x", "_____no_output_____" ], [ "mode=sys.argv[1]\nscale=int(sys.argv[2])\ncores=int(sys.argv[3])\nstart=int(sys.argv[4])\nend=int(sys.argv[5])\n\nif mode=='uni':\n prefix=mode\nelse:\n prefix=mode+str(scale)", "_____no_output_____" ], [ "#start,end,prefix=0,1000,'bp300'", "_____no_output_____" ], [ "phenotypes_uni_filtered['prevalence']=phenotypes_uni_filtered['n_cases']/phenotypes_uni_filtered['n_non_missing']", "_____no_output_____" ], [ "phenotypes_uni_filtered.shape", "_____no_output_____" ], [ "pheno_code_list_todo=[]\n\nfor idx,row in phenotypes_uni_filtered.iloc[start:end].iterrows():\n if os.path.exists(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log'):\n #print(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log','exists')\n continue\n print(idx,end=' ')\n pheno_code_list_todo.append((idx,row['prevalence']))\n", "_____no_output_____" ], [ "\"\"\"\nphenotypes_filtered['prevalence']=phenotypes_filtered['n_cases']/phenotypes_filtered['n_non_missing']\n\nphenotypes_filtered.shape\n\npheno_code_list_todo=[]\nfor idx,row in phenotypes_filtered.iloc[start:end].iterrows():\n if os.path.exists(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log'):\n continue\n print(idx,end=' ')\n pheno_code_list_todo.append((idx,row['prevalence']))\n\"\"\"", "_____no_output_____" ] ], [ [ "```\njupyter nbconvert 5_run_ldsc.ipynb --to script\n\nexport SCREENDIR=$HOME/.screen\n\nstart=0;end=600;mode=uni\npython 5_run_ldsc.py $mode 0 10 $start $end \n\nstart=0;end=600;mode=bp\npython 5_run_ldsc.py $mode 300 10 $start $end && python 5_run_ldsc.py $mode 128 10 $start $end && python 5_run_ldsc.py $mode 64 5 $start $end && python 5_run_ldsc.py $mode 32 5 $start $end && python 5_run_ldsc.py $mode 16 5 $start $end && python 5_run_ldsc.py $mode 8 2 $start $end\n```", "_____no_output_____" ] ], [ [ "#pool = mp.Pool(processes=15)\n#pool.starmap(run_ldsc_wrapper,[(mode,scale,pheno_code,prevelence,prevelence) for (pheno_code,prevelence) in pheno_code_list_todo])", "_____no_output_____" ], [ "pool = mp.Pool(processes=cores)\n#pool.starmap(run_ldsc_wrapper,[(mode,scale,pheno_code) for pheno_code in 
pheno_code_list_todo])\npool.starmap(run_ldsc_wrapper,[(prefix,scale,pheno_code,prevelence,prevelence) for (pheno_code,prevelence) in pheno_code_list_todo])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6bde7296dd1e59de62d4c0425b4ad0786a08e5
1,970
ipynb
Jupyter Notebook
student-admissions/.ipynb_checkpoints/StudentAdmissionsSolutions-checkpoint.ipynb
schintalapudi/deep-learning
2b631ad4fbbc33a0f84a9b5ffb483f20c1eac2a7
[ "MIT" ]
null
null
null
student-admissions/.ipynb_checkpoints/StudentAdmissionsSolutions-checkpoint.ipynb
schintalapudi/deep-learning
2b631ad4fbbc33a0f84a9b5ffb483f20c1eac2a7
[ "MIT" ]
null
null
null
student-admissions/.ipynb_checkpoints/StudentAdmissionsSolutions-checkpoint.ipynb
schintalapudi/deep-learning
2b631ad4fbbc33a0f84a9b5ffb483f20c1eac2a7
[ "MIT" ]
null
null
null
19.50495
94
0.522843
[ [ [ "# Solutions", "_____no_output_____" ], [ "### One-hot encoding the rank", "_____no_output_____" ] ], [ [ "# Make dummy variables for rank\none_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)\n\n# Drop the previous rank column\none_hot_data = one_hot_data.drop('rank', axis=1)\n\n# Print the first 10 rows of our data\none_hot_data[:10]", "_____no_output_____" ] ], [ [ "### Scaling the data", "_____no_output_____" ] ], [ [ "# Copying our data\nprocessed_data = one_hot_data[:]\n\n# Scaling the columns\nprocessed_data['gre'] = processed_data['gre']/800\nprocessed_data['gpa'] = processed_data['gpa']/4.0\nprocessed_data[:10]", "_____no_output_____" ] ], [ [ "### Backpropagating the data", "_____no_output_____" ] ], [ [ "def error_term_formula(y, output):\n return (y-output) * output * (1 - output)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6be10a79131995c5c9fb0bb763f9703fd59b70
11,766
ipynb
Jupyter Notebook
demo.ipynb
EthanCarragher/anesthetic
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
[ "MIT" ]
34
2019-05-14T07:23:29.000Z
2022-03-01T21:49:16.000Z
demo.ipynb
EthanCarragher/anesthetic
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
[ "MIT" ]
184
2019-04-17T08:58:01.000Z
2022-02-16T23:56:02.000Z
demo.ipynb
EthanCarragher/anesthetic
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
[ "MIT" ]
15
2019-05-07T12:13:19.000Z
2022-03-16T22:05:28.000Z
23.48503
134
0.553459
[ [ [ "# anesthetic plot gallery\n This functions as both some examples of plots that can be produced, and a tutorial.\n Any difficulties/issues/requests should be posted as a [GitHub issue](https://github.com/williamjameshandley/anesthetic/issues)", "_____no_output_____" ], [ "## Download example data\n Download some example data from github (or alternatively use your own chains files)\n\n This downloads the PLA chains for the planck baseline cosmology,\n and the equivalent nested sampling chains:", "_____no_output_____" ] ], [ [ "import requests\nimport tarfile\n\nfor filename in [\"plikHM_TTTEEE_lowl_lowE_lensing.tar.gz\",\"plikHM_TTTEEE_lowl_lowE_lensing_NS.tar.gz\"]:\n github_url = \"https://github.com/williamjameshandley/cosmo_example/raw/master/\"\n url = github_url + filename\n open(filename, 'wb').write(requests.get(url).content)\n tarfile.open(filename).extractall()", "_____no_output_____" ] ], [ [ "## Marginalised posterior plotting\n Import anesthetic and load the MCMC samples:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom anesthetic import MCMCSamples, make_2d_axes\nmcmc_root = 'plikHM_TTTEEE_lowl_lowE_lensing/base_plikHM_TTTEEE_lowl_lowE_lensing'\nmcmc = MCMCSamples(root=mcmc_root)", "_____no_output_____" ] ], [ [ "We have plotting tools for 1D plots ...", "_____no_output_____" ] ], [ [ "fig, axes = mcmc.plot_1d('omegabh2') ;", "_____no_output_____" ] ], [ [ "... multiple 1D plots ...", "_____no_output_____" ] ], [ [ "fig, axes = mcmc.plot_1d(['omegabh2','omegach2','H0','tau','logA','ns']);\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "... triangle plots ...", "_____no_output_____" ] ], [ [ "mcmc.plot_2d(['omegabh2','omegach2','H0'], types={'lower':'kde','diagonal':'kde'});", "_____no_output_____" ] ], [ [ "... triangle plots (with the equivalent scatter plot filling up the left hand side) ...", "_____no_output_____" ] ], [ [ "mcmc.plot_2d(['omegabh2','omegach2','H0']);", "_____no_output_____" ] ], [ [ "... and rectangle plots.", "_____no_output_____" ] ], [ [ "mcmc.plot_2d([['omegabh2','omegach2','H0'], ['logA', 'ns']]);", "_____no_output_____" ] ], [ [ "Rectangle plots are pretty flexible with what they can do:", "_____no_output_____" ] ], [ [ "mcmc.plot_2d([['omegabh2','omegach2','H0'], ['H0','omegach2']]);", "_____no_output_____" ] ], [ [ "## Changing the appearance\n\nAnesthetic tries to follow matplotlib conventions as much as possible, so \nmost changes to the appearance should be relatively straight forward. 
\nHere are some examples:", "_____no_output_____" ], [ "* **figure size**:", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(5, 5))\nfig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], fig=fig, tex=mcmc.tex)\nmcmc.plot_2d(axes);", "_____no_output_____" ] ], [ [ "* **legends**:", "_____no_output_____" ] ], [ [ "fig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], tex=mcmc.tex)\nmcmc.plot_2d(axes, label='Posterior');\naxes.iloc[-1, 0].legend(bbox_to_anchor=(len(axes), len(axes)), loc='upper left');", "_____no_output_____" ] ], [ [ "* **unfilled contours**   &   **modifying individual axes**:", "_____no_output_____" ] ], [ [ "fig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], tex=mcmc.tex)\nmcmc.plot_2d(axes.iloc[0:1, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc=None);\nmcmc.plot_2d(axes.iloc[1:2, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc=None, cmap=plt.cm.Oranges, lw=3);\nmcmc.plot_2d(axes.iloc[2:3, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc='C2', ec='C3', c='C4', lw=2);", "_____no_output_____" ] ], [ [ "## Defining new parameters\n\n You can see that samples are stored as a pandas array", "_____no_output_____" ] ], [ [ "mcmc[:6]", "_____no_output_____" ] ], [ [ "Since it's a (weighted) pandas array, we compute things like the mean and variance \n of samples", "_____no_output_____" ] ], [ [ "mcmc.mean()", "_____no_output_____" ] ], [ [ "We can define new parameters with relative ease.\n For example, the default cosmoMC setup does not include omegab, only omegabh2:", "_____no_output_____" ] ], [ [ "'omegab' in mcmc", "_____no_output_____" ] ], [ [ "However, this is pretty trivial to recompute:", "_____no_output_____" ] ], [ [ "h = mcmc['H0']/100\nmcmc['omegab'] = mcmc['omegabh2']/h**2\nmcmc.tex['omegab'] = '$\\Omega_b$'\nmcmc.plot_1d('omegab');", "_____no_output_____" ] ], [ [ "## Nested sampling plotting\n Anethestic really comes to the fore for nested sampling. We can do all of\n the above, and more with the power that NS chains provide", "_____no_output_____" ] ], [ [ "from anesthetic import NestedSamples\nnested_root = 'plikHM_TTTEEE_lowl_lowE_lensing_NS/NS_plikHM_TTTEEE_lowl_lowE_lensing'\nnested = NestedSamples(root=nested_root)", "_____no_output_____" ] ], [ [ "We can infer the evidence, KL divergence and Bayesian model dimensionality:", "_____no_output_____" ] ], [ [ "ns_output = nested.ns_output()", "_____no_output_____" ] ], [ [ "This is a set of ``MCMCSamples``, with columns yielding the log of the Bayesian evidence \n (logZ), the Kullback-Leibler divergence (D) and the Bayesian model dimensionality (d).", "_____no_output_____" ] ], [ [ "ns_output[:6]", "_____no_output_____" ] ], [ [ "The evidence, KL divergence and Bayesian model dimensionality, with their corresponding errors, are:", "_____no_output_____" ] ], [ [ "for x in ns_output:\n print('%10s = %9.2f +/- %4.2f' % (x, ns_output[x].mean(), ns_output[x].std()))", "_____no_output_____" ] ], [ [ "Since ``ns_output`` is a set of ``MCMCSamples``, it may be plotted as usual. 
\n Here we illustrate slightly more fine-grained control of the axes construction \n (demanding three columns)", "_____no_output_____" ] ], [ [ "from anesthetic import make_1d_axes\nfig, axes = make_1d_axes(['logZ', 'D', 'd'], ncols=3, tex=ns_output.tex)\nns_output.plot_1d(axes);", "_____no_output_____" ] ], [ [ "We can also inspect the correlation between these inferences:", "_____no_output_____" ] ], [ [ "ns_output.plot_2d(['logZ','D']);", "_____no_output_____" ] ], [ [ "Here is a comparison of the base and NS output", "_____no_output_____" ] ], [ [ "h = nested['H0']/100\nnested['omegab'] = nested['omegabh2']/h**2\nnested.tex['omegab'] = '$\\Omega_b$'\n\nfig, axes = mcmc.plot_2d(['sigma8','omegab'])\nnested.plot_2d(axes=axes);", "_____no_output_____" ] ], [ [ "With nested samples, we can plot the prior (or any temperature), by\n passing beta=0. We also introduce here how to create figure legends.", "_____no_output_____" ] ], [ [ "prior = nested.set_beta(0)\nfig, axes = prior.plot_2d(['ns','tau'], label='prior')\nnested.plot_2d(axes=axes, label='posterior')\nhandles, labels = axes['ns']['tau'].get_legend_handles_labels()\nleg = fig.legend(handles, labels)\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "We can also set up an interactive plot, which allows us to replay a nested\n sampling run after the fact.", "_____no_output_____" ] ], [ [ "nested.gui()", "_____no_output_____" ] ], [ [ "There are also tools for converting to alternative formats, in case you have\n pipelines in other plotters:", "_____no_output_____" ] ], [ [ "from anesthetic.convert import to_getdist\ngetdist_samples = to_getdist(nested)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6be563f7b9036d9b5f0ba06c9f1da32c6d4594
17,089
ipynb
Jupyter Notebook
transformers/t1_next_word_pred_org.ipynb
warisgill/ml-dl-tutorials
bf8cd628ea8a97d02316a908cdf707211b89a7bc
[ "MIT" ]
7
2021-02-15T06:43:23.000Z
2022-01-13T10:43:32.000Z
PyTorch/Visual-Audio/Torchscript/transformer_tutorial.ipynb
MitchellTesla/Quantm
57045e0ea9ee7b965ecd26e4a8d0c1902df65245
[ "MIT" ]
1
2021-04-19T12:32:49.000Z
2021-04-19T12:32:49.000Z
PyTorch/Visual-Audio/Torchscript/transformer_tutorial.ipynb
MitchellTesla/Quantm
57045e0ea9ee7b965ecd26e4a8d0c1902df65245
[ "MIT" ]
1
2021-02-14T23:10:58.000Z
2021-02-14T23:10:58.000Z
76.977477
2,208
0.620633
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\nSequence-to-Sequence Modeling with nn.Transformer and TorchText\n===============================================================\n\nThis is a tutorial on how to train a sequence-to-sequence model\nthat uses the\n`nn.Transformer <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformer#torch.nn.Transformer>`__ module.\n\nPyTorch 1.2 release includes a standard transformer module based on the\npaper `Attention is All You\nNeed <https://arxiv.org/pdf/1706.03762.pdf>`__. The transformer model\nhas been proved to be superior in quality for many sequence-to-sequence\nproblems while being more parallelizable. The ``nn.Transformer`` module\nrelies entirely on an attention mechanism (another module recently\nimplemented as `nn.MultiheadAttention <https://pytorch.org/docs/master/nn.html?highlight=multiheadattention#torch.nn.MultiheadAttention>`__) to draw global dependencies\nbetween input and output. The ``nn.Transformer`` module is now highly\nmodularized such that a single component (like `nn.TransformerEncoder <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformerencoder#torch.nn.TransformerEncoder>`__\nin this tutorial) can be easily adapted/composed.\n\n![](../_static/img/transformer_architecture.jpg)\n\n\n\n", "_____no_output_____" ], [ "Define the model\n----------------\n\n\n", "_____no_output_____" ], [ "In this tutorial, we train ``nn.TransformerEncoder`` model on a\nlanguage modeling task. The language modeling task is to assign a\nprobability for the likelihood of a given word (or a sequence of words)\nto follow a sequence of words. A sequence of tokens are passed to the embedding\nlayer first, followed by a positional encoding layer to account for the order\nof the word (see the next paragraph for more details). The\n``nn.TransformerEncoder`` consists of multiple layers of\n`nn.TransformerEncoderLayer <https://pytorch.org/docs/master/nn.html?highlight=transformerencoderlayer#torch.nn.TransformerEncoderLayer>`__. Along with the input sequence, a square\nattention mask is required because the self-attention layers in\n``nn.TransformerEncoder`` are only allowed to attend the earlier positions in\nthe sequence. For the language modeling task, any tokens on the future\npositions should be masked. 
To have the actual words, the output\nof ``nn.TransformerEncoder`` model is sent to the final Linear\nlayer, which is followed by a log-Softmax function.\n\n\n", "_____no_output_____" ] ], [ [ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass TransformerModel(nn.Module):\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n self.model_type = 'Transformer'\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src, src_mask):\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, src_mask)\n output = self.decoder(output)\n return output", "_____no_output_____" ] ], [ [ "``PositionalEncoding`` module injects some information about the\nrelative or absolute position of the tokens in the sequence. The\npositional encodings have the same dimension as the embeddings so that\nthe two can be summed. Here, we use ``sine`` and ``cosine`` functions of\ndifferent frequencies.\n\n\n", "_____no_output_____" ] ], [ [ "class PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "_____no_output_____" ] ], [ [ "Load and batch data\n-------------------\n\n\n", "_____no_output_____" ], [ "This tutorial uses ``torchtext`` to generate Wikitext-2 dataset. The\nvocab object is built based on the train dataset and is used to numericalize\ntokens into tensors. 
Starting from sequential data, the ``batchify()``\nfunction arranges the dataset into columns, trimming off any tokens remaining\nafter the data has been divided into batches of size ``batch_size``.\nFor instance, with the alphabet as the sequence (total length of 26)\nand a batch size of 4, we would divide the alphabet into 4 sequences of\nlength 6:\n\n\\begin{align}\\begin{bmatrix}\n \\text{A} & \\text{B} & \\text{C} & \\ldots & \\text{X} & \\text{Y} & \\text{Z}\n \\end{bmatrix}\n \\Rightarrow\n \\begin{bmatrix}\n \\begin{bmatrix}\\text{A} \\\\ \\text{B} \\\\ \\text{C} \\\\ \\text{D} \\\\ \\text{E} \\\\ \\text{F}\\end{bmatrix} &\n \\begin{bmatrix}\\text{G} \\\\ \\text{H} \\\\ \\text{I} \\\\ \\text{J} \\\\ \\text{K} \\\\ \\text{L}\\end{bmatrix} &\n \\begin{bmatrix}\\text{M} \\\\ \\text{N} \\\\ \\text{O} \\\\ \\text{P} \\\\ \\text{Q} \\\\ \\text{R}\\end{bmatrix} &\n \\begin{bmatrix}\\text{S} \\\\ \\text{T} \\\\ \\text{U} \\\\ \\text{V} \\\\ \\text{W} \\\\ \\text{X}\\end{bmatrix}\n \\end{bmatrix}\\end{align}\n\nThese columns are treated as independent by the model, which means that\nthe dependence of ``G`` and ``F`` can not be learned, but allows more\nefficient batch processing.\n\n\n", "_____no_output_____" ] ], [ [ "import io\nimport torch\nfrom torchtext.utils import download_from_url, extract_archive\nfrom torchtext.data.utils import get_tokenizer\nfrom torchtext.vocab import build_vocab_from_iterator\n\nurl = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'\ntest_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url))\ntokenizer = get_tokenizer('basic_english')\nvocab = build_vocab_from_iterator(map(tokenizer,\n iter(io.open(train_filepath,\n encoding=\"utf8\"))))\n\ndef data_process(raw_text_iter):\n data = [torch.tensor([vocab[token] for token in tokenizer(item)],\n dtype=torch.long) for item in raw_text_iter]\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))\n\ntrain_data = data_process(iter(io.open(train_filepath, encoding=\"utf8\")))\nval_data = data_process(iter(io.open(valid_filepath, encoding=\"utf8\")))\ntest_data = data_process(iter(io.open(test_filepath, encoding=\"utf8\")))\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef batchify(data, bsz):\n # Divide the dataset into bsz parts.\n nbatch = data.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n data = data.view(bsz, -1).t().contiguous()\n return data.to(device)\n\nbatch_size = 20\neval_batch_size = 10\ntrain_data = batchify(train_data, batch_size)\nval_data = batchify(val_data, eval_batch_size)\ntest_data = batchify(test_data, eval_batch_size)", "_____no_output_____" ] ], [ [ "Functions to generate input and target sequence\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n", "_____no_output_____" ], [ "``get_batch()`` function generates the input and target sequence for\nthe transformer model. It subdivides the source data into chunks of\nlength ``bptt``. For the language modeling task, the model needs the\nfollowing words as ``Target``. For example, with a ``bptt`` value of 2,\nwe’d get the following two Variables for ``i`` = 0:\n\n![](../_static/img/transformer_input_target.png)\n\n\nIt should be noted that the chunks are along dimension 0, consistent\nwith the ``S`` dimension in the Transformer model. 
The batch dimension\n``N`` is along dimension 1.\n\n\n", "_____no_output_____" ] ], [ [ "bptt = 35\ndef get_batch(source, i):\n seq_len = min(bptt, len(source) - 1 - i)\n data = source[i:i+seq_len]\n target = source[i+1:i+1+seq_len].reshape(-1)\n return data, target", "_____no_output_____" ] ], [ [ "Initiate an instance\n--------------------\n\n\n", "_____no_output_____" ], [ "The model is set up with the hyperparameter below. The vocab size is\nequal to the length of the vocab object.\n\n\n", "_____no_output_____" ] ], [ [ "ntokens = len(vocab.stoi) # the size of vocabulary\nemsize = 200 # embedding dimension\nnhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder\nnlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\nnhead = 2 # the number of heads in the multiheadattention models\ndropout = 0.2 # the dropout value\nmodel = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device)", "_____no_output_____" ] ], [ [ "Run the model\n-------------\n\n\n", "_____no_output_____" ], [ "`CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__\nis applied to track the loss and\n`SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__\nimplements stochastic gradient descent method as the optimizer. The initial\nlearning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is\napplied to adjust the learn rate through epochs. During the\ntraining, we use\n`nn.utils.clip_grad_norm\\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__\nfunction to scale all the gradient together to prevent exploding.\n\n\n", "_____no_output_____" ] ], [ [ "criterion = nn.CrossEntropyLoss()\nlr = 5.0 # learning rate\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)\n\nimport time\ndef train():\n model.train() # Turn on the train mode\n total_loss = 0.\n start_time = time.time()\n src_mask = model.generate_square_subsequent_mask(bptt).to(device)\n for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):\n data, targets = get_batch(train_data, i)\n optimizer.zero_grad()\n if data.size(0) != bptt:\n src_mask = model.generate_square_subsequent_mask(data.size(0)).to(device)\n output = model(data, src_mask)\n loss = criterion(output.view(-1, ntokens), targets)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n optimizer.step()\n\n total_loss += loss.item()\n log_interval = 200\n if batch % log_interval == 0 and batch > 0:\n cur_loss = total_loss / log_interval\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches | '\n 'lr {:02.2f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f}'.format(\n epoch, batch, len(train_data) // bptt, scheduler.get_lr()[0],\n elapsed * 1000 / log_interval,\n cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n\ndef evaluate(eval_model, data_source):\n eval_model.eval() # Turn on the evaluation mode\n total_loss = 0.\n src_mask = model.generate_square_subsequent_mask(bptt).to(device)\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, bptt):\n data, targets = get_batch(data_source, i)\n if data.size(0) != bptt:\n src_mask = model.generate_square_subsequent_mask(data.size(0)).to(device)\n output = eval_model(data, src_mask)\n 
output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n return total_loss / (len(data_source) - 1)", "_____no_output_____" ] ], [ [ "Loop over epochs. Save the model if the validation loss is the best\nwe've seen so far. Adjust the learning rate after each epoch.\n\n", "_____no_output_____" ] ], [ [ "best_val_loss = float(\"inf\")\nepochs = 3 # The number of epochs\nbest_model = None\n\nfor epoch in range(1, epochs + 1):\n epoch_start_time = time.time()\n train()\n val_loss = evaluate(model, val_data)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),\n val_loss, math.exp(val_loss)))\n print('-' * 89)\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_model = model\n\n scheduler.step()", "_____no_output_____" ] ], [ [ "Evaluate the model with the test dataset\n-------------------------------------\n\nApply the best model to check the result with the test dataset.\n\n", "_____no_output_____" ] ], [ [ "test_loss = evaluate(best_model, test_data)\nprint('=' * 89)\nprint('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(\n test_loss, math.exp(test_loss)))\nprint('=' * 89)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6be7f04253b0adaa6431ce58b69cdfaab305fd
28,352
ipynb
Jupyter Notebook
Course 1 Introduction to Data Science in Python/Guiding Notebooks/Week+2.ipynb
sayakpaul/Applied-Data-Science-w-Python-Specialization
2776ef692c34b1fa4119f8b15605ac533950f462
[ "MIT" ]
5
2019-03-26T16:34:26.000Z
2019-11-21T08:40:13.000Z
Course 1 Introduction to Data Science in Python/Guiding Notebooks/Week+2.ipynb
sayakpaul/Applied-Data-Science-w-Python-Specialization
2776ef692c34b1fa4119f8b15605ac533950f462
[ "MIT" ]
null
null
null
Course 1 Introduction to Data Science in Python/Guiding Notebooks/Week+2.ipynb
sayakpaul/Applied-Data-Science-w-Python-Specialization
2776ef692c34b1fa4119f8b15605ac533950f462
[ "MIT" ]
null
null
null
20.208125
296
0.431645
[ [ [ "---\n\n_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._\n\n---", "_____no_output_____" ], [ "# The Series Data Structure", "_____no_output_____" ] ], [ [ "import pandas as pd\npd.Series?", "_____no_output_____" ], [ "purchase_1 = pd.Series({'Name': 'Chris',\n 'Item Purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name': 'Kevyn',\n 'Item Purchased': 'Kitty Litter',\n 'Cost': 2.50})\npurchase_3 = pd.Series({'Name': 'Vinod',\n 'Item Purchased': 'Bird Seed',\n 'Cost': 5.00})\n\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])\ndf.head()\n", "_____no_output_____" ], [ "df[['Item Purchased']]", "_____no_output_____" ], [ "purchase_1 = pd.Series({'Name': 'Chris',\n 'Item Purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name': 'Kevyn',\n 'Item Purchased': 'Kitty Litter',\n 'Cost': 2.50})\npurchase_3 = pd.Series({'Name': 'Vinod',\n 'Item Purchased': 'Bird Seed',\n 'Cost': 5.00})\n\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])\n\ndf.head()", "_____no_output_____" ], [ "import pandas as pd\n\npurchase_1 = pd.Series({'Name': 'Chris',\n 'Item Purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name': 'Kevyn',\n 'Item Purchased': 'Kitty Litter',\n 'Cost': 2.50})\npurchase_3 = pd.Series({'Name': 'Vinod',\n 'Item Purchased': 'Bird Seed',\n 'Cost': 5.00})\n\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])\ndf.head()", "_____no_output_____" ], [ "df.set_index(name=['S'])", "_____no_output_____" ], [ "df[df['Cost']>3.0]", "_____no_output_____" ], [ "animals = ['Tiger', 'Bear', 'Moose']\npd.Series(animals)", "_____no_output_____" ], [ "numbers = [1, 2, 3]\npd.Series(numbers)", "_____no_output_____" ], [ "animals = ['Tiger', 'Bear', None]\npd.Series(animals)", "_____no_output_____" ], [ "numbers = [1, 2, None]\npd.Series(numbers)", "_____no_output_____" ], [ "import numpy as np\nnp.nan == None", "_____no_output_____" ], [ "np.nan == np.nan", "_____no_output_____" ], [ "np.isnan(np.nan)", "_____no_output_____" ], [ "sports = {'Archery': 'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan',\n 'Taekwondo': 'South Korea'}\ns = pd.Series(sports)\ns", "_____no_output_____" ], [ "s.index", "_____no_output_____" ], [ "s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])\ns", "_____no_output_____" ], [ "sports = {'Archery': 'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan',\n 'Taekwondo': 'South Korea'}\ns = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])\ns", "_____no_output_____" ] ], [ [ "# Querying a Series", "_____no_output_____" ] ], [ [ "sports = {'Archery': 'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan',\n 'Taekwondo': 'South Korea'}\ns = pd.Series(sports)\ns", "_____no_output_____" ], [ "s.iloc[3]", "_____no_output_____" ], [ "s.loc['Golf']", "_____no_output_____" ], [ "s[3]", "_____no_output_____" ], [ "s['Golf']", "_____no_output_____" ], [ "sports = {99: 'Bhutan',\n 100: 'Scotland',\n 101: 'Japan',\n 102: 'South Korea'}\ns = pd.Series(sports)", "_____no_output_____" ], [ "s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead", "_____no_output_____" ], [ "s = pd.Series([100.00, 120.00, 101.00, 3.00])\ns", 
"_____no_output_____" ], [ "total = 0\nfor item in s:\n total+=item\nprint(total)", "_____no_output_____" ], [ "import numpy as np\n\ntotal = np.sum(s)\nprint(total)", "_____no_output_____" ], [ "#this creates a big series of random numbers\ns = pd.Series(np.random.randint(0,1000,10000))\ns.head()", "_____no_output_____" ], [ "len(s)", "_____no_output_____" ], [ "%%timeit -n 100\nsummary = 0\nfor item in s:\n summary+=item", "_____no_output_____" ], [ "%%timeit -n 100\nsummary = np.sum(s)", "_____no_output_____" ], [ "s+=2 #adds two to each item in s using broadcasting\ns.head()", "_____no_output_____" ], [ "for label, value in s.iteritems():\n s.set_value(label, value+2)\ns.head()", "_____no_output_____" ], [ "%%timeit -n 10\ns = pd.Series(np.random.randint(0,1000,10000))\nfor label, value in s.iteritems():\n s.loc[label]= value+2", "_____no_output_____" ], [ "%%timeit -n 10\ns = pd.Series(np.random.randint(0,1000,10000))\ns+=2\n", "_____no_output_____" ], [ "s = pd.Series([1, 2, 3])\ns.loc['Animal'] = 'Bears'\ns", "_____no_output_____" ], [ "original_sports = pd.Series({'Archery': 'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan',\n 'Taekwondo': 'South Korea'})\ncricket_loving_countries = pd.Series(['Australia',\n 'Barbados',\n 'Pakistan',\n 'England'], \n index=['Cricket',\n 'Cricket',\n 'Cricket',\n 'Cricket'])\nall_countries = original_sports.append(cricket_loving_countries)", "_____no_output_____" ], [ "original_sports", "_____no_output_____" ], [ "cricket_loving_countries", "_____no_output_____" ], [ "all_countries", "_____no_output_____" ], [ "all_countries.loc['Cricket']", "_____no_output_____" ] ], [ [ "# The DataFrame Data Structure", "_____no_output_____" ] ], [ [ "import pandas as pd\npurchase_1 = pd.Series({'Name': 'Chris',\n 'Item Purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name': 'Kevyn',\n 'Item Purchased': 'Kitty Litter',\n 'Cost': 2.50})\npurchase_3 = pd.Series({'Name': 'Vinod',\n 'Item Purchased': 'Bird Seed',\n 'Cost': 5.00})\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])\ndf.head()", "_____no_output_____" ], [ "df.loc['Store 2']", "_____no_output_____" ], [ "type(df.loc['Store 2'])", "_____no_output_____" ], [ "df.loc['Store 1']", "_____no_output_____" ], [ "df.loc['Store 1', 'Cost']", "_____no_output_____" ], [ "df.T", "_____no_output_____" ], [ "df.T.loc['Cost']", "_____no_output_____" ], [ "df['Cost']", "_____no_output_____" ], [ "df.loc['Store 1']['Cost']", "_____no_output_____" ], [ "df.loc[:,['Name', 'Cost']]", "_____no_output_____" ], [ "df.drop('Store 1')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "copy_df = df.copy()\ncopy_df = copy_df.drop('Store 1')\ncopy_df", "_____no_output_____" ], [ "copy_df.drop?", "_____no_output_____" ], [ "del copy_df['Name']\ncopy_df", "_____no_output_____" ], [ "df['Location'] = None\ndf", "_____no_output_____" ] ], [ [ "# Dataframe Indexing and Loading", "_____no_output_____" ] ], [ [ "costs = df['Cost']\ncosts", "_____no_output_____" ], [ "costs+=2\ncosts", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "!cat olympics.csv", "_____no_output_____" ], [ "df = pd.read_csv('olympics.csv')\ndf.head()", "_____no_output_____" ], [ "df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1)\ndf.head()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "for col in df.columns:\n if col[:2]=='01':\n df.rename(columns={col:'Gold' + col[4:]}, inplace=True)\n if col[:2]=='02':\n df.rename(columns={col:'Silver' + 
col[4:]}, inplace=True)\n if col[:2]=='03':\n df.rename(columns={col:'Bronze' + col[4:]}, inplace=True)\n if col[:1]=='№':\n df.rename(columns={col:'#' + col[1:]}, inplace=True) \n\ndf.head()", "_____no_output_____" ] ], [ [ "# Querying a DataFrame", "_____no_output_____" ] ], [ [ "df['Gold'] > 0", "_____no_output_____" ], [ "only_gold = df.where(df['Gold'] > 0)\nonly_gold.head()", "_____no_output_____" ], [ "only_gold['Gold'].count()", "_____no_output_____" ], [ "df['Gold'].count()", "_____no_output_____" ], [ "only_gold = only_gold.dropna()\nonly_gold.head()", "_____no_output_____" ], [ "only_gold = df[df['Gold'] > 0]\nonly_gold.head()", "_____no_output_____" ], [ "len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])", "_____no_output_____" ], [ "df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]", "_____no_output_____" ] ], [ [ "# Indexing Dataframes", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "df['country'] = df.index\ndf = df.set_index('Gold')\ndf.head()", "_____no_output_____" ], [ "df = df.reset_index()\ndf.head()", "_____no_output_____" ], [ "df = pd.read_csv('census.csv')\ndf.head()", "_____no_output_____" ], [ "df['SUMLEV'].unique()", "_____no_output_____" ], [ "df=df[df['SUMLEV'] == 50]\ndf.head()", "_____no_output_____" ], [ "columns_to_keep = ['STNAME',\n 'CTYNAME',\n 'BIRTHS2010',\n 'BIRTHS2011',\n 'BIRTHS2012',\n 'BIRTHS2013',\n 'BIRTHS2014',\n 'BIRTHS2015',\n 'POPESTIMATE2010',\n 'POPESTIMATE2011',\n 'POPESTIMATE2012',\n 'POPESTIMATE2013',\n 'POPESTIMATE2014',\n 'POPESTIMATE2015']\ndf = df[columns_to_keep]\ndf.head()", "_____no_output_____" ], [ "df = df.set_index(['STNAME', 'CTYNAME'])\ndf.head()", "_____no_output_____" ], [ "df.loc['Michigan', 'Washtenaw County']", "_____no_output_____" ], [ "df.loc[ [('Michigan', 'Washtenaw County'),\n ('Michigan', 'Wayne County')] ]", "_____no_output_____" ] ], [ [ "# Missing values", "_____no_output_____" ] ], [ [ "df = pd.read_csv('log.csv')\ndf", "_____no_output_____" ], [ "df.fillna?", "_____no_output_____" ], [ "df = df.set_index('time')\ndf = df.sort_index()\ndf", "_____no_output_____" ], [ "df = df.reset_index()\ndf = df.set_index(['time', 'user'])\ndf", "_____no_output_____" ], [ "df = df.fillna(method='ffill')\ndf.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a6bfa37d4b4bcf5658a26d7c79efaf89720c32e
9,358
ipynb
Jupyter Notebook
week_3/Cluster_ftest_spatiotemporal.ipynb
thearol/ACN_2021
07c888fd4601b7ff34f259866d69d09e6383a88d
[ "BSD-3-Clause" ]
1
2021-09-08T06:00:27.000Z
2021-09-08T06:00:27.000Z
week_3/Cluster_ftest_spatiotemporal.ipynb
thearol/ACN_2021
07c888fd4601b7ff34f259866d69d09e6383a88d
[ "BSD-3-Clause" ]
null
null
null
week_3/Cluster_ftest_spatiotemporal.ipynb
thearol/ACN_2021
07c888fd4601b7ff34f259866d69d09e6383a88d
[ "BSD-3-Clause" ]
13
2021-09-08T06:04:15.000Z
2021-09-21T11:54:17.000Z
32.606272
138
0.573627
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "This notebook is based on:\n\nhttps://mne.tools/stable/auto_tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.html", "_____no_output_____" ], [ "# Spatiotemporal permutation F-test on full sensor data\n\nTests for differential evoked responses in at least\none condition using a permutation clustering test.\nThe FieldTrip neighbor templates will be used to determine\nthe adjacency between sensors. This serves as a spatial prior\nto the clustering. Spatiotemporal clusters will then\nbe visualized using custom matplotlib code.", "_____no_output_____" ] ], [ [ "from mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom mne.stats import spatio_temporal_cluster_test\nfrom mne.channels import find_ch_adjacency\nfrom mne.viz import plot_compare_evokeds", "_____no_output_____" ] ], [ [ "## Read epochs\n\n", "_____no_output_____" ], [ "Your pipeline from the previous notebook(s) should go here. Basically from reading the raw to the epoching needs to be done here.\n\nOnce it is all epoched you can continue. _Remember to equalise your conditions!_\n\nThe MNE-python stats functions work on a numpy array with the shape: \n\n- n_observations $\\times$ n_times $\\times$ n_channels/n_vertices\n\nSo we need to extract the data and then transform it to the right shape. _Remember_ MNE-python epochs are in the shape:\n\n- n_observations $\\times$ n_channels/n_verticies $\\times$ n_times\n\nn_channels/n_verticies is because the functions works both on sensor space and source space data.\n\n\nYou should also select just two conditions, e.g. left vs right auditory or auditory vs visual.", "_____no_output_____" ] ], [ [ "X = [epochs[k].get_data() for k in event_dict] # as 3D matrix\nX = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering", "_____no_output_____" ] ], [ [ "## Find the FieldTrip neighbor definition to setup sensor adjacency\n\n", "_____no_output_____" ] ], [ [ "adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='eeg')\n\nprint(type(adjacency)) # it's a sparse matrix!\n\nplt.imshow(adjacency.toarray(), cmap='gray', origin='lower',\n interpolation='nearest')\nplt.xlabel('{} EEG'.format(len(ch_names)))\nplt.ylabel('{} EEG'.format(len(ch_names)))\nplt.title('Between-sensor adjacency')", "_____no_output_____" ] ], [ [ "## Compute permutation statistic\n\nHow does it work? We use clustering to \"bind\" together features which are\nsimilar. Our features are the magnetic fields measured over our sensor\narray at different times. This reduces the multiple comparison problem.\nTo compute the actual test-statistic, we first sum all F-values in all\nclusters. We end up with one statistic for each cluster.\nThen we generate a distribution from the data by shuffling our conditions\nbetween our samples and recomputing our clusters and the test statistics.\nWe test for the significance of a given cluster by computing the probability\nof observing a cluster of that size. For more background read:\nMaris/Oostenveld (2007), \"Nonparametric statistical testing of EEG- and\nMEG-data\" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 
177-190.\ndoi:10.1016/j.jneumeth.2007.03.024\n\n", "_____no_output_____" ], [ "## TASK\n\nLook up what the different parameters in the function does!", "_____no_output_____" ] ], [ [ "# set family-wise p-value\np_accept = 0.05\n\ncluster_stats = spatio_temporal_cluster_test(X, n_permutations=2000,\n threshold=None, tail=0,\n n_jobs=1, buffer_size=None,\n adjacency=adjacency)\n\nT_obs, clusters, p_values, _ = cluster_stats\ngood_cluster_inds = np.where(p_values < p_accept)[0]", "_____no_output_____" ] ], [ [ "Note. The same functions work with source estimate. The only differences\nare the origin of the data, the size, and the adjacency definition.\nIt can be used for single trials or for groups of subjects.\n\n## Visualize clusters\n\n", "_____no_output_____" ], [ "**Adjust the visualization to the conditions you have selected!**", "_____no_output_____" ] ], [ [ "# configure variables for visualization\ncolors = {\"Aud\": \"crimson\", \"Vis\": 'steelblue'}\nlinestyles = {\"L\": '-', \"R\": '--'}\n\n# organize data for plotting\nevokeds = {cond: epochs[cond].average() for cond in event_id}\n\n# loop over clusters\nfor i_clu, clu_idx in enumerate(good_cluster_inds):\n # unpack cluster information, get unique indices\n time_inds, space_inds = np.squeeze(clusters[clu_idx])\n ch_inds = np.unique(space_inds)\n time_inds = np.unique(time_inds)\n\n # get topography for F stat\n f_map = T_obs[time_inds, ...].mean(axis=0)\n\n # get signals at the sensors contributing to the cluster\n sig_times = epochs.times[time_inds]\n\n # create spatial mask\n mask = np.zeros((f_map.shape[0], 1), dtype=bool)\n mask[ch_inds, :] = True\n\n # initialize figure\n fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))\n\n # plot average test statistic and mark significant sensors\n f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)\n f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',\n vmin=np.min, vmax=np.max, show=False,\n colorbar=False, mask_params=dict(markersize=10))\n image = ax_topo.images[0]\n\n # create additional axes (for ERF and colorbar)\n divider = make_axes_locatable(ax_topo)\n\n # add axes for colorbar\n ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(image, cax=ax_colorbar)\n ax_topo.set_xlabel(\n 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))\n\n # add new axis for time courses and plot time courses\n ax_signals = divider.append_axes('right', size='300%', pad=1.2)\n title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))\n if len(ch_inds) > 1:\n title += \"s (mean)\"\n plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,\n colors=colors, linestyles=linestyles, show=False,\n split_legend=True, truncate_yaxis='auto')\n\n # plot temporal cluster extent\n ymin, ymax = ax_signals.get_ylim()\n ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],\n color='orange', alpha=0.3)\n\n # clean up viz\n mne.viz.tight_layout(fig=fig)\n fig.subplots_adjust(bottom=.05)\n plt.show()", "_____no_output_____" ] ], [ [ "## Exercises\n\n- What is the smallest p-value you can obtain, given the finite number of\n permutations?\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a6c10157aa9b3b9852c99bd5bffa4047f519acb
6,042
ipynb
Jupyter Notebook
workshop/Kappa - Collaborative Filtering.ipynb
dgarciaesc/docker-spark-recommendation
6e137c95b2f11343407f3f456e2c396d4f1529c2
[ "MIT" ]
null
null
null
workshop/Kappa - Collaborative Filtering.ipynb
dgarciaesc/docker-spark-recommendation
6e137c95b2f11343407f3f456e2c396d4f1529c2
[ "MIT" ]
null
null
null
workshop/Kappa - Collaborative Filtering.ipynb
dgarciaesc/docker-spark-recommendation
6e137c95b2f11343407f3f456e2c396d4f1529c2
[ "MIT" ]
null
null
null
27.715596
193
0.556769
[ [ [ "import os\nos.environ['PYSPARK_SUBMIT_ARGS'] = \\\n '--conf spark.cassandra.connection.host=cassandra --packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.0.2,com.datastax.spark:spark-cassandra-connector_2.11:2.0.2 pyspark-shell'", "_____no_output_____" ], [ "from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import *\nfrom pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating", "_____no_output_____" ], [ "sc = SparkContext(appName=\"BigDataRiver\")\nsc.setLogLevel(\"WARN\")\nsc.setCheckpointDir('checkpoint/')\nssc = StreamingContext(sc, 60)\nsql = SQLContext(sc)", "_____no_output_____" ], [ "kafkaStream = KafkaUtils.createDirectStream(ssc, ['bdr'], {\"metadata.broker.list\": 'kafka:9092'})", "_____no_output_____" ], [ "parsed = kafkaStream.map(lambda v: v[1])", "_____no_output_____" ], [ "#split is_purchase column into two\nseparateClicksSchema = StructType([ \n StructField(\"purchased_count\", LongType(), False),\n StructField(\"clicked_count\", LongType(), False)\n])\n\ndef separateClicks(is_purchase):\n return (is_purchase, 1-is_purchase)\n\nseparateClicks_udf = F.udf(separateClicks, separateClicksSchema)", "_____no_output_____" ], [ "def buildCFModel(train):\n def isProductToRating(productCount, clickCount):\n return (productCount * 3.0) + clickCount\n \n ratings = train.rdd.\\\n map(lambda r: Rating(r.user_id, r.product, isProductToRating(r.purchased_count, r.clicked_count)))\n rank = 10\n numIterations = 20\n lambdaFactor = 0.01\n alpha = 0.01\n seed = 42\n return ALS.trainImplicit(ratings, rank, numIterations, alpha, seed=seed)", "_____no_output_____" ], [ "def recommendTopProducts(dfModel):\n numberOfRecommendationsRequired = 5\n rdd = dfModel.recommendProductsForUsers(numberOfRecommendationsRequired)\n recommendations = rdd.map(lambda (user,ratings): (user, map(lambda r: r.product, ratings)))\n topRecommendationsSchema = StructType([\n StructField(\"user_id\", IntegerType(), False),\n StructField(\"recommended_products\", ArrayType(IntegerType()), False)\n ])\n return sql.createDataFrame(recommendations, topRecommendationsSchema)", "_____no_output_____" ], [ "def processStream(rdd):\n df = sql.read.json(rdd)\n if(len(df.columns)):\n #store updated counters in C*\n df.withColumn('c', separateClicks_udf(df['is_purchase'])).\\\n select(\"user_id\",\"product\",\"c.purchased_count\",\"c.clicked_count\").\\\n write.format(\"org.apache.spark.sql.cassandra\").mode('append').\\\n options(table=\"users_interests\", keyspace=\"bdr\").save()\n \n #read all data from C*\n usersInterests = sql.read.format(\"org.apache.spark.sql.cassandra\").\\\n options(table=\"users_interests\", keyspace=\"bdr\").load().cache()\n\n dfModel = buildCFModel(usersInterests.select(\"user_id\",\"product\",\"clicked_count\",\"purchased_count\"))\n top5 = recommendTopProducts(dfModel)\n top5.show()\n top5.write.format(\"org.apache.spark.sql.cassandra\").mode('append').options(table=\"cf\", keyspace=\"bdr\").save()\n \n print \"Saved\"\n else:\n print \"Empty\"", "_____no_output_____" ], [ "parsed.foreachRDD(lambda rdd: processStream(rdd))", "_____no_output_____" ], [ "ssc.start()\nssc.awaitTermination()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6c1a48c734f08ed539436c7d7719ac4fde6fe2
10,163
ipynb
Jupyter Notebook
SigoptParameterTuner.ipynb
jbliss1234/ML
454ddef8e046f967fe0ece322b4b878c45f5f277
[ "Apache-2.0" ]
1
2017-01-17T16:55:56.000Z
2017-01-17T16:55:56.000Z
SigoptParameterTuner.ipynb
jbliss1234/ML
454ddef8e046f967fe0ece322b4b878c45f5f277
[ "Apache-2.0" ]
null
null
null
SigoptParameterTuner.ipynb
jbliss1234/ML
454ddef8e046f967fe0ece322b4b878c45f5f277
[ "Apache-2.0" ]
null
null
null
38.496212
1,089
0.591066
[ [ [ "import os\nimport xgboost as xgb\nimport pandas as pd\nimport numpy as np\nfrom utils import encode_numeric_zscore_list, encode_numeric_zscore_all, to_xy, encode_text_index_list, encode_numeric_log_all\nfrom xgboost.sklearn import XGBClassifier, XGBRegressor\nfrom sklearn import datasets\nfrom sigopt_sklearn.search import SigOptSearchCV", "/home/arvc/anaconda3/envs/tensorflow/lib/python3.5/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n/home/arvc/anaconda3/envs/tensorflow/lib/python3.5/site-packages/sklearn/grid_search.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.\n DeprecationWarning)\n" ], [ "path = \"./data/allstate\"\ninputFilePath = os.path.join(path, \"train.csv.zip\")\ndf = pd.read_csv(inputFilePath, compression=\"zip\", header=0, na_values=['NULL'])\ndf = df.reindex(np.random.permutation(df.index))\ndf.reset_index(inplace=True, drop=True)\ndf.drop('id', axis=1, inplace=True)\n#df = df.sample(frac=0.01)\n\n#encode categoricals as dummies\nencode_text_index_list(df, ['cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14', 'cat15', 'cat16', 'cat17', 'cat18', 'cat19', 'cat20', 'cat21', 'cat22', 'cat23', 'cat24', 'cat25', 'cat26', 'cat27', 'cat28', 'cat29', 'cat30', 'cat31', 'cat32', 'cat33', 'cat34', 'cat35', 'cat36', 'cat37', 'cat38', 'cat39', 'cat40', 'cat41', 'cat42', 'cat43', 'cat44', 'cat45', 'cat46', 'cat47', 'cat48', 'cat49', 'cat50', 'cat51', 'cat52', 'cat53', 'cat54', 'cat55', 'cat56', 'cat57', 'cat58', 'cat59', 'cat60', 'cat61', 'cat62', 'cat63', 'cat64', 'cat65', 'cat66', 'cat67', 'cat68', 'cat69', 'cat70', 'cat71', 'cat72', 'cat73', 'cat74', 'cat75', 'cat76', 'cat77', 'cat78', 'cat79', 'cat80', 'cat81', 'cat82', 'cat83', 'cat84', 'cat85', 'cat86', 'cat87', 'cat88', 'cat89', 'cat90', 'cat91', 'cat92', 'cat93', 'cat94', 'cat95', 'cat96', 'cat97', 'cat98', 'cat99', 'cat100', 'cat101', 'cat102', 'cat103', 'cat104', 'cat105', 'cat106', 'cat107', 'cat108', 'cat109', 'cat110', 'cat111', 'cat112', 'cat113', 'cat114', 'cat115', 'cat116'])\n\n#encode all numeric values to zscored values\nencode_numeric_zscore_list(df, ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14'])\n\n#discard rows where z-score > 2\ndf.fillna(0)\n# Create x(predictors) and y (expected outcome)\nX,Y = to_xy(df, \"loss\")", "float64\n" ], [ "# find your SigOpt client token here : https://sigopt.com/user/profile\nclient_token = \"UAJKINHBEGLJVIYYMGWANLUPRORPFRLTJMESGZKNPTHKOSIW\"\n\nxgb_params = {\n 'learning_rate' : [0.01, 0.5],\n 'n_estimators' : [10, 70],\n 'max_depth':[3, 50],\n 'min_child_weight':[1, 15],\n 'gamma':[0, 1.0],\n 'subsample':[0.1, 1.0],\n 'colsample_bytree':[0.1, 1.0],\n 'max_delta_step': [1,15],\n 'colsample_bylevel': [0.1, 1.0],\n #'lamda': [1,5],\n #'alpha': [1,5],\n 'scale_pos_weight': [0,5],\n #'objective': 'reg:linear',\n #'booster': ['gblinear', 'gbtree'] ,\n #'eval_metric': 'mae',\n #'tree_method': ['exact', 'approx']\n}", 
"_____no_output_____" ], [ "xgb = XGBRegressor()\n\nclf = SigOptSearchCV(xgb, xgb_params, cv=5,\n client_token=client_token, n_jobs=25, n_iter=700, verbose=1)\n\nclf.fit(X, Y)", "Creating SigOpt experiment: XGBRegressor (sklearn)\nExperiment progress available at : https://sigopt.com/experiment/10601\nEvaluating params : [{'scale_pos_weight': 0, 'learning_rate': 0.45767903718668396, 'gamma': 0.6490173787833018, 'max_depth': 31, 'n_estimators': 18, 'colsample_bylevel': 0.6177293232669435, 'colsample_bytree': 0.8858130111489914, 'subsample': 0.2707613887264699, 'min_child_weight': 4, 'max_delta_step': 3}]\n" ], [ "a = XGBRegressor()\na.get_params().keys()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a6c266211174a817d021064f9ccb55efa447a75
90,207
ipynb
Jupyter Notebook
Kensho_Assessment_Model.ipynb
rtindru/CompStats
5c00a60fe231923e29b6b9dca7e0ee7cb1b61907
[ "MIT" ]
null
null
null
Kensho_Assessment_Model.ipynb
rtindru/CompStats
5c00a60fe231923e29b6b9dca7e0ee7cb1b61907
[ "MIT" ]
null
null
null
Kensho_Assessment_Model.ipynb
rtindru/CompStats
5c00a60fe231923e29b6b9dca7e0ee7cb1b61907
[ "MIT" ]
null
null
null
89.758209
25,718
0.725731
[ [ [ "<a href=\"https://colab.research.google.com/github/rtindru/CompStats/blob/master/Kensho_Assessment_Model.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (20, 4)\nimport seaborn as sns\nimport keras", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')\nPATH = \"/content/drive/My Drive/Kensho Asssessment/data/\"", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "tdf = pd.read_csv(PATH+'clean_train.csv')", "_____no_output_____" ], [ "tdf.head()", "_____no_output_____" ], [ "from keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\nMAX_NB_WORDS = 1000\nMAX_SEQUENCE_LENGTH = 50\nBATCH_SIZE = 200\nEPOCHS = 3\n\n# Data has temporality, sort so that we train on past data and predict on recent data \ntdf = tdf.sort_values('date')\n\n# Tokenize the word meanings\ntokenizer = Tokenizer(num_words=MAX_NB_WORDS)\ntokenizer.fit_on_texts(tdf.clean_tite)\nsequences = tokenizer.texts_to_sequences(tdf.clean_tite)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\nX = pd.DataFrame(pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH))\n\n# Label encode the word labels\nle = LabelEncoder()\ny = pd.DataFrame(le.fit_transform(tdf.subject))\nNUM_CLASSES = len(le.classes_)\n\n# Split data into train, test, and valid\ntrain_size = int(tdf.shape[0] * 0.75)\nX_train, X_test, y_train, y_test = X[:train_size], X[train_size:], y[:train_size], y[train_size:]\n\nprint('Classes: ', NUM_CLASSES)\nprint('Shape of data tensor:', X_train.shape)\nprint('Shape of label tensor:', y_train.shape)\nprint('Shape of data tensor:', X_test.shape)\nprint('Shape of label tensor:', y_test.shape)", "Found 31686 unique tokens.\nClasses: 30\nShape of data tensor: (132345, 50)\nShape of label tensor: (132345, 1)\nShape of data tensor: (44115, 50)\nShape of label tensor: (44115, 1)\n" ], [ "# # Preload the embedding matrix\n# EMBEDDING_DIM = 300 # same as the lenght of the keyed vector\n# import gensim\n\n# def get_coefficients(word, model):\n# \"\"\"\n# Helper method to return coeffs for a model; or zeros!\n# \"\"\"\n# try:\n# return model.get_vector(word)\n# except KeyError:\n# return np.zeros(model.wv.vector_size)\n\n# gn_model = gensim.models.KeyedVectors.load_word2vec_format('/content/drive/My Drive/petrichor_new/GoogleNews-vectors-negative300.bin.gz', binary=True)\n\n# embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\n# for word, i in word_index.items():\n# embedding_matrix[i] = get_coefficients(word, gn_model)", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:12: DeprecationWarning: Call to deprecated `wv` (Attribute will be removed in 4.0.0, use self instead).\n if sys.path[0] == '':\n" ], [ "EMBEDDING_DIM = 30 # same as the lenght of the keyed vector", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Dense, Embedding, Dropout, SpatialDropout1D, Bidirectional\nfrom keras.layers import LSTM\n\nNUM_SAMPLES = len(X_train)\nLSTM_DIM = 64 # Good to 
go with 32/64; can be stacked\nMAX_LEN = MAX_SEQUENCE_LENGTH\nNUM_CLASSES = len(le.classes_)\n\nmodel = Sequential()\nmodel.add(Embedding(input_dim=len(word_index)+1, output_dim=EMBEDDING_DIM, input_length=MAX_LEN))\nmodel.add(LSTM(LSTM_DIM, dropout=0.2, recurrent_dropout=0.2, input_length=MAX_SEQUENCE_LENGTH)) # input_shape=(1, 3000, )\nmodel.add(Dense(NUM_CLASSES, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', 'top_k_categorical_accuracy'])\nprint(model.summary())", "Model: \"sequential_14\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_9 (Embedding) (None, 50, 30) 950610 \n_________________________________________________________________\nlstm_14 (LSTM) (None, 64) 24320 \n_________________________________________________________________\ndense_11 (Dense) (None, 30) 1950 \n=================================================================\nTotal params: 976,880\nTrainable params: 976,880\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ], [ "_y_train = keras.utils.to_categorical(y_train, num_classes=NUM_CLASSES)\n_y_test = keras.utils.to_categorical(y_test, num_classes=NUM_CLASSES)\nhistory = model.fit(X_train, _y_train, validation_data=(X_test, _y_test), verbose=True, epochs=EPOCHS)", "Epoch 1/3\n4136/4136 [==============================] - 355s 84ms/step - loss: 2.0269 - accuracy: 0.3803 - top_k_categorical_accuracy: 0.7820 - val_loss: 1.7193 - val_accuracy: 0.5068 - val_top_k_categorical_accuracy: 0.8264\nEpoch 2/3\n4136/4136 [==============================] - 349s 84ms/step - loss: 1.5521 - accuracy: 0.5221 - top_k_categorical_accuracy: 0.8858 - val_loss: 1.6519 - val_accuracy: 0.5159 - val_top_k_categorical_accuracy: 0.8441\nEpoch 3/3\n4136/4136 [==============================] - 348s 84ms/step - loss: 1.5106 - accuracy: 0.5275 - top_k_categorical_accuracy: 0.8942 - val_loss: 1.6533 - val_accuracy: 0.5161 - val_top_k_categorical_accuracy: 0.8479\n" ], [ "# list all data in history\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "dict_keys(['loss', 'accuracy', 'top_k_categorical_accuracy', 'val_loss', 'val_accuracy', 'val_top_k_categorical_accuracy'])\n" ], [ "# Predict on test data\n", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "history = model.fit(X_train, _y_train, validation_data=(X_test, _y_test), verbose=True, epochs=EPOCHS)", "_____no_output_____" ], [ "vdf = pd.read_csv(PATH+'clean_val.csv')\nsequences = tokenizer.texts_to_sequences(vdf.clean_tite)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\nX_val = pd.DataFrame(pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH))", "Found 31686 unique tokens.\n" ], [ "y_pred = model.predict(X_val, )", "_____no_output_____" ], [ "vdf['subject'] = le.inverse_transform(np.argmax(y_pred, axis=1))\nvdf['month'] = vdf['date'].astype('datetime64').apply(lambda x: x.replace(day=1))", "_____no_output_____" ], [ "vdf.dtypes", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "vdf.head()", "_____no_output_____" ], [ "sub = vdf[['month', 'subject']].value_counts().reset_index(name=\"article_count\")", "_____no_output_____" ], [ "sub = pd.read_csv(PATH+'submission.csv')\nsub.head()", "_____no_output_____" ], [ "sub.sort_values('month').to_csv(PATH+'submsson0.csv')", "_____no_output_____" ], [ "y_train_pred = model.predict(pd.concat([X_train, X_test]))\ntdf['pred_sub'] = le.inverse_transform(np.argmax(y_train_pred, axis=1))", "_____no_output_____" ], [ "def mse(df1, df2):\n return np.sum(np.square(df1['count'] - df2['count']))/df1.shape[0]\n\ndf_true = tdf[['month', 'subject']].value_counts().reset_index(name='count').set_index(['month', 'subject']).sort_values(['month', 'subject'])\ndf_pred = tdf[['month', 'pred_sub']].value_counts().reset_index(name='count').set_index(['month', 'pred_sub']).sort_values(['month', 'pred_sub'])\n\nprint(df_true.head())\nprint(df_pred.head())\nprint(mse(df_true, df_pred))", " count\nmonth subject \n1992-03-01 00:00:00+00:00 condensed matter 1\n1992-04-01 00:00:00+00:00 condensed matter 18\n1992-05-01 00:00:00+00:00 condensed matter 16\n high energy physics - lattice 3\n high energy physics - theory 2\n count\nmonth pred_sub \n1992-03-01 00:00:00+00:00 statistical mechanics 1\n1992-04-01 00:00:00+00:00 condensed matter 1\n disordered systems and neural networks 1\n materials science 1\n mesoscale and nanoscale physics 1\n91764.10219399538\n" ], [ "from sklearn.metrics import classification_report\nprint(classification_report(tdf.pred_sub, tdf.subject))", "/usr/local/lib/python3.7/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.\n _warn_prf(average, modifier, msg_start, len(result))\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6c2cd88a03802f5a7121ff4c8516ddc2c67a55
46,368
ipynb
Jupyter Notebook
2585774.ipynb
geckwen/SkateDetection
e2f8716373079a839e0f48d51b239a9e3ae0e6dd
[ "Apache-2.0" ]
null
null
null
2585774.ipynb
geckwen/SkateDetection
e2f8716373079a839e0f48d51b239a9e3ae0e6dd
[ "Apache-2.0" ]
null
null
null
2585774.ipynb
geckwen/SkateDetection
e2f8716373079a839e0f48d51b239a9e3ae0e6dd
[ "Apache-2.0" ]
null
null
null
70.575342
337
0.638263
[ [ [ "<h1 id=\"header-ch\">2021 CCF BDCI基于飞桨实现花样滑冰选手骨骼点动作识别-第6名方案</h1>", "_____no_output_____" ], [ "# 赛题介绍\n\n人体运动分析是近几年许多领域研究的热点问题。在学科的交叉研究上,人体运动分析涉及到计算机科学、运动人体科学、环境行为学和材料科学等。随着研究的深入以及计算机视觉、5G通信的飞速发展,人体运动分析技术已应用于自动驾驶、影视创作、安防异常事件监测和体育竞技分析、康复等实际场景人体运动分析已成为人工智能领域研究的前沿课题。目前的研究数据普遍缺少细粒度语义信息,导致现存的分割或识别任务缺少时空细粒度动作语义模型。此类研究在竞技体育、运动康复、日常健身等方面有非常重大的意义。相比于图片的细粒度研究,时空细粒度语义的人体动作具有动作的类内方差大、类间方差小这一特点,这将导致由细粒度语义产生的一系列问题,利用粗粒度语义的识别模型进行学习难得获得理想的结果。\n\n基于实际需求以及图深度学习模型的发展,本比赛旨在构建基于骨骼点的细粒度人体动作识别方法。通过本赛题建立精度高、细粒度意义明确的动作识别模型,希望大家探索时空细粒度模型的新方法。\n", "_____no_output_____" ], [ "# RES2CTR-GCN介绍\n\n## 整体结构\n\n本算法是基于[CTR-GCN](https://arxiv.org/pdf/2107.12213v2.pdf)进行改进,采用多流同结构算法整体框架如下图所示:\n### 双流算法流程图\n<p align=\"center\">\n <img src=\"multi_stream.png\" width = \"500\" height = \"50\" alt=\"\" align=\"center\" />\n\n ### 单流算法流程图\n<p align=\"center\">\n <img src=\"model.png\" width = \"500\" height = \"50\" alt=\"\" align=\"center\" />\n\n### RES2CTR-GCN模块\n\n <img src=\"RES2CTR-GCN.png\" width = \"500\" height = \"50\" alt=\"\" align=\"center\" />\n <img src=\"CTR-GC.png\" width = \"500\" height = \"50\" alt=\"\" align=\"center\" />\n <img src=\"TEMORAL_MODELING.png\" width = \"500\" height = \"50\" alt=\"\" align=\"center\" />", "_____no_output_____" ], [ "## 数据增强/清洗策略\n\n### 数据流的构建\n\n本模型采用多流同结构 故需要对训练的数据流以及预测的数据流进行预先的构建。构建的代码在data文件夹下的get_train_data.py,get_test_data.py\n\n### 数据增强\n在训练的情况下,采用mixup数据增强的策略,", "_____no_output_____" ], [ "## AI模型开发过程、训练技巧、创新思路", "_____no_output_____" ], [ "## 下载githee模型代码", "_____no_output_____" ] ], [ [ "# 进入到gitclone 的 ccf 目录下\r\n%cd ~/work/\r\n!git clone https://gitee.com/mark_twain/ccf.git", "/home/aistudio/work\nfatal: destination path 'ccf' already exists and is not an empty directory.\n" ] ], [ [ "## 配置代码环境,安装相应的依赖包", "_____no_output_____" ] ], [ [ "%cd ~/work/ccf\r\n!python3.7 -m pip install --upgrade pip\r\n!python3.7 -m pip install --upgrade -r requirements.txt", "/home/aistudio/work/ccf\nLooking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\nRequirement already satisfied: pip in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (21.3.1)\nLooking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\nRequirement already satisfied: numpy in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 1)) (1.20.3)\nCollecting numpy\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/5b/0d/de55834c5ea0dd287cb1cb156c8bc120af2863c36e4d49b4dc28f174e278/numpy-1.21.4-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.7 MB)\n |████████████████████████████████| 15.7 MB 438 kB/s \n\u001b[?25hRequirement already satisfied: pandas in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 2)) (1.1.5)\nCollecting pandas\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/74/0f/118a4201f552e2b6adb63cfcde4d16c7b3ae545490d4107a9265e8462db8/pandas-1.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.3 MB)\n |████████████████████████████████| 11.3 MB 1.2 MB/s \n\u001b[?25hRequirement already satisfied: tqdm in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 3)) (4.36.1)\nCollecting tqdm\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/63/f3/b7a1b8e40fd1bd049a34566eb353527bb9b8e9b98f8b6cf803bb64d8ce95/tqdm-4.62.3-py2.py3-none-any.whl (76 kB)\n |████████████████████████████████| 76 kB 4.4 MB/s \n\u001b[?25hRequirement already satisfied: PyYAML>=5.1 in 
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 4)) (5.1.2)\nCollecting PyYAML>=5.1\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/eb/5f/6e6fe6904e1a9c67bc2ca5629a69e7a5a0b17f079da838bab98a1e548b25/PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n |████████████████████████████████| 596 kB 3.9 MB/s \n\u001b[?25hCollecting opencv-python==4.2.0.32\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/34/a3/403dbaef909fee9f9f6a8eaff51d44085a14e5bb1a1ff7257117d744986a/opencv_python-4.2.0.32-cp37-cp37m-manylinux1_x86_64.whl (28.2 MB)\n |████████████████████████████████| 28.2 MB 185 kB/s \n\u001b[?25hCollecting decord==0.4.2\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/c0/0c/7d99cfcde7b85f80c9ea9b0b19441339ad3cef59ee7fa5386598db714efe/decord-0.4.2-py2.py3-none-manylinux1_x86_64.whl (11.8 MB)\n |████████████████████████████████| 11.8 MB 1.6 MB/s \n\u001b[?25hCollecting av==8.0.3\n Downloading https://pypi.tuna.tsinghua.edu.cn/packages/66/ff/bacde7314c646a2bd2f240034809a10cc3f8b096751284d0828640fff3dd/av-8.0.3-cp37-cp37m-manylinux2010_x86_64.whl (37.2 MB)\n |████████████████████████████████| 37.2 MB 98 kB/s \n\u001b[?25hRequirement already satisfied: pytz>=2017.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas->-r requirements.txt (line 2)) (2019.3)\nRequirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas->-r requirements.txt (line 2)) (2.8.0)\nRequirement already satisfied: six>=1.5 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas->-r requirements.txt (line 2)) (1.15.0)\nInstalling collected packages: numpy, tqdm, PyYAML, pandas, opencv-python, decord, av\n Attempting uninstall: numpy\n Found existing installation: numpy 1.20.3\n Uninstalling numpy-1.20.3:\n Successfully uninstalled numpy-1.20.3\n Attempting uninstall: tqdm\n Found existing installation: tqdm 4.36.1\n Uninstalling tqdm-4.36.1:\n Successfully uninstalled tqdm-4.36.1\n Attempting uninstall: PyYAML\n Found existing installation: PyYAML 5.1.2\n Uninstalling PyYAML-5.1.2:\n Successfully uninstalled PyYAML-5.1.2\n Attempting uninstall: pandas\n Found existing installation: pandas 1.1.5\n Uninstalling pandas-1.1.5:\n Successfully uninstalled pandas-1.1.5\n Attempting uninstall: opencv-python\n Found existing installation: opencv-python 4.1.1.26\n Uninstalling opencv-python-4.1.1.26:\n Successfully uninstalled opencv-python-4.1.1.26\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\nblackhole 1.0.1 requires numpy<=1.19.5, but you have numpy 1.21.4 which is incompatible.\nblackhole 1.0.1 requires pandas<=1.1.5,>=0.24.0, but you have pandas 1.3.4 which is incompatible.\u001b[0m\nSuccessfully installed PyYAML-6.0 av-8.0.3 decord-0.4.2 numpy-1.21.4 opencv-python-4.2.0.32 pandas-1.3.4 tqdm-4.62.3\n" ] ], [ [ "## Unzip the Dataset and Move It into the Expected Folder", "_____no_output_____" ] ], [ [ "# unzip the dataset\r\n%cd ~/\r\n!unzip data/data118075/bdcidataset.zip", "/home/aistudio\nArchive: data/data118075/bdcidataset.zip\n inflating: test_A_data.npy \n inflating: test_B_data.npy \n inflating: train_label.npy \n inflating: train_data.npy \n" ], [ "# move the dataset into the ccf/data folder of the code\r\n!mv test_A_data.npy ./work/ccf/data/\r\n!mv test_B_data.npy ./work/ccf/data/\r\n!mv train_label.npy ./work/ccf/data/\r\n!mv train_data.npy ./work/ccf/data/", "_____no_output_____" ], [ "# change into the cloned ccf directory\n%cd ~/work/ccf/", "/home/aistudio/work/ccf\n" ] ], [ [ "## Configuration Files\n\nSince the model uses a three-stream architecture with a shared structure, there are three configuration files, one per input stream.\n\nThe three configuration files are:\n`configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml`\n\n`configs/recognition/ctrgcn/res2ctrgcn_keypoint_joint.yaml`\n\n`configs/recognition/ctrgcn/res2ctrgcn_keypoint_velocity.yaml`\n\n\n### Contents of a configuration file, using `configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml` as an example\nThe yaml configuration file selects the algorithm, the training parameters, and so on; here we use the `configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml` configuration file to train the RES2CTR-GCN model. From this configuration file we can read off the following information:\n\n### Network structure\n```yaml\nMODEL: \n framework: \"RecognizerGCN\" \n backbone: \n name: \"RES2CTRGCN\"\n head:\n name: \"CTRGCNHead\" \n num_classes: 30\n```\n\nThis says we use the RES2CTR-GCN algorithm: the framework is `RecognizerGCN`, the backbone is the spatio-temporal graph convolutional network `RES2CTR-GCN`, the head is the matching `CTRGCNHead`, and the loss is a `CrossEntropyLoss` computed with soft labels.\n\n\n### Data paths\n\n```yaml\nDATASET: \n batch_size: 8 \n num_workers: 4 \n test_batch_size: 1\n test_num_workers: 0\n train:\n format: \"SkeletonDataset\" \n file_path: \"data/train_bone_data.npy\" # set manually\n label_path: \"data/train_label.npy\" # set manually\n test:\n format: \"SkeletonDataset\" \n file_path: \"data/test_bone_B_data.npy\" # set manually\n test_mode: True\n```\n\nThe training data path is given by the `DATASET.train.file_path` field, the training label path by the `DATASET.train.label_path` field, and the test data path by the `DATASET.test.file_path` field. These three paths **must be configured manually by the user in the configuration file `configs/recognition/ctrgcn/res2ctrgcn_keypoint_joint.yaml`**. Example paths for this project are shown above.\n\n### Data processing\n\n```yaml\nPIPELINE: \n train: \n sample:\n name: \"SampleFrame\"\n window_size: 2000\n transform: \n - SkeletonNorm:\n test: \n sample:\n name: \"SampleFrame\"\n window_size: 2000\n transform: \n - SkeletonNorm:\n```\n\nData processing consists of two steps, `SampleFrame` and `SkeletonNorm`.\n\n### Optimizer\n\n```yaml\nOPTIMIZER: #OPTIMIZER field\n name: 'Momentum'\n momentum: 0.9\n learning_rate:\n iter_step: True\n name: 'CustomWarmupCosineDecay'\n max_epoch: 100\n warmup_epochs: 10\n warmup_start_lr: 0.005\n cosine_base_lr: 0.1\n```\n\nTraining uses the `Momentum` optimizer, and the learning-rate schedule is `CustomWarmupCosineDecay`.\n\n", "_____no_output_____" ], [ "## One-Command Training Script\n### Launch command\n```bash\nbash train.sh\n```\n\nYou will see training logs similar to the following\n```txt\n[11/14 13:47:39] \u001b[35mepoch:[ 1/100]\u001b[0m \u001b[95mtrain step:0 \u001b[0m \u001b[92mloss: 3.45860 lr: 0.005000 top1: 0.11111 top5: 0.11111\u001b[0m \u001b[92mbatch_cost: 2.37207 sec,\u001b[0m \u001b[92mreader_cost: 0.15600 sec,\u001b[0m ips: 3.79416 instance/sec.\n[11/14 13:47:48] epoch:[ 1/100] \u001b[95mtrain step:10 \u001b[0m \u001b[92mloss: 3.21713 lr: 0.005286 top1: 0.00000 top5: 0.42159\u001b[0m \u001b[92mbatch_cost: 0.96801 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.29743 
instance/sec.\n[11/14 13:47:58] epoch:[ 1/100] \u001b[95mtrain step:20 \u001b[0m \u001b[92mloss: 3.16619 lr: 0.005571 top1: 0.21693 top5: 0.32804\u001b[0m \u001b[92mbatch_cost: 0.98901 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.10002 instance/sec.\n[11/14 13:48:08] epoch:[ 1/100] \u001b[95mtrain step:30 \u001b[0m \u001b[92mloss: 2.65808 lr: 0.005857 top1: 0.22222 top5: 0.55555\u001b[0m \u001b[92mbatch_cost: 0.95300 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.44386 instance/sec.\n[11/14 13:48:17] epoch:[ 1/100] \u001b[95mtrain step:40 \u001b[0m \u001b[92mloss: 2.95422 lr: 0.006143 top1: 0.00000 top5: 0.49274\u001b[0m \u001b[92mbatch_cost: 0.92300 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.75082 instance/sec.\n[11/14 13:48:27] epoch:[ 1/100] \u001b[95mtrain step:50 \u001b[0m \u001b[92mloss: 3.37573 lr: 0.006428 top1: 0.07475 top5: 0.29899\u001b[0m \u001b[92mbatch_cost: 0.92377 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.74270 instance/sec.\n[11/14 13:48:36] epoch:[ 1/100] \u001b[95mtrain step:60 \u001b[0m \u001b[92mloss: 2.69240 lr: 0.006714 top1: 0.33333 top5: 0.58678\u001b[0m \u001b[92mbatch_cost: 0.91400 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.84682 instance/sec.\n[11/14 13:48:46] epoch:[ 1/100] \u001b[95mtrain step:70 \u001b[0m \u001b[92mloss: 2.88237 lr: 0.007000 top1: 0.00000 top5: 0.53872\u001b[0m \u001b[92mbatch_cost: 0.91900 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.79326 instance/sec.\n[11/14 13:48:55] epoch:[ 1/100] \u001b[95mtrain step:80 \u001b[0m \u001b[92mloss: 3.31014 lr: 0.007285 top1: 0.11111 top5: 0.44141\u001b[0m \u001b[92mbatch_cost: 0.95794 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.39519 instance/sec.\n[11/14 13:49:05] epoch:[ 1/100] \u001b[95mtrain step:90 \u001b[0m \u001b[92mloss: 2.97485 lr: 0.007571 top1: 0.11111 top5: 0.55454\u001b[0m \u001b[92mbatch_cost: 0.99122 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.07971 instance/sec.\n[11/14 13:49:15] epoch:[ 1/100] \u001b[95mtrain step:100 \u001b[0m \u001b[92mloss: 3.22369 lr: 0.007857 top1: 0.00000 top5: 0.42589\u001b[0m \u001b[92mbatch_cost: 1.02620 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 8.77022 instance/sec.\n[11/14 13:49:24] epoch:[ 1/100] \u001b[95mtrain step:110 \u001b[0m \u001b[92mloss: 2.41740 lr: 0.008142 top1: 0.52294 top5: 0.83453\u001b[0m \u001b[92mbatch_cost: 0.98703 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.11828 instance/sec.\n[11/14 13:49:34] epoch:[ 1/100] \u001b[95mtrain step:120 \u001b[0m \u001b[92mloss: 2.87388 lr: 0.008428 top1: 0.00000 top5: 0.62665\u001b[0m \u001b[92mbatch_cost: 0.93400 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.63598 instance/sec.\n[11/14 13:49:43] epoch:[ 1/100] \u001b[95mtrain step:130 \u001b[0m \u001b[92mloss: 2.71671 lr: 0.008714 top1: 0.33333 top5: 0.73594\u001b[0m \u001b[92mbatch_cost: 1.01655 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 8.85346 instance/sec.\n[11/14 13:49:53] epoch:[ 1/100] \u001b[95mtrain step:140 \u001b[0m \u001b[92mloss: 2.67541 lr: 0.008999 top1: 0.33332 top5: 0.77776\u001b[0m \u001b[92mbatch_cost: 0.91100 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.87926 instance/sec.\n[11/14 13:50:02] epoch:[ 1/100] \u001b[95mtrain step:150 \u001b[0m \u001b[92mloss: 3.34374 lr: 0.009285 top1: 0.00000 top5: 0.40388\u001b[0m \u001b[92mbatch_cost: 0.92400 sec,\u001b[0m 
\u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.74027 instance/sec.\n[11/14 13:50:11] epoch:[ 1/100] \u001b[95mtrain step:160 \u001b[0m \u001b[92mloss: 3.10928 lr: 0.009571 top1: 0.11111 top5: 0.55556\u001b[0m \u001b[92mbatch_cost: 0.92956 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.68198 instance/sec.\n[11/14 13:50:21] epoch:[ 1/100] \u001b[95mtrain step:170 \u001b[0m \u001b[92mloss: 3.18118 lr: 0.009856 top1: 0.00000 top5: 0.55554\u001b[0m \u001b[92mbatch_cost: 0.92963 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.68124 instance/sec.\n[11/14 13:50:30] epoch:[ 1/100] \u001b[95mtrain step:180 \u001b[0m \u001b[92mloss: 2.78696 lr: 0.010142 top1: 0.33303 top5: 0.66576\u001b[0m \u001b[92mbatch_cost: 0.92700 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.70872 instance/sec.\n[11/14 13:50:39] epoch:[ 1/100] \u001b[95mtrain step:190 \u001b[0m \u001b[92mloss: 3.06701 lr: 0.010427 top1: 0.22962 top5: 0.52839\u001b[0m \u001b[92mbatch_cost: 0.93000 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.67740 instance/sec.\n[11/14 13:50:48] epoch:[ 1/100] \u001b[95mtrain step:200 \u001b[0m \u001b[92mloss: 2.81732 lr: 0.010713 top1: 0.00000 top5: 0.66872\u001b[0m \u001b[92mbatch_cost: 0.92860 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.69201 instance/sec.\n[11/14 13:50:58] epoch:[ 1/100] \u001b[95mtrain step:210 \u001b[0m \u001b[92mloss: 2.74136 lr: 0.010999 top1: 0.22222 top5: 0.68260\u001b[0m \u001b[92mbatch_cost: 0.93100 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.66704 instance/sec.\n[11/14 13:51:07] epoch:[ 1/100] \u001b[95mtrain step:220 \u001b[0m \u001b[92mloss: 3.03122 lr: 0.011284 top1: 0.19732 top5: 0.44444\u001b[0m \u001b[92mbatch_cost: 0.95900 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.38478 instance/sec.\n[11/14 13:51:16] epoch:[ 1/100] \u001b[95mtrain step:230 \u001b[0m \u001b[92mloss: 2.72677 lr: 0.011570 top1: 0.21040 top5: 0.60753\u001b[0m \u001b[92mbatch_cost: 0.92000 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.78263 instance/sec.\n[11/14 13:51:26] epoch:[ 1/100] \u001b[95mtrain step:240 \u001b[0m \u001b[92mloss: 2.53266 lr: 0.011856 top1: 0.42706 top5: 0.64349\u001b[0m \u001b[92mbatch_cost: 0.92751 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.70338 instance/sec.\n[11/14 13:51:35] epoch:[ 1/100] \u001b[95mtrain step:250 \u001b[0m \u001b[92mloss: 2.67476 lr: 0.012141 top1: 0.11108 top5: 0.77764\u001b[0m \u001b[92mbatch_cost: 0.93000 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.67742 instance/sec.\n[11/14 13:51:44] epoch:[ 1/100] \u001b[95mtrain step:260 \u001b[0m \u001b[92mloss: 2.96337 lr: 0.012427 top1: 0.09552 top5: 0.52438\u001b[0m \u001b[92mbatch_cost: 0.93500 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.62569 instance/sec.\n[11/14 13:51:53] epoch:[ 1/100] \u001b[95mtrain step:270 \u001b[0m \u001b[92mloss: 3.01553 lr: 0.012713 top1: 0.22222 top5: 0.66056\u001b[0m \u001b[92mbatch_cost: 0.91700 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.81461 instance/sec.\n[11/14 13:52:03] epoch:[ 1/100] \u001b[95mtrain step:280 \u001b[0m \u001b[92mloss: 3.02630 lr: 0.012998 top1: 0.22222 top5: 0.51143\u001b[0m \u001b[92mbatch_cost: 0.91700 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.81463 instance/sec.\n[11/14 13:52:12] epoch:[ 1/100] \u001b[95mtrain step:290 \u001b[0m \u001b[92mloss: 2.64080 lr: 0.013284 top1: 0.10819 top5: 
0.65205\u001b[0m \u001b[92mbatch_cost: 0.93278 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 9.64855 instance/sec.\n[11/14 13:52:21] epoch:[ 1/100] \u001b[95mtrain step:300 \u001b[0m \u001b[92mloss: 2.56226 lr: 0.013570 top1: 0.33230 top5: 0.77640\u001b[0m \u001b[92mbatch_cost: 0.92300 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.75082 instance/sec.\n[11/14 13:52:30] epoch:[ 1/100] \u001b[95mtrain step:310 \u001b[0m \u001b[92mloss: 2.31375 lr: 0.013855 top1: 0.21937 top5: 0.87747\u001b[0m \u001b[92mbatch_cost: 0.91700 sec,\u001b[0m \u001b[92mreader_cost: 0.00100 sec,\u001b[0m ips: 9.81463 instance/sec.\n[11/14 13:52:40] epoch:[ 1/100] \u001b[95mtrain step:320 \u001b[0m \u001b[92mloss: 2.93230 lr: 0.014141 top1: 0.11111 top5: 0.44344\u001b[0m \u001b[92mbatch_cost: 0.88833 sec,\u001b[0m \u001b[92mreader_cost: 0.00000 sec,\u001b[0m ips: 10.13133 instance/sec.\n[11/14 13:52:42] [31mEND epoch:1 [0m [95mtrain\u001b[0m \u001b[92mloss_avg: 2.93599 top1_avg: 0.14609 top5_avg: 0.56468\u001b[0m \u001b[92mavg_batch_cost: 0.93686 sec,\u001b[0m \u001b[92mavg_reader_cost: 0.00000 sec,\u001b[0m \u001b[92mbatch_cost_sum: 305.87457 sec,\u001b[0m avg_ips: 9.53332 instance/sec.\n```\n\n#### Note\nPlease run this module with the <span style='color:red'>GPU version</span> of the environment", "_____no_output_____" ] ], [ [ "# start training\r\n!bash train.sh", "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\nRequirement already satisfied: numpy in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 1)) (1.21.4)\nRequirement already satisfied: pandas in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 2)) (1.3.4)\nRequirement already satisfied: tqdm in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 3)) (4.62.3)\nRequirement already satisfied: PyYAML>=5.1 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 4)) (6.0)\nRequirement already satisfied: opencv-python==4.2.0.32 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 5)) (4.2.0.32)\nRequirement already satisfied: decord==0.4.2 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 6)) (0.4.2)\nRequirement already satisfied: av==8.0.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from -r requirements.txt (line 7)) (8.0.3)\nRequirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas->-r requirements.txt (line 2)) (2.8.0)\nRequirement already satisfied: pytz>=2017.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from pandas->-r requirements.txt (line 2)) (2019.3)\nRequirement already satisfied: six>=1.5 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas->-r requirements.txt (line 2)) (1.15.0)\n100%|███████████████████████████████████████████| 25/25 [00:15<00:00, 1.60it/s]\n100%|██████████████████████████████████████| 2499/2499 [00:04<00:00, 526.70it/s]\n[11/24 14:18:04] DALI is not installed, you can improve performance if use DALI\n[11/24 14:18:04] \u001b[35mDATASET\u001b[0m : \n[11/24 14:18:04] \u001b[35mbatch_size\u001b[0m : \u001b[92m9\u001b[0m\n[11/24 14:18:04] \u001b[35mnum_workers\u001b[0m : \u001b[92m4\u001b[0m\n[11/24 14:18:04] \u001b[35mtest\u001b[0m : \n[11/24 14:18:04] 
\u001b[35mfile_path\u001b[0m : \u001b[92mdata/test_B_data.npy\u001b[0m\n[11/24 14:18:04] \u001b[35mformat\u001b[0m : \u001b[92mSkeletonDataset\u001b[0m\n[11/24 14:18:04] \u001b[35mtest_mode\u001b[0m : \u001b[92mTrue\u001b[0m\n[11/24 14:18:04] \u001b[35mtest_batch_size\u001b[0m : \u001b[92m1\u001b[0m\n[11/24 14:18:04] \u001b[35mtest_num_workers\u001b[0m : \u001b[92m0\u001b[0m\n[11/24 14:18:04] \u001b[35mtrain\u001b[0m : \n[11/24 14:18:04] \u001b[35mfile_path\u001b[0m : \u001b[92mdata/train_data.npy\u001b[0m\n[11/24 14:18:04] \u001b[35mformat\u001b[0m : \u001b[92mSkeletonDataset\u001b[0m\n[11/24 14:18:04] \u001b[35mlabel_path\u001b[0m : \u001b[92mdata/train_label.npy\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mINFERENCE\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mSTGCN_Inference_helper\u001b[0m\n[11/24 14:18:04] \u001b[35mnum_channels\u001b[0m : \u001b[92m2\u001b[0m\n[11/24 14:18:04] \u001b[35mperson_nums\u001b[0m : \u001b[92m1\u001b[0m\n[11/24 14:18:04] \u001b[35mvertex_nums\u001b[0m : \u001b[92m25\u001b[0m\n[11/24 14:18:04] \u001b[35mwindow_size\u001b[0m : \u001b[92m2000\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mMETRIC\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mSkeletonMetric\u001b[0m\n[11/24 14:18:04] \u001b[35mout_file\u001b[0m : \u001b[92msubmission.csv\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mMIX\u001b[0m : \n[11/24 14:18:04] \u001b[35malpha\u001b[0m : \u001b[92m0.4\u001b[0m\n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mMixup\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mMODEL\u001b[0m : \n[11/24 14:18:04] \u001b[35mbackbone\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mRES2CTRGCN\u001b[0m\n[11/24 14:18:04] \u001b[35mframework\u001b[0m : \u001b[92mRecognizerGCN\u001b[0m\n[11/24 14:18:04] \u001b[35mhead\u001b[0m : \n[11/24 14:18:04] \u001b[35min_channels\u001b[0m : \u001b[92m256\u001b[0m\n[11/24 14:18:04] \u001b[35mls_eps\u001b[0m : \u001b[92m0.1\u001b[0m\n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mCTRGCNHead\u001b[0m\n[11/24 14:18:04] \u001b[35mnum_classes\u001b[0m : \u001b[92m30\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mOPTIMIZER\u001b[0m : \n[11/24 14:18:04] \u001b[35mlearning_rate\u001b[0m : \n[11/24 14:18:04] \u001b[35mcosine_base_lr\u001b[0m : \u001b[92m0.1\u001b[0m\n[11/24 14:18:04] \u001b[35miter_step\u001b[0m : \u001b[92mTrue\u001b[0m\n[11/24 14:18:04] \u001b[35mmax_epoch\u001b[0m : \u001b[92m100\u001b[0m\n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mCustomWarmupCosineDecay\u001b[0m\n[11/24 14:18:04] \u001b[35mwarmup_epochs\u001b[0m : \u001b[92m10\u001b[0m\n[11/24 14:18:04] \u001b[35mwarmup_start_lr\u001b[0m : \u001b[92m0.005\u001b[0m\n[11/24 14:18:04] \u001b[35mmomentum\u001b[0m : \u001b[92m0.9\u001b[0m\n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mMomentum\u001b[0m\n[11/24 14:18:04] \u001b[35muse_nesterov\u001b[0m : \u001b[92mTrue\u001b[0m\n[11/24 14:18:04] \u001b[35mweight_decay\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mL2\u001b[0m\n[11/24 14:18:04] \u001b[35mvalue\u001b[0m : \u001b[92m0.0004\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] 
\u001b[35mPIPELINE\u001b[0m : \n[11/24 14:18:04] \u001b[35mtest\u001b[0m : \n[11/24 14:18:04] \u001b[35msample\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mAutoPadding\u001b[0m\n[11/24 14:18:04] \u001b[35mwindow_size\u001b[0m : \u001b[92m2000\u001b[0m\n[11/24 14:18:04] \u001b[35mtransform\u001b[0m : \n[11/24 14:18:04] \u001b[35mSkeletonNorm\u001b[0m : \u001b[92mNone\u001b[0m\n[11/24 14:18:04] \u001b[35mtrain\u001b[0m : \n[11/24 14:18:04] \u001b[35msample\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mAutoPadding\u001b[0m\n[11/24 14:18:04] \u001b[35mwindow_size\u001b[0m : \u001b[92m2000\u001b[0m\n[11/24 14:18:04] \u001b[35mtransform\u001b[0m : \n[11/24 14:18:04] \u001b[35mSkeletonNorm\u001b[0m : \u001b[92mNone\u001b[0m\n[11/24 14:18:04] \u001b[35mvalid\u001b[0m : \n[11/24 14:18:04] \u001b[35msample\u001b[0m : \n[11/24 14:18:04] \u001b[35mname\u001b[0m : \u001b[92mAutoPadding\u001b[0m\n[11/24 14:18:04] \u001b[35mwindow_size\u001b[0m : \u001b[92m2000\u001b[0m\n[11/24 14:18:04] \u001b[35mtransform\u001b[0m : \n[11/24 14:18:04] \u001b[35mSkeletonNorm\u001b[0m : \u001b[92mNone\u001b[0m\n[11/24 14:18:04] ------------------------------------------------------------\n[11/24 14:18:04] \u001b[35mepochs\u001b[0m : \u001b[92m100\u001b[0m\n[11/24 14:18:04] \u001b[35mlog_interval\u001b[0m : \u001b[92m10\u001b[0m\n[11/24 14:18:04] \u001b[35mmodel_name\u001b[0m : \u001b[92mRES2CTRGCNJOINT\u001b[0m\nW1124 14:18:06.581346 398 device_context.cc:404] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.0, Runtime API Version: 10.1\nW1124 14:18:06.585685 398 device_context.cc:422] device: 0, cuDNN Version: 7.6.\n[11/24 14:18:08] Loading data, it will take some moment...\n[11/24 14:18:10] Data Loaded!\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:641: UserWarning: When training, we now always track global mean and variance.\n \"When training, we now always track global mean and variance.\")\n[11/24 14:18:13] \u001b[35mepoch:[ 1/100]\u001b[0m \u001b[95mtrain step:0 \u001b[0m \u001b[92mloss: 3.45017 lr: 0.005000 top1: 0.02511 top5: 0.11111\u001b[0m \u001b[92mbatch_cost: 2.94600 sec,\u001b[0m \u001b[92mreader_cost: 1.48622 sec,\u001b[0m ips: 3.05499 instance/sec.\n[11/24 14:18:26] epoch:[ 1/100] \u001b[95mtrain step:10 \u001b[0m \u001b[92mloss: 3.16616 lr: 0.005286 top1: 0.00000 top5: 0.54298\u001b[0m \u001b[92mbatch_cost: 1.30128 sec,\u001b[0m \u001b[92mreader_cost: 0.00029 sec,\u001b[0m ips: 6.91627 instance/sec.\n[11/24 14:18:39] epoch:[ 1/100] \u001b[95mtrain step:20 \u001b[0m \u001b[92mloss: 3.06005 lr: 0.005571 top1: 0.16683 top5: 0.50016\u001b[0m \u001b[92mbatch_cost: 1.28639 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 6.99635 instance/sec.\n[11/24 14:18:52] epoch:[ 1/100] \u001b[95mtrain step:30 \u001b[0m \u001b[92mloss: 2.81017 lr: 0.005857 top1: 0.00000 top5: 0.62779\u001b[0m \u001b[92mbatch_cost: 1.29926 sec,\u001b[0m \u001b[92mreader_cost: 0.00032 sec,\u001b[0m ips: 6.92704 instance/sec.\n[11/24 14:19:05] epoch:[ 1/100] \u001b[95mtrain step:40 \u001b[0m \u001b[92mloss: 2.81232 lr: 0.006143 top1: 0.11111 top5: 0.66515\u001b[0m \u001b[92mbatch_cost: 1.28826 sec,\u001b[0m \u001b[92mreader_cost: 0.00030 sec,\u001b[0m ips: 6.98615 instance/sec.\n[11/24 14:19:18] epoch:[ 1/100] \u001b[95mtrain step:50 \u001b[0m \u001b[92mloss: 3.13386 lr: 0.006428 top1: 0.06785 top5: 0.44444\u001b[0m \u001b[92mbatch_cost: 1.28672 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 
6.99453 instance/sec.\n[11/24 14:19:31] epoch:[ 1/100] \u001b[95mtrain step:60 \u001b[0m \u001b[92mloss: 2.67763 lr: 0.006714 top1: 0.43005 top5: 0.43965\u001b[0m \u001b[92mbatch_cost: 1.29626 sec,\u001b[0m \u001b[92mreader_cost: 0.00033 sec,\u001b[0m ips: 6.94306 instance/sec.\n[11/24 14:19:44] epoch:[ 1/100] \u001b[95mtrain step:70 \u001b[0m \u001b[92mloss: 3.10281 lr: 0.007000 top1: 0.00000 top5: 0.59629\u001b[0m \u001b[92mbatch_cost: 1.28682 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.99400 instance/sec.\n[11/24 14:19:57] epoch:[ 1/100] \u001b[95mtrain step:80 \u001b[0m \u001b[92mloss: 3.27048 lr: 0.007285 top1: 0.11111 top5: 0.43207\u001b[0m \u001b[92mbatch_cost: 1.28918 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 6.98119 instance/sec.\n[11/24 14:20:09] epoch:[ 1/100] \u001b[95mtrain step:90 \u001b[0m \u001b[92mloss: 2.97861 lr: 0.007571 top1: 0.00000 top5: 0.43763\u001b[0m \u001b[92mbatch_cost: 1.28517 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 7.00297 instance/sec.\n[11/24 14:20:22] epoch:[ 1/100] \u001b[95mtrain step:100 \u001b[0m \u001b[92mloss: 3.02766 lr: 0.007857 top1: 0.00000 top5: 0.50840\u001b[0m \u001b[92mbatch_cost: 1.28793 sec,\u001b[0m \u001b[92mreader_cost: 0.00031 sec,\u001b[0m ips: 6.98794 instance/sec.\n[11/24 14:20:35] epoch:[ 1/100] \u001b[95mtrain step:110 \u001b[0m \u001b[92mloss: 2.51231 lr: 0.008142 top1: 0.53653 top5: 0.92390\u001b[0m \u001b[92mbatch_cost: 1.28473 sec,\u001b[0m \u001b[92mreader_cost: 0.00023 sec,\u001b[0m ips: 7.00535 instance/sec.\n[11/24 14:20:48] epoch:[ 1/100] \u001b[95mtrain step:120 \u001b[0m \u001b[92mloss: 2.72213 lr: 0.008428 top1: 0.10854 top5: 0.54784\u001b[0m \u001b[92mbatch_cost: 1.28522 sec,\u001b[0m \u001b[92mreader_cost: 0.00025 sec,\u001b[0m ips: 7.00270 instance/sec.\n[11/24 14:21:01] epoch:[ 1/100] \u001b[95mtrain step:130 \u001b[0m \u001b[92mloss: 3.02842 lr: 0.008714 top1: 0.19167 top5: 0.51391\u001b[0m \u001b[92mbatch_cost: 1.28688 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.99368 instance/sec.\n[11/24 14:21:14] epoch:[ 1/100] \u001b[95mtrain step:140 \u001b[0m \u001b[92mloss: 2.88236 lr: 0.008999 top1: 0.10893 top5: 0.44226\u001b[0m \u001b[92mbatch_cost: 1.28647 sec,\u001b[0m \u001b[92mreader_cost: 0.00032 sec,\u001b[0m ips: 6.99591 instance/sec.\n[11/24 14:21:27] epoch:[ 1/100] \u001b[95mtrain step:150 \u001b[0m \u001b[92mloss: 2.96539 lr: 0.009285 top1: 0.09894 top5: 0.71692\u001b[0m \u001b[92mbatch_cost: 1.28646 sec,\u001b[0m \u001b[92mreader_cost: 0.00025 sec,\u001b[0m ips: 6.99594 instance/sec.\n[11/24 14:21:39] epoch:[ 1/100] \u001b[95mtrain step:160 \u001b[0m \u001b[92mloss: 2.93765 lr: 0.009571 top1: 0.09437 top5: 0.58297\u001b[0m \u001b[92mbatch_cost: 1.28600 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.99847 instance/sec.\n[11/24 14:21:52] epoch:[ 1/100] \u001b[95mtrain step:170 \u001b[0m \u001b[92mloss: 3.08934 lr: 0.009856 top1: 0.00000 top5: 0.44188\u001b[0m \u001b[92mbatch_cost: 1.28555 sec,\u001b[0m \u001b[92mreader_cost: 0.00029 sec,\u001b[0m ips: 7.00090 instance/sec.\n[11/24 14:22:05] epoch:[ 1/100] \u001b[95mtrain step:180 \u001b[0m \u001b[92mloss: 2.85335 lr: 0.010142 top1: 0.20229 top5: 0.60688\u001b[0m \u001b[92mbatch_cost: 1.28826 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.98615 instance/sec.\n[11/24 14:22:18] epoch:[ 1/100] \u001b[95mtrain step:190 \u001b[0m \u001b[92mloss: 2.72741 lr: 0.010427 top1: 0.33333 top5: 0.44444\u001b[0m \u001b[92mbatch_cost: 1.29118 
sec,\u001b[0m \u001b[92mreader_cost: 0.00033 sec,\u001b[0m ips: 6.97038 instance/sec.\n[11/24 14:22:31] epoch:[ 1/100] \u001b[95mtrain step:200 \u001b[0m \u001b[92mloss: 2.50079 lr: 0.010713 top1: 0.44444 top5: 0.55555\u001b[0m \u001b[92mbatch_cost: 1.28481 sec,\u001b[0m \u001b[92mreader_cost: 0.00024 sec,\u001b[0m ips: 7.00494 instance/sec.\n[11/24 14:22:44] epoch:[ 1/100] \u001b[95mtrain step:210 \u001b[0m \u001b[92mloss: 2.97883 lr: 0.010999 top1: 0.11111 top5: 0.55556\u001b[0m \u001b[92mbatch_cost: 1.28620 sec,\u001b[0m \u001b[92mreader_cost: 0.00025 sec,\u001b[0m ips: 6.99738 instance/sec.\n[11/24 14:22:57] epoch:[ 1/100] \u001b[95mtrain step:220 \u001b[0m \u001b[92mloss: 3.08429 lr: 0.011284 top1: 0.20639 top5: 0.33333\u001b[0m \u001b[92mbatch_cost: 1.28693 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.99339 instance/sec.\n[11/24 14:23:10] epoch:[ 1/100] \u001b[95mtrain step:230 \u001b[0m \u001b[92mloss: 2.68696 lr: 0.011570 top1: 0.21711 top5: 0.54788\u001b[0m \u001b[92mbatch_cost: 1.28948 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 6.97954 instance/sec.\n[11/24 14:23:22] epoch:[ 1/100] \u001b[95mtrain step:240 \u001b[0m \u001b[92mloss: 2.71186 lr: 0.011856 top1: 0.17174 top5: 0.65157\u001b[0m \u001b[92mbatch_cost: 1.28853 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 6.98470 instance/sec.\n[11/24 14:23:35] epoch:[ 1/100] \u001b[95mtrain step:250 \u001b[0m \u001b[92mloss: 2.78273 lr: 0.012141 top1: 0.10723 top5: 0.64724\u001b[0m \u001b[92mbatch_cost: 1.28744 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.99063 instance/sec.\n[11/24 14:23:48] epoch:[ 1/100] \u001b[95mtrain step:260 \u001b[0m \u001b[92mloss: 3.20129 lr: 0.012427 top1: 0.07988 top5: 0.49309\u001b[0m \u001b[92mbatch_cost: 1.29022 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.97554 instance/sec.\n[11/24 14:24:01] epoch:[ 1/100] \u001b[95mtrain step:270 \u001b[0m \u001b[92mloss: 2.94801 lr: 0.012713 top1: 0.22177 top5: 0.55465\u001b[0m \u001b[92mbatch_cost: 1.28802 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.98745 instance/sec.\n[11/24 14:24:14] epoch:[ 1/100] \u001b[95mtrain step:280 \u001b[0m \u001b[92mloss: 3.25182 lr: 0.012998 top1: 0.17108 top5: 0.45328\u001b[0m \u001b[92mbatch_cost: 1.28791 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.98804 instance/sec.\n[11/24 14:24:27] epoch:[ 1/100] \u001b[95mtrain step:290 \u001b[0m \u001b[92mloss: 2.85983 lr: 0.013284 top1: 0.10405 top5: 0.52729\u001b[0m \u001b[92mbatch_cost: 1.28742 sec,\u001b[0m \u001b[92mreader_cost: 0.00025 sec,\u001b[0m ips: 6.99075 instance/sec.\n[11/24 14:24:40] epoch:[ 1/100] \u001b[95mtrain step:300 \u001b[0m \u001b[92mloss: 2.92792 lr: 0.013570 top1: 0.33093 top5: 0.55074\u001b[0m \u001b[92mbatch_cost: 1.28779 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.98871 instance/sec.\n[11/24 14:24:53] epoch:[ 1/100] \u001b[95mtrain step:310 \u001b[0m \u001b[92mloss: 2.44690 lr: 0.013855 top1: 0.10594 top5: 0.85785\u001b[0m \u001b[92mbatch_cost: 1.28897 sec,\u001b[0m \u001b[92mreader_cost: 0.00029 sec,\u001b[0m ips: 6.98230 instance/sec.\n[11/24 14:25:06] epoch:[ 1/100] \u001b[95mtrain step:320 \u001b[0m \u001b[92mloss: 3.31511 lr: 0.014141 top1: 0.08648 top5: 0.34592\u001b[0m \u001b[92mbatch_cost: 1.28683 sec,\u001b[0m \u001b[92mreader_cost: 0.00018 sec,\u001b[0m ips: 6.99394 instance/sec.\n[11/24 14:25:10] \u001b[31mEND epoch:1 \u001b[0m \u001b[95mtrain\u001b[0m \u001b[92mloss_avg: 2.96312 top1_avg: 
0.13750 top5_avg: 0.55254\u001b[0m \u001b[92mavg_batch_cost: 1.28680 sec,\u001b[0m \u001b[92mavg_reader_cost: 0.00020 sec,\u001b[0m \u001b[92mbatch_cost_sum: 418.93246 sec,\u001b[0m avg_ips: 6.96055 instance/sec.\n[11/24 14:25:12] \u001b[35mepoch:[ 2/100]\u001b[0m \u001b[95mtrain step:0 \u001b[0m \u001b[92mloss: 2.64611 lr: 0.014255 top1: 0.11111 top5: 0.77332\u001b[0m \u001b[92mbatch_cost: 2.67765 sec,\u001b[0m \u001b[92mreader_cost: 1.03384 sec,\u001b[0m ips: 3.36115 instance/sec.\n[11/24 14:25:25] epoch:[ 2/100] \u001b[95mtrain step:10 \u001b[0m \u001b[92mloss: 2.78904 lr: 0.014541 top1: 0.22222 top5: 0.53015\u001b[0m \u001b[92mbatch_cost: 1.29937 sec,\u001b[0m \u001b[92mreader_cost: 0.00032 sec,\u001b[0m ips: 6.92643 instance/sec.\n[11/24 14:25:38] epoch:[ 2/100] \u001b[95mtrain step:20 \u001b[0m \u001b[92mloss: 3.25412 lr: 0.014827 top1: 0.00000 top5: 0.58023\u001b[0m \u001b[92mbatch_cost: 1.28778 sec,\u001b[0m \u001b[92mreader_cost: 0.00028 sec,\u001b[0m ips: 6.98876 instance/sec.\n[11/24 14:25:51] epoch:[ 2/100] \u001b[95mtrain step:30 \u001b[0m \u001b[92mloss: 2.69714 lr: 0.015112 top1: 0.20045 top5: 0.63401\u001b[0m \u001b[92mbatch_cost: 1.28814 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.98679 instance/sec.\n[11/24 14:26:04] epoch:[ 2/100] \u001b[95mtrain step:40 \u001b[0m \u001b[92mloss: 2.84845 lr: 0.015398 top1: 0.09906 top5: 0.61847\u001b[0m \u001b[92mbatch_cost: 1.28706 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.99270 instance/sec.\n[11/24 14:26:17] epoch:[ 2/100] \u001b[95mtrain step:50 \u001b[0m \u001b[92mloss: 2.58167 lr: 0.015684 top1: 0.33027 top5: 0.66156\u001b[0m \u001b[92mbatch_cost: 1.28917 sec,\u001b[0m \u001b[92mreader_cost: 0.00029 sec,\u001b[0m ips: 6.98125 instance/sec.\n[11/24 14:26:30] epoch:[ 2/100] \u001b[95mtrain step:60 \u001b[0m \u001b[92mloss: 3.24338 lr: 0.015969 top1: 0.09170 top5: 0.40563\u001b[0m \u001b[92mbatch_cost: 1.29062 sec,\u001b[0m \u001b[92mreader_cost: 0.00043 sec,\u001b[0m ips: 6.97342 instance/sec.\n[11/24 14:26:43] epoch:[ 2/100] \u001b[95mtrain step:70 \u001b[0m \u001b[92mloss: 3.21697 lr: 0.016255 top1: 0.11111 top5: 0.29626\u001b[0m \u001b[92mbatch_cost: 1.28854 sec,\u001b[0m \u001b[92mreader_cost: 0.00027 sec,\u001b[0m ips: 6.98464 instance/sec.\n[11/24 14:26:56] epoch:[ 2/100] \u001b[95mtrain step:80 \u001b[0m \u001b[92mloss: 2.38690 lr: 0.016541 top1: 0.00032 top5: 0.88793\u001b[0m \u001b[92mbatch_cost: 1.28941 sec,\u001b[0m \u001b[92mreader_cost: 0.00181 sec,\u001b[0m ips: 6.97996 instance/sec.\n[11/24 14:27:09] epoch:[ 2/100] \u001b[95mtrain step:90 \u001b[0m \u001b[92mloss: 2.68135 lr: 0.016826 top1: 0.19618 top5: 0.61458\u001b[0m \u001b[92mbatch_cost: 1.28978 sec,\u001b[0m \u001b[92mreader_cost: 0.00025 sec,\u001b[0m ips: 6.97792 instance/sec.\n[11/24 14:27:22] epoch:[ 2/100] \u001b[95mtrain step:100 \u001b[0m \u001b[92mloss: 3.04612 lr: 0.017112 top1: 0.26571 top5: 0.55556\u001b[0m \u001b[92mbatch_cost: 1.28800 sec,\u001b[0m \u001b[92mreader_cost: 0.00026 sec,\u001b[0m ips: 6.98759 instance/sec.\n[11/24 14:27:34] epoch:[ 2/100] \u001b[95mtrain step:110 \u001b[0m \u001b[92mloss: 2.92766 lr: 0.017398 top1: 0.17346 top5: 0.48241\u001b[0m \u001b[92mbatch_cost: 1.28832 sec,\u001b[0m \u001b[92mreader_cost: 0.00029 sec,\u001b[0m ips: 6.98584 instance/sec.\n^C\n[11/24 14:27:45] main proc 729 exit, kill process group 364\n[11/24 14:27:45] main proc 731 exit, kill process group 364\n[11/24 14:27:45] main proc 730 exit, kill process group 364\n[11/24 14:27:45] main proc 732 exit, kill process 
group 364\n" ] ], [ [ "## Evaluation Script\nOnce the model is trained, the test script can be used for evaluation.\n\nThe test script runs the prediction of every stream and then fuses the per-stream predictions.\n### Launch command\n```bash\nbash inference.sh\n```\n\n- The evaluation result is saved in the `final_submission.csv` file, which can be submitted on the [competition website](https://aistudio.baidu.com/aistudio/competition/detail/115) to view the score.", "_____no_output_____" ] ], [ [ "# launch the inference script\r\n!bash inference.sh", "100%|██████████████████████████████████████| 634/634 [00:00<00:00, 79187.30it/s]\r" ] ], [ [ "After the test script finishes, a `final_submission.csv` file is produced in the current directory. Submitting this file to the [competition website](https://aistudio.baidu.com/aistudio/competition/detail/115) shows the score on leaderboard A. The example model files provided here score 72.452 on leaderboard A and 66.246 on leaderboard B.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6c3318ed72fe61cc8b6425a8a8384552055050
72,029
ipynb
Jupyter Notebook
_doc/notebooks/lectures/wines_knn_eval.ipynb
sdpython/papierstat
f69de884c59ada30b58224dca39f2a44d92122c1
[ "MIT" ]
7
2019-03-21T09:52:31.000Z
2021-01-17T16:56:27.000Z
_doc/notebooks/lectures/wines_knn_eval.ipynb
sdpython/papierstat
f69de884c59ada30b58224dca39f2a44d92122c1
[ "MIT" ]
33
2018-02-08T23:56:57.000Z
2021-02-10T23:55:43.000Z
_doc/notebooks/lectures/wines_knn_eval.ipynb
sdpython/papierstat
f69de884c59ada30b58224dca39f2a44d92122c1
[ "MIT" ]
1
2021-02-11T09:16:33.000Z
2021-02-11T09:16:33.000Z
215.01194
26,906
0.902859
[ [ [ "# Plus proches voisins - évaluation\n\n\nComment évaluer la pertinence d'un modèle des plus proches voisins.", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "from papierstat.datasets import load_wines_dataset\ndf = load_wines_dataset()\nX = df.drop(['quality', 'color'], axis=1)\ny = df['quality']", "_____no_output_____" ], [ "from sklearn.neighbors import KNeighborsRegressor\nknn = KNeighborsRegressor(n_neighbors=1)\nknn.fit(X, y)", "_____no_output_____" ], [ "prediction = knn.predict(X)", "_____no_output_____" ] ], [ [ "Le modèle ne fait pas d'erreur sur tous les exemples de la base de vins. C'est normal puisque le plus proche voisin d'un vin est nécessairement lui-même, la note prédite et la sienne.", "_____no_output_____" ] ], [ [ "min(prediction - y), max(prediction - y)", "_____no_output_____" ] ], [ [ "Il est difficile dans ces conditions de dire si la prédiction et de bonne qualité. On pourrait estimer la qualité de la prédiction sur un vin nouveau mais il n'y en a aucun pour le moment et ce n'est pas l'ordinateur qui va les fabriquer. On peut peut-être regarder combien de fois le plus proche voisin d'un vin autre que le vin lui-même partage la même note.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import NearestNeighbors\nnn = NearestNeighbors(n_neighbors=2)\nnn.fit(X)", "_____no_output_____" ], [ "distance, index = nn.kneighbors(X)\nproche = index[:, 1].ravel()\nnote_proche = [y[i] for i in proche]", "_____no_output_____" ] ], [ [ "Il ne reste plus qu'à calculer la différence entre la note d'un vin et celle de son plus proche voisin autre que lui-même.", "_____no_output_____" ] ], [ [ "diff = y - note_proche\nax = diff.hist(bins=20, figsize=(3,3))\nax.set_title('Histogramme des différences\\nde prédiction')", "_____no_output_____" ] ], [ [ "Ca marche pour les deux tiers de la base, pour le tiers restant, les notes diffèrent. On peut maintenant regarder si la distance entre ces deux voisins pourrait être corrélée à cette différence.", "_____no_output_____" ] ], [ [ "import pandas\ndif = pandas.DataFrame(dict(dist=distance[:,1], diff=diff))\nax = dif.plot(x=\"dist\", y=\"diff\", kind='scatter', figsize=(3,3))\nax.set_title('Graphe XY - distance / différence');", "_____no_output_____" ] ], [ [ "Ce n'est pas très lisible. Essayons un autre type de graphique.", "_____no_output_____" ] ], [ [ "from seaborn import violinplot, boxplot\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(1, 2, figsize=(8,3))\nviolinplot(x=\"diff\", y=\"dist\", data=dif, ax=ax[0])\nax[0].set_ylim([0,25])\nax[0].set_title('Violons distribution\\ndifférence / distance')\nboxplot(x=\"diff\", y=\"dist\", data=dif, ax=ax[1])\nax[1].set_title('Boxplots distribution\\ndifférence / distance')\nax[1].set_ylim([0,25]);", "_____no_output_____" ] ], [ [ "A priori le modèle n'est pas si mauvais, les voisins partageant la même note ont l'air plus proches que ceux qui ont des notes différentes.", "_____no_output_____" ] ], [ [ "import numpy\ndif['abs_diff'] = numpy.abs(dif['diff'])", "_____no_output_____" ], [ "from seaborn import jointplot\nax = jointplot(\"dist\", \"abs_diff\", data=dif[dif.dist <= 10],\n kind=\"kde\", space=0, color=\"g\", size=4)\nax.ax_marg_y.set_title('Heatmap distribution distance / différence');", "_____no_output_____" ] ], [ [ "Les vins proches se ressemblent pour la plupart. C'est rassurant pour la suite. 
61% des vins ont un voisin proche partageant la même note.", "_____no_output_____" ] ], [ [ "len(dif[dif['abs_diff'] == 0]) / dif.shape[0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6c4d5a888e3b137a82c6e4e76c1f78b06d620b
439,677
ipynb
Jupyter Notebook
_notebooks/2020-10-20-ScikitLearn-tutorial-part3.ipynb
berdakh/blog
250e884289fbe966e5edb161ad5671582bb975df
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-10-20-ScikitLearn-tutorial-part3.ipynb
berdakh/blog
250e884289fbe966e5edb161ad5671582bb975df
[ "Apache-2.0" ]
4
2020-10-21T10:24:00.000Z
2022-02-26T09:52:59.000Z
_notebooks/2020-10-20-ScikitLearn-tutorial-part3.ipynb
berdakh/blog
250e884289fbe966e5edb161ad5671582bb975df
[ "Apache-2.0" ]
2
2021-10-17T06:03:18.000Z
2022-02-03T20:11:30.000Z
67.590623
17,388
0.655809
[ [ [ "# Introduction to the scikit-learn -- supervised learning and model selection (part 3) \n\n- toc: true\n- badges: true\n- categories: [EEG, jupyter]\n- description: To visualize the workings of machine learning algorithms, it is often helpful to study two-dimensional or one-dimensional data, that is data with only one or two features. While in practice, datasets usually have many more features, it is hard to plot high-dimensional data in on two-dimensional screens.", "_____no_output_____" ], [ "To visualize the workings of machine learning algorithms, it is often helpful to study two-dimensional or one-dimensional data, that is data with only one or two features. While in practice, datasets usually have many more features, it is hard to plot high-dimensional data in on two-dimensional screens.\n\nWe will illustrate some very simple examples before we move on to more \"real world\" data sets.\n\nFirst, we will look at a two class classification problem in two dimensions. We use the synthetic data generated by the ``make_blobs`` function.", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nYouTubeVideo('wCpCDbkDJXQ', width=700, height=400)", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "import torch", "_____no_output_____" ], [ "from sklearn.datasets import make_blobs\nX, y = make_blobs(centers=2, random_state=0)\n\nprint('X ~ n_samples x n_features:', X.shape)\nprint('y ~ n_samples:', y.shape)\n\nprint('\\nFirst 5 samples:\\n', X[:5, :])\nprint('\\nFirst 5 labels:', y[:5])", "X ~ n_samples x n_features: (100, 2)\ny ~ n_samples: (100,)\n\nFirst 5 samples:\n [[ 4.21850347 2.23419161]\n [ 0.90779887 0.45984362]\n [-0.27652528 5.08127768]\n [ 0.08848433 2.32299086]\n [ 3.24329731 1.21460627]]\n\nFirst 5 labels: [1 1 0 0 1]\n" ] ], [ [ "As the data is two-dimensional, we can plot each sample as a point in a two-dimensional coordinate system, with the first feature being the x-axis and the second feature being the y-axis.", "_____no_output_____" ] ], [ [ "plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0')\nplt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s')\n\nplt.xlabel('first feature')\nplt.ylabel('second feature')\nplt.legend(loc='upper right');", "_____no_output_____" ] ], [ [ "Classification is a supervised task, and since we are interested in its performance on unseen data, we split our data into two parts:\n\n1. a training set that the learning algorithm uses to fit the model\n2. a test set to evaluate the generalization performance of the model", "_____no_output_____" ], [ "The ``train_test_split`` function from the ``model_selection`` module does that for us -- we will use it to split a dataset into 75% training data and 25% test data.\n\n<img src=\"figures/train_test_split_matrix.svg\" width=\"25%\">", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.25,\n random_state=1234,\n stratify=y)", "_____no_output_____" ] ], [ [ "### The scikit-learn estimator API\n<img src=\"figures/supervised_workflow.svg\" width=\"30%\">\n", "_____no_output_____" ], [ "Every algorithm is exposed in scikit-learn via an ''Estimator'' object. (All models in scikit-learn have a very consistent interface). 
For instance, we first import the logistic regression class.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ] ], [ [ "Next, we instantiate the estimator object.", "_____no_output_____" ] ], [ [ "classifier = LogisticRegression()", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ], [ "y_train.shape", "_____no_output_____" ] ], [ [ "To build the model from our data, that is, to learn how to classify new points, we call the ``fit`` function with the training data and the corresponding training labels (the desired outputs for the training data points):", "_____no_output_____" ] ], [ [ "classifier.fit?", "_____no_output_____" ], [ "classifier.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "(Some estimator methods such as `fit` return `self` by default. Thus, after executing the code snippet above, you will see the default parameters of this particular instance of `LogisticRegression`. Another way of retrieving the estimator's initialization parameters is to execute `classifier.get_params()`, which returns a parameter dictionary.)", "_____no_output_____" ] ], [ [ "classifier.coef_", "_____no_output_____" ], [ "classifier.get_params()", "_____no_output_____" ] ], [ [ "We can then apply the model to unseen data and use the model to predict the estimated outcome using the ``predict`` method:", "_____no_output_____" ] ], [ [ "prediction = classifier.predict(X_test)", "_____no_output_____" ] ], [ [ "We can compare these against the true labels:", "_____no_output_____" ] ], [ [ "print(prediction)\nprint(y_test)", "[1 0 1 0 1 1 1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 1 1 0]\n[1 1 1 0 1 1 0 1 1 0 1 0 0 0 0 1 0 0 1 0 0 1 1 1 0]\n" ] ], [ [ "We can evaluate our classifier quantitatively by measuring what fraction of predictions is correct. This is called **accuracy**:", "_____no_output_____" ] ], [ [ "prediction == y_test", "_____no_output_____" ], [ "np.mean(prediction == y_test)", "_____no_output_____" ] ], [ [ "There is also a convenience function, ``score``, that all scikit-learn classifiers have, to compute this directly from the test data:\n ", "_____no_output_____" ] ], [ [ "classifier.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "It is often helpful to compare the generalization performance (on the test set) to the performance on the training set:", "_____no_output_____" ] ], [ [ "classifier.score(X_train, y_train)", "_____no_output_____" ] ], [ [ "LogisticRegression is a so-called linear model,\nthat means it will create a decision boundary that is linear in the input space. In 2d, this simply means it finds a line to separate the blue from the red:", "_____no_output_____" ] ], [ [ "from figures import plot_2d_separator\n\nplt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue',s=40, label='0')\nplt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s')\n\nplt.xlabel(\"first feature\")\nplt.ylabel(\"second feature\")\nplot_2d_separator(classifier, X)\nplt.legend(loc='upper right');", "_____no_output_____" ] ], [ [ "**Estimated parameters**: All the estimated model parameters are attributes of the estimator object ending with an underscore.
Here, these are the coefficients and the offset of the line:", "_____no_output_____" ] ], [ [ "print(classifier.coef_)\nprint(classifier.intercept_)", "[[ 0.87015709 -2.23877721]]\n[4.64737766]\n" ] ], [ [ "K Nearest Neighbors (KNN)\n------------------------------------------------", "_____no_output_____" ], [ "Another popular and easy-to-understand classifier is K nearest neighbors (kNN). It has one of the simplest learning strategies: given a new, unknown observation, look up in your reference database which ones have the closest features and assign the predominant class.", "_____no_output_____" ], [ "The KNN classifier is a non-parametric classifier that simply stores the training data $\\mathcal{D}$\nand classifies each new instance $x$ using a majority vote over its set of $K$ nearest neighbors $\\mathcal{N}_K(x)$ computed using any distance function $d: \\mathbb{R}^D \\times \\mathbb{R}^D \\rightarrow \\mathbb{R}$.\n\nKNN Classification Function:\n\n$$g_{KNN}(x) = argmax_{y\\in\\mathcal{Y}} \\sum_{i\\in \\mathcal{N}_K(x)} \\mathbb{I}[y_i=y]$$\n\n\nUse of KNN requires choosing the distance function $d$ and the number of neighbors $K$.", "_____no_output_____" ], [ "![](https://cdn-images-1.medium.com/max/900/1*k8WEP2Kn3YDOopnLzljAJA.png)\n![](https://cdn-images-1.medium.com/max/900/1*CZwsWrWNj2KqF1jJ-Z3SPA.png)\n\n![](https://cdn-images-1.medium.com/max/1200/1*4vdvnkZoWgOp0vcLF4wFcA.png)", "_____no_output_____" ], [ "- In general, KNN can work with any distance function $d$ satisfying non-negativity $d(\\bf{x},\\bf{x}')\\geq 0$ and identity of indiscernibles $d(\\bf{x},\\bf{x})=0$.\n\n- Alternatively, KNN can work with any similarity function $s$ satisfying non-negativity $s(\\bf{x},\\bf{y})\\geq 0$ that attains its maximum on indiscernibles $s(\\bf{x},\\bf{x})=\\max_{\\bf{x}'} s(\\bf{x},\\bf{x}')$.", "_____no_output_____" ], [ "- However, the more structure the distance or similarity function has (symmetry, triangle inequality), \nthe more structure you can exploit when designing algorithms.", "_____no_output_____" ], [ "## Minkowski Distance ($\\ell_p$ norms)\n\nGiven two data vectors $\\bf{x},\\bf{x}' \\in \\mathbb{R}^D$, the Minkowski Distance with parameter $p$ (the $\\ell_p$ norm) is a proper metric defined as follows:\n\n\\begin{align*}\n d_p(\\bf{x},\\bf{x}') &= ||\\bf{x}-\\bf{x}'||_p \\\\\n &=\\left(\\sum_{d=1}^D |x_d-x'_d|^p\\right)^{1/p}\n\\end{align*}\n\n\nSpecial cases include Euclidean distance ($p=2$), Manhattan distance ($p=1$) and Chebyshev distance ($p=\\infty$).", "_____no_output_____" ], [ "## Brute Force KNN\n\n- Given any distance function $d$, brute force KNN works by computing the distance $d_i = d(\\bf{x}_i,\\bf{x}_*)$ from a target point $\\bf{x}_*$ to all of the training points $\\bf{x}_i$.\n\n- You then simply sort the distances $\\{d_i,i=1:N\\}$ and choose the data cases with the $K$ smallest distances to form the neighbor set $\\mathcal{N}_K(\\bf{x}_*)$. Using a similarity function is identical, but you select the $K$ most similar data cases.
\n\n- Once the $K$ neighbors are selected, applying the classification rule is easy.\n", "_____no_output_____" ], [ "In sklearn, the KNN interface is exactly the same as for ``LogisticRegression`` above.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ] ], [ [ "This time we set a parameter of the KNeighborsClassifier to tell it we only want to look at one nearest neighbor:", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors=1)", "_____no_output_____" ] ], [ [ "We fit the model with our training data:", "_____no_output_____" ] ], [ [ "knn.fit(X_train, y_train)", "_____no_output_____" ], [ "plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0')\nplt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s')\n\nplt.xlabel(\"first feature\")\nplt.ylabel(\"second feature\")\nplot_2d_separator(knn, X)\nplt.legend(loc='upper right');", "_____no_output_____" ], [ "knn.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>\n Apply the KNeighborsClassifier to the ``iris`` dataset. Play with different values of the ``n_neighbors`` and observe how training and test score change.\n </li>\n </ul>\n</div>", "_____no_output_____" ] ], [ [ "# %load solutions/05A_knn_with_diff_k.py\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\niris = load_iris()\nX = iris.data\ny = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.25,\n random_state=1234,\n stratify=y)\n\nX_trainsub, X_valid, y_trainsub, y_valid = train_test_split(X_train, y_train,\n test_size=0.5,\n random_state=1234,\n stratify=y_train)\n\nfor k in range(1, 20):\n knn = KNeighborsClassifier(n_neighbors=k)\n train_score = knn.fit(X_trainsub, y_trainsub).\\\n score(X_trainsub, y_trainsub)\n valid_score = knn.score(X_valid, y_valid)\n print('k: %d, Train/Valid Acc: %.3f/%.3f' %\n (k, train_score, valid_score))\n\nknn = KNeighborsClassifier(n_neighbors=9)\nknn.fit(X_train, y_train)\nprint('k=9 Test Acc: %.3f' % knn.score(X_test, y_test))\n", "k: 1, Train/Valid Acc: 1.000/0.946\nk: 2, Train/Valid Acc: 1.000/0.964\nk: 3, Train/Valid Acc: 1.000/0.946\nk: 4, Train/Valid Acc: 1.000/0.964\nk: 5, Train/Valid Acc: 1.000/0.929\nk: 6, Train/Valid Acc: 1.000/0.929\nk: 7, Train/Valid Acc: 1.000/0.929\nk: 8, Train/Valid Acc: 1.000/0.929\nk: 9, Train/Valid Acc: 1.000/0.929\nk: 10, Train/Valid Acc: 1.000/0.946\nk: 11, Train/Valid Acc: 1.000/0.946\nk: 12, Train/Valid Acc: 1.000/0.964\nk: 13, Train/Valid Acc: 1.000/0.929\nk: 14, Train/Valid Acc: 1.000/0.946\nk: 15, Train/Valid Acc: 1.000/0.929\nk: 16, Train/Valid Acc: 1.000/0.929\nk: 17, Train/Valid Acc: 1.000/0.929\nk: 18, Train/Valid Acc: 0.964/0.946\nk: 19, Train/Valid Acc: 0.964/0.929\nk=9 Test Acc: 0.974\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "# Supervised Learning -- Regression Analysis", "_____no_output_____" ], [ "In regression we are trying to predict a continuous output variable -- in contrast to the nominal variables we were predicting in the previous classification examples. \n\nLet's start with a simple toy example with one feature dimension (explanatory variable) and one target variable. We will create a dataset out of a sine curve with some noise:", "_____no_output_____" ] ], [ [ "x = np.linspace(-3, 3, 100)\nprint(x)", "[-3. 
-2.93939394 -2.87878788 -2.81818182 -2.75757576 -2.6969697\n -2.63636364 -2.57575758 -2.51515152 -2.45454545 -2.39393939 -2.33333333\n -2.27272727 -2.21212121 -2.15151515 -2.09090909 -2.03030303 -1.96969697\n -1.90909091 -1.84848485 -1.78787879 -1.72727273 -1.66666667 -1.60606061\n -1.54545455 -1.48484848 -1.42424242 -1.36363636 -1.3030303 -1.24242424\n -1.18181818 -1.12121212 -1.06060606 -1. -0.93939394 -0.87878788\n -0.81818182 -0.75757576 -0.6969697 -0.63636364 -0.57575758 -0.51515152\n -0.45454545 -0.39393939 -0.33333333 -0.27272727 -0.21212121 -0.15151515\n -0.09090909 -0.03030303 0.03030303 0.09090909 0.15151515 0.21212121\n 0.27272727 0.33333333 0.39393939 0.45454545 0.51515152 0.57575758\n 0.63636364 0.6969697 0.75757576 0.81818182 0.87878788 0.93939394\n 1. 1.06060606 1.12121212 1.18181818 1.24242424 1.3030303\n 1.36363636 1.42424242 1.48484848 1.54545455 1.60606061 1.66666667\n 1.72727273 1.78787879 1.84848485 1.90909091 1.96969697 2.03030303\n 2.09090909 2.15151515 2.21212121 2.27272727 2.33333333 2.39393939\n 2.45454545 2.51515152 2.57575758 2.63636364 2.6969697 2.75757576\n 2.81818182 2.87878788 2.93939394 3. ]\n" ], [ "rng = np.random.RandomState(42)\ny = np.sin(4 * x) + x + rng.uniform(size=len(x))", "_____no_output_____" ], [ "plt.plot(x, y, 'o');", "_____no_output_____" ] ], [ [ "Linear Regression\n=================", "_____no_output_____" ], [ "The first model that we will introduce is the so-called simple linear regression. Here, we want to fit a line to the data.\n\nOne of the simplest models again is a linear one that simply tries to predict the data as lying on a line. One way to find such a line is `LinearRegression` (also known as [*Ordinary Least Squares (OLS)*](https://en.wikipedia.org/wiki/Ordinary_least_squares) regression).\nThe interface for LinearRegression is exactly the same as for the classifiers before, only that ``y`` now contains float values, instead of classes.\n\nAs we remember, the scikit-learn API requires us to provide the target variable (`y`) as a 1-dimensional array; scikit-learn's API expects the samples (`X`) in the form of a 2-dimensional array -- even though it may only consist of 1 feature.
Thus, let us convert the 1-dimensional `x` NumPy array into an `X` array with 2 axes:\n", "_____no_output_____" ] ], [ [ "print('Before: ', x.shape)\nX = x[:, np.newaxis]\nprint('After: ', X.shape)", "Before: (100,)\nAfter: (100, 1)\n" ] ], [ [ "Again, we start by splitting our dataset into a training (75%) and a test set (25%):", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)", "_____no_output_____" ] ], [ [ "Next, we use the learning algorithm implemented in `LinearRegression` to **fit a regression model to the training data**:", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "After fitting to the training data, we obtain a linear regression model parameterized with the following values.", "_____no_output_____" ] ], [ [ "print('Weight coefficients: ', regressor.coef_)\nprint('y-axis intercept: ', regressor.intercept_)", "Weight coefficients: [0.90211711]\ny-axis intercept: 0.44840974988268\n" ] ], [ [ "Since our regression model is a linear one, the relationship between the target variable (y) and the feature variable (x) is defined as \n\n$$y = \\text{weight} \\times x + \\text{intercept}$$\n\nPlugging the min and max values into this equation, we can plot the regression fit to our training data:", "_____no_output_____" ] ], [ [ "min_pt = X.min() * regressor.coef_[0] + regressor.intercept_\nmax_pt = X.max() * regressor.coef_[0] + regressor.intercept_\n\nprint(min_pt, max_pt, X.min(), X.max())", "-2.2579415855468374 3.154761085312198 -3.0 3.0\n" ], [ "plt.plot([X.min(), X.max()], [min_pt, max_pt])\nplt.plot(X_train, y_train, 'o');", "_____no_output_____" ] ], [ [ "Similar to the estimators for classification in the previous notebook, we use the `predict` method to predict the target variable. And we expect these predicted values to fall onto the line that we plotted previously:", "_____no_output_____" ] ], [ [ "y_pred_train = regressor.predict(X_train)", "_____no_output_____" ], [ "plt.plot(X_train, y_train, 'o', label=\"data\")\nplt.plot(X_train, y_pred_train, 'o', label=\"prediction\")\nplt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit')\nplt.legend(loc='best')", "_____no_output_____" ] ], [ [ "As we can see in the plot above, the line is able to capture the general slope of the data, but not many details.", "_____no_output_____" ], [ "Next, let's try the test set:", "_____no_output_____" ] ], [ [ "y_pred_test = regressor.predict(X_test)", "_____no_output_____" ], [ "print(X_test.shape)\nprint(y_pred_test.shape)", "(25, 1)\n(25,)\n" ], [ "plt.plot(X_test, y_test, 'o', label=\"data\")\nplt.plot(X_test, y_pred_test, 'o', label=\"prediction\")\nplt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "Again, scikit-learn provides an easy way to evaluate the prediction quantitatively using the ``score`` method. For regression tasks, this is the R<sup>2</sup> score. Another popular way would be the Mean Squared Error (MSE).
As its name implies, the MSE is simply the average squared difference between the predicted and actual target values:\n\n$$MSE = \\frac{1}{n} \\sum_{i=1}^{n} (\\text{predicted}_i - \\text{true}_i)^2$$", "_____no_output_____" ] ], [ [ "regressor.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>\n Add a feature containing `sin(4x)` to `X` and redo the fit. Visualize the predictions with this new richer, yet linear, model.\n </li>\n </ul>\n</div>", "_____no_output_____" ] ], [ [ "# %load solutions/06B_lin_with_sine.py", "_____no_output_____" ] ], [ [ "KNeighborsRegression\n=======================\nAs for classification, we can also use a neighbor-based method for regression. We can simply take the output of the nearest point, or we could average several nearest points. This method is less popular for regression than for classification, but still a good baseline.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nkneighbor_regression = KNeighborsRegressor(n_neighbors=1)\n\nkneighbor_regression.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "Again, let us look at the behavior on the training and test sets:", "_____no_output_____" ] ], [ [ "y_pred_train = kneighbor_regression.predict(X_train)\n\nplt.plot(X_train, y_train, 'o', label=\"data\", markersize=10)\nplt.plot(X_train, y_pred_train, 's', label=\"prediction\", markersize=4)\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "On the training set, we do a perfect job: each point is its own nearest neighbor!", "_____no_output_____" ] ], [ [ "y_pred_test = kneighbor_regression.predict(X_test)\n\nplt.plot(X_test, y_test, 'o', label=\"data\", markersize=8)\nplt.plot(X_test, y_pred_test, 's', label=\"prediction\", markersize=4)\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "On the test set, we also do a better job of capturing the variation, but our estimates look much messier than before.\nLet us look at the R<sup>2</sup> score:", "_____no_output_____" ] ], [ [ "kneighbor_regression.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "Much better than before! Here, the linear model was not a good fit for our problem; it was lacking in complexity and thus under-fit our data.", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>\n Compare the KNeighborsRegressor and LinearRegression on the boston housing dataset. You can load the dataset using ``sklearn.datasets.load_boston``. You can learn about the dataset by reading the ``DESCR`` attribute.\n </li>\n </ul>\n</div>", "_____no_output_____" ] ], [ [ "# %load solutions/06A_knn_vs_linreg.py", "_____no_output_____" ] ], [ [ "# Cross-Validation and scoring methods", "_____no_output_____" ], [ "In the previous sections and notebooks, we split our dataset into two parts, a training set and a test set. We used the training set to fit our model, and we used the test set to evaluate its generalization performance -- how well it performs on new, unseen data.", "_____no_output_____" ], [ "<img src=\"figures/train_test_split.svg\" width=\"80%\">", "_____no_output_____" ], [ "However, often (labeled) data is precious, and this approach lets us only use ~ 3/4 of our data for training.
On the other hand, we will only ever evaluate our model on 1/4 of our data for testing.\nA common way to use more of the data to build a model, but also get a more robust estimate of the generalization performance, is cross-validation.\nIn cross-validation, the data is repeatedly split into a training set and a non-overlapping test set, with a separate model built for every pair. The test-set scores are then aggregated for a more robust estimate.\n\nThe most common way to do cross-validation is k-fold cross-validation, in which the data is first split into k (often 5 or 10) equal-sized folds, and then for each iteration, one of the k folds is used as test data, and the rest as training data:", "_____no_output_____" ], [ "<img src=\"figures/cross_validation.svg\" width=\"80%\">\n", "_____no_output_____" ], [ "This way, each data point will be in the test-set exactly once, and we can use all but a k-th of the data for training.\nLet us apply this technique to evaluate the KNeighborsClassifier algorithm on the Iris dataset:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nfrom sklearn.neighbors import KNeighborsClassifier\n\niris = load_iris()\nX, y = iris.data, iris.target\n\nclassifier = KNeighborsClassifier()", "_____no_output_____" ] ], [ [ "The labels in iris are sorted, which means that if we split the data as illustrated above, the first fold will only have the label 0 in it, while the last one will only have the label 2:", "_____no_output_____" ] ], [ [ "y", "_____no_output_____" ] ], [ [ "To avoid this problem in evaluation, we first shuffle our data:", "_____no_output_____" ] ], [ [ "import numpy as np\nrng = np.random.RandomState(0)\n\npermutation = rng.permutation(len(X))\nX, y = X[permutation], y[permutation]\nprint(y)", "[2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0\n 1 1 1 2 0 2 0 0 1 2 2 2 2 1 2 1 1 2 2 2 2 1 2 1 0 2 1 1 1 1 2 0 0 2 1 0 0\n 1 0 2 1 0 1 2 1 0 2 2 2 2 0 0 2 2 0 2 0 2 2 0 0 2 0 0 0 1 2 2 0 0 0 1 1 0\n 0 1 0 2 1 2 1 0 2 0 2 0 0 2 0 2 1 1 1 2 2 1 1 0 1 2 2 0 1 1 1 1 0 0 0 2 1\n 2 0]\n" ] ], [ [ "Now implementing cross-validation is easy:", "_____no_output_____" ] ], [ [ "k = 5\nn_samples = len(X)\nfold_size = n_samples // k\nscores = []\nmasks = []\n\nfor fold in range(k):\n # generate a boolean mask for the test set in this fold\n test_mask = np.zeros(n_samples, dtype=bool)\n test_mask[fold * fold_size : (fold + 1) * fold_size] = True\n \n # store the mask for visualization\n masks.append(test_mask)\n \n # create training and test sets using this mask\n X_test, y_test = X[test_mask], y[test_mask]\n X_train, y_train = X[~test_mask], y[~test_mask]\n \n # fit the classifier\n classifier.fit(X_train, y_train)\n # compute the score and record it\n \n scores.append(classifier.score(X_test, y_test))", "_____no_output_____" ] ], [ [ "Let's check that our test mask does the right thing:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nplt.matshow(masks, cmap='gray_r')", "_____no_output_____" ] ], [ [ "And now let's look at the scores we computed:", "_____no_output_____" ] ], [ [ "print(scores)", "[0.9666666666666667, 0.9, 1.0, 1.0, 0.9333333333333333]\n" ], [ "print(np.mean(scores))", "0.96\n" ] ], [ [ "As you can see, there is a rather wide spectrum of scores from 90% correct to 100% correct.
If we only did a single split, we might have gotten either answer.", "_____no_output_____" ], [ "As cross-validation is such a common pattern in machine learning, there are functions to do the above for you with much more flexibility and less code.\nThe ``sklearn.model_selection`` module has all functions related to cross validation. The easiest function is ``cross_val_score``, which takes an estimator and a dataset, and will do all of the splitting for you:", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_val_score\nscores = cross_val_score(classifier, X, y)\nprint(scores)\nprint(np.mean(scores))", "[1. 0.93333333 1. 1. 0.93333333]\n0.9733333333333334\n" ] ], [ [ "As you can see, the function uses five folds by default. You can change the number of folds using the cv argument:", "_____no_output_____" ] ], [ [ "cross_val_score(classifier, X, y, cv=5)", "_____no_output_____" ] ], [ [ "There are also helper objects in the cross-validation module that will generate indices for you for all kinds of different cross-validation methods, including k-fold:", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import KFold, StratifiedKFold, ShuffleSplit", "_____no_output_____" ] ], [ [ "By default, cross_val_score will use ``StratifiedKFold`` for classification, which ensures that the class proportions in the dataset are reflected in each fold. If you have a binary classification dataset with 90% of data points belonging to class 0, that would mean that in each fold, 90% of datapoints would belong to class 0.\nIf you just used KFold cross-validation, it is likely that you would generate a split that only contains class 0.\nIt is generally a good idea to use ``StratifiedKFold`` whenever you do classification.\n\n``StratifiedKFold`` would also remove our need to shuffle ``iris``.\nLet's see what kinds of folds it generates on the unshuffled iris dataset.\nEach cross-validation class is a generator of sets of training and test indices:", "_____no_output_____" ] ], [ [ "cv = StratifiedKFold(n_splits=5)\nfor train, test in cv.split(iris.data, iris.target):\n print(test)", "[ 0 1 2 3 4 5 6 7 8 9 50 51 52 53 54 55 56 57\n 58 59 100 101 102 103 104 105 106 107 108 109]\n[ 10 11 12 13 14 15 16 17 18 19 60 61 62 63 64 65 66 67\n 68 69 110 111 112 113 114 115 116 117 118 119]\n[ 20 21 22 23 24 25 26 27 28 29 70 71 72 73 74 75 76 77\n 78 79 120 121 122 123 124 125 126 127 128 129]\n[ 30 31 32 33 34 35 36 37 38 39 80 81 82 83 84 85 86 87\n 88 89 130 131 132 133 134 135 136 137 138 139]\n[ 40 41 42 43 44 45 46 47 48 49 90 91 92 93 94 95 96 97\n 98 99 140 141 142 143 144 145 146 147 148 149]\n" ] ], [ [ "As you can see, there are a couple of samples from the beginning, then from the middle, and then from the end, in each of the folds.\nThis way, the class ratios are preserved.
Let's visualize the split:", "_____no_output_____" ] ], [ [ "def plot_cv(cv, features, labels):\n masks = []\n for train, test in cv.split(features, labels):\n mask = np.zeros(len(labels), dtype=bool)\n mask[test] = 1\n masks.append(mask)\n \n plt.matshow(masks, cmap='gray_r')", "_____no_output_____" ], [ "plot_cv(StratifiedKFold(n_splits=5), iris.data, iris.target)", "_____no_output_____" ] ], [ [ "For comparison, here is the standard KFold again, which ignores the labels:", "_____no_output_____" ] ], [ [ "plot_cv(KFold(n_splits=5), iris.data, iris.target)", "_____no_output_____" ] ], [ [ "Keep in mind that increasing the number of folds will give you a larger training dataset, but will lead to more repetitions, and therefore a slower evaluation:", "_____no_output_____" ] ], [ [ "plot_cv(KFold(n_splits=10), iris.data, iris.target)", "_____no_output_____" ] ], [ [ "Another helpful cross-validation generator is ``ShuffleSplit``. This generator simply splits off a random portion of the data repeatedly. This allows the user to specify the number of repetitions and the training set size independently:", "_____no_output_____" ] ], [ [ "plot_cv(ShuffleSplit(n_splits=5, test_size=.2), iris.data, iris.target)", "_____no_output_____" ] ], [ [ "If you want a more robust estimate, you can just increase the number of splits:", "_____no_output_____" ] ], [ [ "plot_cv(ShuffleSplit(n_splits=20, test_size=.2), iris.data, iris.target)", "_____no_output_____" ] ], [ [ "You can use all of these cross-validation generators with the `cross_val_score` method:", "_____no_output_____" ] ], [ [ "cv = ShuffleSplit(n_splits=5, test_size=.2)\ncross_val_score(classifier, X, y, cv=cv)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>\n Perform three-fold cross-validation using the ``KFold`` class on the iris dataset without shuffling the data. Can you explain the result?\n </li>\n </ul>\n</div>", "_____no_output_____" ] ], [ [ "# %load solutions/13_cross_validation.py\ncv = KFold(n_splits=3)\ncross_val_score(classifier, iris.data, iris.target, cv=cv)\n", "_____no_output_____" ] ], [ [ "# A recap on Scikit-learn's estimator interface", "_____no_output_____" ], [ "Scikit-learn strives to have a uniform interface across all methods. Given a scikit-learn *estimator*\nobject named `model`, the following methods are available (not all for each model):\n\n- Available in **all Estimators**\n + `model.fit()` : fit training data. For supervised learning applications,\n this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`).\n For unsupervised learning applications, `fit` takes only a single argument,\n the data `X` (e.g. `model.fit(X)`).\n- Available in **supervised estimators**\n + `model.predict()` : given a trained model, predict the label of a new set of data.\n This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`),\n and returns the learned label for each object in the array.\n + `model.predict_proba()` : For classification problems, some estimators also provide\n this method, which returns the probability that a new observation has each categorical label.\n In this case, the label with the highest probability is returned by `model.predict()`.\n + `model.decision_function()` : For classification problems, some estimators provide an uncertainty estimate that is not a probability.
For binary classification, a decision_function >= 0 means the positive class will be predicted, while < 0 means the negative class.\n + `model.score()` : for classification or regression problems, most (all?) estimators implement\n a score method. Scores are between 0 and 1, with a larger score indicating a better fit. For classifiers, the `score` method computes the prediction accuracy. For regressors, `score` computes the coefficient of determination (R<sup>2</sup>) of the prediction.\n + `model.transform()` : For feature selection algorithms, this will reduce the dataset to the selected features. For some classification and regression models such as some linear models and random forests, this method reduces the dataset to the most informative features. These classification and regression models can therefore also be used as feature selection methods.\n \n- Available in **unsupervised estimators**\n + `model.transform()` : given an unsupervised model, transform new data into the new basis.\n This also accepts one argument `X_new`, and returns the new representation of the data based\n on the unsupervised model.\n + `model.fit_transform()` : some estimators implement this method,\n which more efficiently performs a fit and a transform on the same input data.\n + `model.predict()` : for clustering algorithms, the predict method will produce cluster labels for new data points. Not all clustering methods have this functionality.\n + `model.predict_proba()` : Gaussian mixture models (GMMs) provide the probability for each point to be generated by a given mixture component.\n + `model.score()` : Density models like KDE and GMMs provide the likelihood of the data under the model.", "_____no_output_____" ], [ "Apart from ``fit``, the two most important functions are arguably ``predict`` to produce a target variable (a ``y``) and ``transform``, which produces a new representation of the data (an ``X``).\nThe following table shows which function applies to which class of models:\n\n", "_____no_output_____" ], [ "<table>\n<tr style=\"border:None; font-size:20px; padding:10px;\"><th>``model.predict``</th><th>``model.transform``</th></tr>\n<tr style=\"border:None; font-size:20px; padding:10px;\"><td>Classification</td><td>Preprocessing</td></tr>\n<tr style=\"border:None; font-size:20px; padding:10px;\"><td>Regression</td><td>Dimensionality Reduction</td></tr>\n<tr style=\"border:None; font-size:20px; padding:10px;\"><td>Clustering</td><td>Feature Extraction</td></tr>\n<tr style=\"border:None; font-size:20px; padding:10px;\"><td>&nbsp;</td><td>Feature Selection</td></tr>\n\n</table>\n\n\n", "_____no_output_____" ] ] ]
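To make the ``predict``/``transform`` split summarized in the table above concrete, here is a minimal sketch. It reuses the iris data from the cross-validation section; the ``StandardScaler`` preprocessing step is an addition for illustration and is not part of the notebook above:

```python
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()
X, y = iris.data, iris.target

# A preprocessing estimator: fit_transform() returns a new representation of X
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# A supervised estimator: predict() returns a y, score() returns accuracy
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(X_scaled, y)
print(classifier.predict(X_scaled[:3]))
print(classifier.score(X_scaled, y))
```

The same two-step pattern (transform the data, then predict) is what ``sklearn.pipeline.Pipeline`` automates.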
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
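The solution cell for the sin(4x) regression exercise above was left unloaded. One possible sketch, reusing the same data-generating code as the regression section; the feature construction via ``np.c_`` is an assumption about what the solution file contains, not its actual content:

```python
# Add sin(4x) as a second feature so the *linear* model can capture the wiggle.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y = np.sin(4 * x) + x + rng.uniform(size=len(x))

# Two columns: the raw feature and its sine transform
X = np.c_[x, np.sin(4 * x)]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

regressor = LinearRegression().fit(X_train, y_train)
print(regressor.score(X_test, y_test))  # expected to beat the plain straight-line fit
```

The model is still linear in its parameters; only the feature representation is richer.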
4a6c52b5d48215480f4ccb1bc53315e5c340c8ab
36,372
ipynb
Jupyter Notebook
2.Convolutional Neural Networks in TensorFlow/Week-3/Course_2_Part_6_Lesson_3_Notebook.ipynb
PramitDutta1999/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate
ffe0e18f5349b56b520781aadd3a353b5bc39cf6
[ "MIT" ]
null
null
null
2.Convolutional Neural Networks in TensorFlow/Week-3/Course_2_Part_6_Lesson_3_Notebook.ipynb
PramitDutta1999/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate
ffe0e18f5349b56b520781aadd3a353b5bc39cf6
[ "MIT" ]
null
null
null
2.Convolutional Neural Networks in TensorFlow/Week-3/Course_2_Part_6_Lesson_3_Notebook.ipynb
PramitDutta1999/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate
ffe0e18f5349b56b520781aadd3a353b5bc39cf6
[ "MIT" ]
null
null
null
101.314763
21,610
0.774112
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ], [ "import os\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n!wget --no-check-certificate \\\n https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \\\n -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5\n \nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\n\nlocal_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\npre_trained_model = InceptionV3(input_shape = (150, 150, 3), \n include_top = False, \n weights = None)\n\npre_trained_model.load_weights(local_weights_file)\n\nfor layer in pre_trained_model.layers:\n layer.trainable = False\n \n# pre_trained_model.summary()\n\nlast_layer = pre_trained_model.get_layer('mixed7')\nprint('last layer output shape: ', last_layer.output_shape)\nlast_output = last_layer.output", "--2020-08-27 14:00:28-- https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5\nResolving storage.googleapis.com (storage.googleapis.com)... 172.217.212.128, 172.217.214.128, 108.177.121.128, ...\nConnecting to storage.googleapis.com (storage.googleapis.com)|172.217.212.128|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 87910968 (84M) [application/x-hdf]\nSaving to: ‘/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5’\n\n/tmp/inception_v3_w 100%[===================>] 83.84M 273MB/s in 0.3s \n\n2020-08-27 14:00:29 (273 MB/s) - ‘/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5’ saved [87910968/87910968]\n\nlast layer output shape: (None, 7, 7, 768)\n" ], [ "from tensorflow.keras.optimizers import RMSprop\n\n# Flatten the output layer to 1 dimension\nx = layers.Flatten()(last_output)\n# Add a fully connected layer with 1,024 hidden units and ReLU activation\nx = layers.Dense(1024, activation='relu')(x)\n# Add a dropout rate of 0.2\nx = layers.Dropout(0.2)(x) \n# Add a final sigmoid layer for classification\nx = layers.Dense (1, activation='sigmoid')(x) \n\nmodel = Model( pre_trained_model.input, x) \n\nmodel.compile(optimizer = RMSprop(lr=0.0001), \n loss = 'binary_crossentropy', \n metrics = ['accuracy'])\n", "_____no_output_____" ], [ "!wget --no-check-certificate \\\n https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \\\n -O /tmp/cats_and_dogs_filtered.zip\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport os\nimport zipfile\n\nlocal_zip = '//tmp/cats_and_dogs_filtered.zip'\n\nzip_ref = zipfile.ZipFile(local_zip, 'r')\n\nzip_ref.extractall('/tmp')\nzip_ref.close()\n\n# Define our example directories and files\nbase_dir = '/tmp/cats_and_dogs_filtered'\n\ntrain_dir = os.path.join( base_dir, 'train')\nvalidation_dir = os.path.join( base_dir, 'validation')\n\n\ntrain_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training cat pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our training dog pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation cat pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')# Directory with our validation dog pictures\n\ntrain_cat_fnames = os.listdir(train_cats_dir)\ntrain_dog_fnames = os.listdir(train_dogs_dir)\n\n# Add our data-augmentation parameters to ImageDataGenerator\ntrain_datagen = ImageDataGenerator(rescale = 1./255.,\n rotation_range = 40,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n# Note that the validation data should not be augmented!\ntest_datagen = ImageDataGenerator( rescale = 1.0/255. )\n\n# Flow training images in batches of 20 using train_datagen generator\ntrain_generator = train_datagen.flow_from_directory(train_dir,\n batch_size = 20,\n class_mode = 'binary', \n target_size = (150, 150)) \n\n# Flow validation images in batches of 20 using test_datagen generator\nvalidation_generator = test_datagen.flow_from_directory( validation_dir,\n batch_size = 20,\n class_mode = 'binary', \n target_size = (150, 150))", "--2020-08-27 14:00:39-- https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip\nResolving storage.googleapis.com (storage.googleapis.com)... 108.177.120.128, 173.194.197.128, 74.125.124.128, ...\nConnecting to storage.googleapis.com (storage.googleapis.com)|108.177.120.128|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 68606236 (65M) [application/zip]\nSaving to: ‘/tmp/cats_and_dogs_filtered.zip’\n\n/tmp/cats_and_dogs_ 100%[===================>] 65.43M 64.6MB/s in 1.0s \n\n2020-08-27 14:00:40 (64.6 MB/s) - ‘/tmp/cats_and_dogs_filtered.zip’ saved [68606236/68606236]\n\nFound 2000 images belonging to 2 classes.\nFound 1000 images belonging to 2 classes.\n" ], [ "history = model.fit(\n train_generator,\n validation_data = validation_generator,\n steps_per_epoch = 100,\n epochs = 20,\n validation_steps = 50,\n verbose = 2)", "Epoch 1/20\n100/100 - 24s - loss: 0.3358 - accuracy: 0.8730 - val_loss: 0.1089 - val_accuracy: 0.9580\nEpoch 2/20\n100/100 - 23s - loss: 0.2178 - accuracy: 0.9180 - val_loss: 0.1243 - val_accuracy: 0.9560\nEpoch 3/20\n100/100 - 22s - loss: 0.2270 - accuracy: 0.9175 - val_loss: 0.0995 - val_accuracy: 0.9610\nEpoch 4/20\n100/100 - 23s - loss: 0.2066 - accuracy: 0.9235 - val_loss: 0.1775 - val_accuracy: 0.9420\nEpoch 5/20\n100/100 - 22s - loss: 0.1770 - accuracy: 0.9380 - val_loss: 0.0993 - val_accuracy: 0.9690\nEpoch 6/20\n100/100 - 22s - loss: 0.1839 - accuracy: 0.9375 - val_loss: 0.2148 - val_accuracy: 0.9380\nEpoch 7/20\n100/100 - 22s - loss: 0.1904 - accuracy: 0.9340 - val_loss: 0.1245 - val_accuracy: 0.9620\nEpoch 8/20\n100/100 - 22s - loss: 0.1594 - accuracy: 0.9385 - val_loss: 0.1822 - val_accuracy: 0.9490\nEpoch 9/20\n100/100 - 22s - loss: 0.1823 - accuracy: 0.9430 - val_loss: 0.1039 - val_accuracy: 0.9640\nEpoch 10/20\n100/100 - 22s - loss: 0.1571 - accuracy: 0.9425 - val_loss: 0.1454 - val_accuracy: 0.9560\nEpoch 11/20\n100/100 - 22s - loss: 0.1415 - accuracy: 0.9520 - val_loss: 0.1237 - val_accuracy: 0.9640\nEpoch 12/20\n100/100 - 22s - loss: 0.1396 - accuracy: 0.9500 - val_loss: 0.1357 - val_accuracy: 0.9630\nEpoch 13/20\n100/100 - 23s - loss: 0.1631 - accuracy: 0.9520 - val_loss: 0.1403 - val_accuracy: 0.9620\nEpoch 14/20\n100/100 - 22s - loss: 0.1495 - accuracy: 0.9500 - val_loss: 0.1237 - val_accuracy: 0.9670\nEpoch 15/20\n100/100 - 22s - loss: 0.1502 - accuracy: 0.9490 - val_loss: 0.1366 - val_accuracy: 0.9650\nEpoch 16/20\n100/100 - 22s - loss: 0.1263 - accuracy: 0.9575 - val_loss: 0.1067 - val_accuracy: 0.9690\nEpoch 17/20\n100/100 - 22s - loss: 0.1241 - accuracy: 0.9630 - val_loss: 0.1213 - val_accuracy: 0.9670\nEpoch 18/20\n100/100 - 22s - loss: 0.1288 - accuracy: 0.9555 - val_loss: 0.1186 - val_accuracy: 0.9700\nEpoch 19/20\n100/100 - 22s - loss: 0.1250 - accuracy: 0.9600 - val_loss: 0.1047 - val_accuracy: 0.9710\nEpoch 20/20\n100/100 - 22s - loss: 0.1304 - accuracy: 0.9605 - val_loss: 0.1209 - val_accuracy: 0.9710\n" ], [ "import matplotlib.pyplot as plt\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'r', label='Training accuracy')\nplt.plot(epochs, val_acc, 'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.legend(loc=0)\nplt.figure()\n\n\nplt.show()", "_____no_output_____" ] ] ]
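The plotting cell above extracts ``loss`` and ``val_loss`` from the ``history`` object but only plots accuracy. A small sketch to visualize the loss curves as well, assuming ``history`` and the matplotlib import from the cells above are still in scope:

```python
# Plot the loss values already collected above (assumes `history` and `plt` exist)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))

plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)
plt.show()
```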
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
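A common follow-up to the feature-extraction run above is to fine-tune the top of the frozen InceptionV3 base at a much lower learning rate. The sketch below is an assumption about how one might do that with the ``pre_trained_model`` and ``model`` objects from the notebook; it is not part of the original code:

```python
# Hypothetical fine-tuning step: unfreeze layers from 'mixed7' onwards,
# then recompile with a small learning rate (assumes `pre_trained_model`
# and `model` from the cells above are still in scope).
from tensorflow.keras.optimizers import RMSprop

set_trainable = False
for layer in pre_trained_model.layers:
    if layer.name == 'mixed7':
        set_trainable = True
    layer.trainable = set_trainable

# Recompiling is required for the trainable flags to take effect
model.compile(optimizer=RMSprop(lr=1e-5),
              loss='binary_crossentropy',
              metrics=['accuracy'])
```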
4a6c5c53410d307538ad81a5e944872fcbde481b
655,615
ipynb
Jupyter Notebook
.ipynb_checkpoints/Landuse_classification-Copy10-checkpoint.ipynb
tgrippa/Landscapeunits_features_computation
5d48809f8e7d12950985dc4f5a8ad1cefb5927df
[ "MIT" ]
8
2018-10-23T02:50:30.000Z
2021-01-14T21:00:46.000Z
.ipynb_checkpoints/Landuse_classification-Copy10-checkpoint.ipynb
tgrippa/Landscapeunits_features_computation
5d48809f8e7d12950985dc4f5a8ad1cefb5927df
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Landuse_classification-Copy10-checkpoint.ipynb
tgrippa/Landscapeunits_features_computation
5d48809f8e7d12950985dc4f5a8ad1cefb5927df
[ "MIT" ]
null
null
null
51.296064
4,642
0.632943
[ [ [ "<center> <font size=5> <h1>Define working environment</h1> </font> </center> ", "_____no_output_____" ], [ "The following cells are used to: \n- Import needed libraries\n- Set the environment variables for Python, Anaconda, GRASS GIS and R statistical computing \n- Define the [\"GRASSDATA\" folder](https://grass.osgeo.org/grass73/manuals/helptext.html), the name of the \"location\" and the \"mapset\" where you will work.", "_____no_output_____" ], [ "**Import libraries**", "_____no_output_____" ] ], [ [ "## Import libraries needed for setting parameters of operating system \nimport os\nimport sys", "_____no_output_____" ] ], [ [ "<center> <font size=3> <h3>Environment variables when working on Linux Mint</h3> </font> </center> ", "_____no_output_____" ], [ "**Set 'Python' and 'GRASS GIS' environment variables**", "_____no_output_____" ], [ "Here, we set [the environment variables allowing the use of GRASS GIS](https://grass.osgeo.org/grass64/manuals/variables.html) inside this Jupyter notebook. Please change the directory path according to your own system configuration.", "_____no_output_____" ] ], [ [ "### Define GRASS GIS environment variables for LINUX UBUNTU Mint 18.1 (Serena)\n# Check if environment variables exist and create them (empty) if they do not\nif not 'PYTHONPATH' in os.environ:\n os.environ['PYTHONPATH']=''\nif not 'LD_LIBRARY_PATH' in os.environ:\n os.environ['LD_LIBRARY_PATH']=''\n# Set environment variables\nos.environ['GISBASE'] = '/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu'\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'bin')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'script')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')\n#os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')\nos.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')\nos.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass')\nos.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass','script')\nos.environ['PYTHONLIB'] = '/usr/lib/python2.7'\nos.environ['LD_LIBRARY_PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')\nos.environ['GIS_LOCK'] = '$$'\nos.environ['GISRC'] = os.path.join(os.environ['HOME'],'.grass7','rc')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','bin')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')\nos.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','scripts')\n\n## Define GRASS-Python environment\nsys.path.append(os.path.join(os.environ['GISBASE'],'etc','python'))", "_____no_output_____" ] ], [ [ "**Import GRASS Python packages**", "_____no_output_____" ] ], [ [ "## Import libraries needed to launch GRASS GIS in the jupyter notebook\nimport grass.script.setup as gsetup\n\n## Import libraries needed to call GRASS using Python\nimport grass.script as gscript\nfrom grass.script import core as grass", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "**Display current environment variables of your computer**", "_____no_output_____" ] ], [ [ "## Display the currently defined environment 
variables\nfor key in os.environ.keys():\n print \"%s = %s \\t\" % (key,os.environ[key])", "MDMSESSION = mate \t\nMANDATORY_PATH = /usr/share/gconf/mate.mandatory.path \t\nMATE_DESKTOP_SESSION_ID = this-is-deprecated \t\nLESSOPEN = | /usr/bin/lesspipe %s \t\nMDM_LANG = fr_BE.UTF-8 \t\nLOGNAME = tais \t\nUSER = tais \t\nHOME = /home/tais \t\nXDG_VTNR = 9 \t\nPATH = /usr/local/bin:/home/tais/BIN:/home/tais/bin:/home/tais/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/bin:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/script:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/lib:/home/tais/.grass7/addons:/home/tais/.grass7/addons/bin:/home/tais/.grass7/addons:/home/tais/.grass7/addons/scripts \t\nCLICOLOR = 1 \t\nDISPLAY = :0.0 \t\nSSH_AGENT_PID = 5974 \t\nLANG = fr_BE.UTF-8 \t\nTERM = xterm-color \t\nSHELL = /bin/bash \t\nGIS_LOCK = $$ \t\nXAUTHORITY = /home/tais/.Xauthority \t\nSESSION_MANAGER = local/tais-HP-Z620-Workstation:@/tmp/.ICE-unix/5837,unix/tais-HP-Z620-Workstation:/tmp/.ICE-unix/5837 \t\nSHLVL = 1 \t\nQT_LINUX_ACCESSIBILITY_ALWAYS_ON = 1 \t\nINSIDE_CAJA_PYTHON = \t\nQT_ACCESSIBILITY = 1 \t\nLD_LIBRARY_PATH = :/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/lib \t\nCOMPIZ_CONFIG_PROFILE = mate \t\nWINDOWPATH = 9 \t\nGTK_OVERLAY_SCROLLING = 0 \t\nPYTHONPATH = :/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python/grass:/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu/etc/python/grass/script \t\nGISBASE = /home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu \t\nCLUTTER_BACKEND = x11 \t\nUSERNAME = tais \t\nXDG_SESSION_DESKTOP = mate \t\nGDM_XSERVER_LOCATION = local \t\nXDG_RUNTIME_DIR = /run/user/1000 \t\nJPY_PARENT_PID = 28049 \t\nQT_STYLE_OVERRIDE = gtk \t\nSSH_AUTH_SOCK = /run/user/1000/keyring/ssh \t\nVTE_VERSION = 4205 \t\nGDMSESSION = mate \t\nGISRC = /home/tais/.grass7/rc \t\nGIT_PAGER = cat \t\nXDG_CONFIG_DIRS = /etc/xdg/xdg-mate:/etc/xdg \t\nXDG_CURRENT_DESKTOP = MATE \t\nXDG_SESSION_ID = c21 \t\nDBUS_SESSION_BUS_ADDRESS = unix:abstract=/tmp/dbus-oiw1S789SI,guid=e626cdc47bce079de737e4fe5a3fcda7 \t\n_ = /usr/local/bin/jupyter \t\nXDG_SESSION_COOKIE = 8441891e86e24d76b9616edf516d5734-1514130855.90561-444848216 \t\nDESKTOP_SESSION = mate \t\nWINDOWID = 88080563 \t\nLESSCLOSE = /usr/bin/lesspipe %s %s \t\nDEFAULTS_PATH = /usr/share/gconf/mate.default.path \t\nMPLBACKEND = module://ipykernel.pylab.backend_inline \t\nMDM_XSERVER_LOCATION = local \t\nGTK_MODULES = gail:atk-bridge \t\nXDG_DATA_DIRS = /usr/share/mate:/usr/local/share/:/usr/share/:/usr/share/mdm/ \t\nPWD = /media/tais/data/Dropbox/ULB/MAUPP/Traitements/Landscape_metrics/r.li \t\nCOLORTERM = mate-terminal \t\nPYTHONLIB = /usr/lib/python2.7 \t\nLS_COLORS = 
rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36: \t\nPAGER = cat \t\nXDG_SEAT = seat0 \t\n" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "<center> <font size=5> <h1>Define functions</h1> </font> </center> ", "_____no_output_____" ], [ "This section of the notebook is dedicated to defining functions which will then be called later in the script. If you want to create your own functions, define them here.", "_____no_output_____" ], [ "### Function for computing processing time", "_____no_output_____" ], [ "The \"print_processing_time\" is used to calculate and display the processing time for various stages of the processing chain. At the beginning of each major step, the current time is stored in a new variable, using [time.time() function](https://docs.python.org/2/library/time.html). At the end of the stage in question, the \"print_processing_time\" function is called and takes as argument the name of this new variable containing the recorded time at the beginning of the stage, and an output message.", "_____no_output_____" ] ], [ [ "## Import library for managing time in python\nimport time \n\n## Function \"print_processing_time()\" compute processing time and printing it.\n# The argument \"begintime\" wait for a variable containing the begintime (result of time.time()) of the process for which to compute processing time.\n# The argument \"printmessage\" wait for a string format with information about the process. 
\ndef print_processing_time(begintime, printmessage): \n endtime=time.time() \n processtime=endtime-begintime\n remainingtime=processtime\n\n days=int((remainingtime)/86400)\n remainingtime-=(days*86400)\n hours=int((remainingtime)/3600)\n remainingtime-=(hours*3600)\n minutes=int((remainingtime)/60)\n remainingtime-=(minutes*60)\n seconds=round((remainingtime)%60,1)\n\n if processtime<60:\n finalprintmessage=str(printmessage)+str(seconds)+\" seconds\"\n elif processtime<3600:\n finalprintmessage=str(printmessage)+str(minutes)+\" minutes and \"+str(seconds)+\" seconds\"\n elif processtime<86400:\n finalprintmessage=str(printmessage)+str(hours)+\" hours and \"+str(minutes)+\" minutes and \"+str(seconds)+\" seconds\"\n elif processtime>=86400:\n finalprintmessage=str(printmessage)+str(days)+\" days, \"+str(hours)+\" hours and \"+str(minutes)+\" minutes and \"+str(seconds)+\" seconds\"\n \n return finalprintmessage", "_____no_output_____" ] ], [ [ "### Function for creation of configuration file for r.li (landscape units provided as polygons) (multiprocessed)", "_____no_output_____" ] ], [ [ "##### Function that create the r.li configuration file for a list of landcover raster.\n### It enable to create in one function as many configuration file as the number of raster provided in 'listoflandcoverraster'.\n### It could be use only in case study with a several landcover raster and only one landscape unit layer.\n### So, the landscape unit layer if fixed and there are the landcover raster which change. \n# 'listoflandcoverraster' wait for a list with the name (string) of landcover rasters.\n# 'landscape_polygons' wait for the name (string) of the vector layer containing the polygons to be used as landscape units.\n# 'uniqueid' wait for the name of the 'landscape_polygons' layer's columns containing unique ID for each landscape unit polygon.\n# 'returnlistpath' wait for a boolean value (True/False) according to the fact that a list containing the path to the configuration files is desired.\n# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization.\n\n# Import libraries for multiprocessing \nimport multiprocessing\nfrom multiprocessing import Pool\nfrom functools import partial \n\n# Function that copy the landscape unit raster masks on a new layer with name corresponding to the current 'landcover_raster'\ndef copy_landscapeunitmasks(current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox,cat):\n ### Copy the landscape units mask for the current 'cat'\n # Define the name of the current \"current_landscapeunit_rast\" layer\n current_landscapeunit_rast=current_landcover_raster.split(\"@\")[0]+\"_\"+landscape_polygons.split(\"@\")[0]+\"_\"+str(cat) \n base_landscapeunit_rast=base_landcover_raster.split(\"@\")[0]+\"_\"+landscape_polygons.split(\"@\")[0]+\"_\"+str(cat) \n # Copy the the landscape unit created for the first landcover map in order to match the name of the current landcover map\n gscript.run_command('g.copy', overwrite=True, quiet=True, raster=(base_landscapeunit_rast,current_landscapeunit_rast))\n # Add the line to the maskedoverlayarea variable\n maskedoverlayarea=\"MASKEDOVERLAYAREA \"+current_landscapeunit_rast+\"|\"+landscapeunit_bbox[cat]\n return maskedoverlayarea\n\n# Function that create the r.li configuration file for the base landcover raster and then for all the binary rasters\ndef create_rli_configfile(listoflandcoverraster,landscape_polygons,uniqueid='cat',returnlistpath=True,ncores=2):\n # Check 
if 'listoflandcoverraster' is not empty\n if len(listoflandcoverraster)==0:\n sys.exit(\"The list of landcover raster is empty and should contain at least one raster name\")\n # Check if rasters provided in 'listoflandcoverraster' exists to avoid error in mutliprocessing \n for cur_rast in listoflandcoverraster:\n try:\n mpset=cur_rast.split(\"@\")[1]\n except:\n mpset=\"\"\n if cur_rast.split(\"@\")[0] not in [x[0] for x in gscript.list_pairs(type='raster',mapset=mpset)]:\n sys.exit('Raster <%s> not found' %cur_rast)\n # Check if rasters provided in 'listoflandcoverraster' have the same extend and spatial resolution \n raster={}\n for x, rast in enumerate(raster_list):\n raster[x]=gscript.raster_info(rast)\n key_list=raster.keys()\n for x in key_list[1:]:\n for info in ('north','south','east','west','ewres','nsres'):\n if not raster[0][info]==raster[x][info]:\n sys.exit(\"Some raster provided in the list have different spatial resolution or extend, please check\") \n # Get the version of GRASS GIS \n version=grass.version()['version'].split('.')[0]\n # Define the folder to save the r.li configuration files\n if sys.platform==\"win32\":\n rli_dir=os.path.join(os.environ['APPDATA'],\"GRASS\"+version,\"r.li\")\n else: \n rli_dir=os.path.join(os.environ['HOME'],\".grass\"+version,\"r.li\")\n if not os.path.exists(rli_dir):\n os.makedirs(rli_dir)\n ## Create an ordered list with the 'cat' value of landscape units to be processed.\n try:\n landscape_polygons_mapset=landscape_polygons.split(\"@\")[1]\n except:\n landscape_polygons_mapset=list(gscript.parse_command('g.mapset', flags='p'))[0]\n dbpath=\"$GISDBASE/$LOCATION_NAME/%s/sqlite/sqlite.db\"%landscape_polygons_mapset\n if uniqueid not in list(gscript.parse_command('db.columns', table=landscape_polygons.split(\"@\")[0], database=dbpath)):\n sys.exit('Column <%s> not found in vector layer <%s>' %(uniqueid,landscape_polygons.split(\"@\")[0]))\n else:\n list_cat=[int(x) for x in gscript.parse_command('v.db.select', quiet=True, \n map=landscape_polygons, column=uniqueid, flags='c')]\n list_cat.sort()\n # Declare a empty dictionnary which will contains the north, south, east, west values for each landscape unit\n landscapeunit_bbox={}\n # Declare a empty list which will contain the path of the configation files created\n listpath=[]\n # Declare a empty string variable which will contains the core part of the r.li configuration file\n maskedoverlayarea=\"\"\n # Duplicate 'listoflandcoverraster' in a new variable called 'tmp_list'\n tmp_list=list(listoflandcoverraster)\n # Set the current landcover raster as the first of the list\n base_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time\n # Loop trough the landscape units\n for cat in list_cat:\n # Extract the current landscape unit polygon as temporary vector\n tmp_vect=\"tmp_\"+base_landcover_raster.split(\"@\")[0]+\"_\"+landscape_polygons.split(\"@\")[0]+\"_\"+str(cat)\n gscript.run_command('v.extract', overwrite=True, quiet=True, input=landscape_polygons, cats=cat, output=tmp_vect)\n # Set region to match the extent of the current landscape polygon, with resolution and alignement matching the landcover raster\n gscript.run_command('g.region', vector=tmp_vect, align=base_landcover_raster)\n # Rasterize the landscape unit polygon\n landscapeunit_rast=tmp_vect[4:]\n gscript.run_command('v.to.rast', overwrite=True, quiet=True, input=tmp_vect, output=landscapeunit_rast, use='cat', memory='3000')\n # Remove temporary 
vector\n gscript.run_command('g.remove', quiet=True, flags=\"f\", type='vector', name=tmp_vect)\n # Set the region to match the raster landscape unit extent and save the region info in a dictionary\n region_info=gscript.parse_command('g.region', raster=landscapeunit_rast, flags='g')\n n=str(round(float(region_info['n']),5)) #the config file need 5 decimal for north and south\n s=str(round(float(region_info['s']),5))\n e=str(round(float(region_info['e']),6)) #the config file need 6 decimal for east and west\n w=str(round(float(region_info['w']),6))\n # Save the coordinates of the bbox in the dictionary (n,s,e,w)\n landscapeunit_bbox[cat]=n+\"|\"+s+\"|\"+e+\"|\"+w\n # Add the line to the maskedoverlayarea variable\n maskedoverlayarea+=\"MASKEDOVERLAYAREA \"+landscapeunit_rast+\"|\"+landscapeunit_bbox[cat]+\"\\n\"\n\n # Compile the content of the r.li configuration file\n config_file_content=\"SAMPLINGFRAME 0|0|1|1\\n\"\n config_file_content+=maskedoverlayarea\n config_file_content+=\"RASTERMAP \"+base_landcover_raster+\"\\n\"\n config_file_content+=\"VECTORMAP \"+landscape_polygons+\"\\n\"\n\n # Create a new file and save the content\n configfilename=base_landcover_raster.split(\"@\")[0]+\"_\"+landscape_polygons.split(\"@\")[0]\n path=os.path.join(rli_dir,configfilename)\n listpath.append(path)\n f=open(path, 'w')\n f.write(config_file_content)\n f.close()\n \n # Continue creation of r.li configuration file and landscape unit raster the rest of the landcover raster provided\n while len(tmp_list)>0:\n # Reinitialize 'maskedoverlayarea' variable as an empty string\n maskedoverlayarea=\"\"\n # Set the current landcover raster as the first of the list\n current_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time\n # Copy all the landscape units masks for the current landcover raster\n p=Pool(ncores) #Create a pool of processes and launch them using 'map' function\n func=partial(copy_landscapeunitmasks,current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox) # Set fixed argument of the function\n maskedoverlayarea=p.map(func,list_cat) # Launch the processes for as many items in the list and get the ordered results using map function\n p.close()\n p.join()\n # Compile the content of the r.li configuration file\n config_file_content=\"SAMPLINGFRAME 0|0|1|1\\n\"\n config_file_content+=\"\\n\".join(maskedoverlayarea)+\"\\n\"\n config_file_content+=\"RASTERMAP \"+current_landcover_raster+\"\\n\"\n config_file_content+=\"VECTORMAP \"+landscape_polygons+\"\\n\"\n # Create a new file and save the content\n configfilename=current_landcover_raster.split(\"@\")[0]+\"_\"+landscape_polygons.split(\"@\")[0]\n path=os.path.join(rli_dir,configfilename)\n listpath.append(path)\n f=open(path, 'w')\n f.write(config_file_content)\n f.close()\n \n # Return a list of path of configuration files creates if option actived\n if returnlistpath:\n return listpath", "_____no_output_____" ] ], [ [ "### Function for creation of binary raster from a categorical raster (multiprocessed)", "_____no_output_____" ] ], [ [ "###### Function creating a binary raster for each category of a base raster. \n### The function run within the current region. If a category do not exists in the current region, no binary map will be produce\n# 'categorical_raster' wait for the name of the base raster to be used. 
It is the one from which one binary raster will be produced for each category value\n# 'prefix' wait for a string corresponding to the prefix of the name of the binary raster which will be produced\n# 'setnull' wait for a boolean value (True, False) according to the fact that the output binary should be 1/0 or 1/null\n# 'returnlistraster' wait for a boolean value (True, False) regarding to the fact that a list containing the name of binary raster is desired as return of the function\n# 'category_list' wait for a list of interger corresponding to specific category of the base raster to be used \n# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization\n\n# Import libraries for multiprocessing \nimport multiprocessing\nfrom multiprocessing import Pool\nfrom functools import partial \n\ndef create_binary_raster(categorical_raster,prefix=\"binary\",setnull=False,returnlistraster=True,category_list=None,ncores=2):\n # Check if raster exists to avoid error in mutliprocessing \n try:\n mpset=categorical_raster.split(\"@\")[1]\n except:\n mpset=\"\"\n if categorical_raster not in gscript.list_strings(type='raster',mapset=mpset):\n sys.exit('Raster <%s> not found' %categorical_raster)\n # Check for number of cores doesnt exceed available\n nbcpu=multiprocessing.cpu_count()\n if ncores>=nbcpu:\n ncores=nbcpu-1\n returnlist=[] #Declare empty list for return\n #gscript.run_command('g.region', raster=categorical_raster, quiet=True) #Set the region\n null='null()' if setnull else '0' #Set the value for r.mapcalc\n minclass=1 if setnull else 2 #Set the value to check if the binary raster is empty\n if category_list == None: #If no category_list provided\n category_list=[cl for cl in gscript.parse_command('r.category',map=categorical_raster,quiet=True)]\n for i,x in enumerate(category_list): #Make sure the format is UTF8 and not Unicode\n category_list[i]=x.encode('UTF8')\n category_list.sort(key=float) #Sort the raster categories in ascending.\n p=Pool(ncores) #Create a pool of processes and launch them using 'map' function\n func=partial(get_binary,categorical_raster,prefix,null,minclass) # Set the two fixed argument of the function\n returnlist=p.map(func,category_list) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function\n p.close()\n p.join()\n if returnlistraster:\n return returnlist\n\n#### Function that extract binary raster for a specified class (called in 'create_binary_raster' function)\ndef get_binary(categorical_raster,prefix,null,minclass,cl):\n binary_class=prefix+\"_\"+cl\n gscript.run_command('r.mapcalc', expression=binary_class+'=if('+categorical_raster+'=='+str(cl)+',1,'+null+')',overwrite=True, quiet=True)\n if len(gscript.parse_command('r.category',map=binary_class,quiet=True))>=minclass: #Check if created binary is not empty\n return binary_class\n else:\n gscript.run_command('g.remove', quiet=True, flags=\"f\", type='raster', name=binary_class)", "_____no_output_____" ] ], [ [ "### Function for computation of spatial metrics at landscape level (multiprocessed)", "_____no_output_____" ] ], [ [ "##### Function that compute different landscape metrics (spatial metrics) at landscape level. 
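\n### (Hedged aside, not part of the original add-ons: for class proportions p_i within a landscape unit, the Shannon index is commonly defined as H = -sum(p_i*ln(p_i)) and the Simpson diversity as D = 1-sum(p_i^2); the exact conventions used by r.li may differ slightly.)\n### The minimal pure-Python sketch below, with hypothetical helper names, can be used to sanity-check r.li outputs:\nimport math\ndef shannon_index(proportions): # Hypothetical helper; assumes the proportions sum to 1\n    return -sum(p*math.log(p) for p in proportions if p>0)\ndef simpson_diversity(proportions): # Hypothetical helper\n    return 1-sum(p*p for p in proportions)\n# Example: a unit split 50/30/20 between three classes gives\n# shannon_index([0.5,0.3,0.2]) -> ~1.0297 and simpson_diversity([0.5,0.3,0.2]) -> 0.62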
\n### The metric computed are \"dominance\",\"pielou\",\"renyi\",\"richness\",\"shannon\",\"simpson\".\n### It is important to set the computation region before runing this script so that it match the extent of the 'raster' layer.\n# 'configfile' wait for the path (string) to the configuration file corresponding to the 'raster' layer.\n# 'raster' wait for the name (string) of the landcover map on which landscape metrics will be computed.\n# 'returnlistresult' wait for a boolean value (True/False) according to the fact that a list containing the path to the result files is desired.\n# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization.\n\n# Import libraries for multiprocessing \nimport multiprocessing\nfrom multiprocessing import Pool\nfrom functools import partial \n\ndef compute_landscapelevel_metrics(configfile, raster, spatial_metric):\n filename=raster.split(\"@\")[0]+\"_%s\" %spatial_metric\n outputfile=os.path.join(os.path.split(configfile)[0],\"output\",filename)\n if spatial_metric=='renyi': # The alpha parameter was set to 2 as in https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy\n gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,\n input=raster,config=configfile,alpha='2', output=filename)\n else:\n gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,\n input=raster,config=configfile, output=filename)\n return outputfile\n \ndef get_landscapelevel_metrics(configfile, raster, returnlistresult=True, ncores=2):\n # Check if raster exists to avoid error in mutliprocessing \n try:\n mpset=raster.split(\"@\")[1]\n except:\n mpset=\"\"\n if raster not in gscript.list_strings(type='raster',mapset=mpset):\n sys.exit('Raster <%s> not found' %raster)\n # Check if configfile exists to avoid error in mutliprocessing \n if not os.path.exists(configfile):\n sys.exit('Configuration file <%s> not found' %configfile)\n # List of metrics to be computed\n spatial_metric_list=[\"dominance\",\"pielou\",\"renyi\",\"richness\",\"shannon\",\"simpson\"]\n # Check for number of cores doesnt exceed available\n nbcpu=multiprocessing.cpu_count()\n if ncores>=nbcpu:\n ncores=nbcpu-1\n if ncores>len(spatial_metric_list):\n ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute\n #Declare empty list for return\n returnlist=[] \n # Create a new pool\n p=Pool(ncores)\n # Set the two fixed argument of the 'compute_landscapelevel_metrics' function\n func=partial(compute_landscapelevel_metrics,configfile, raster)\n # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function\n returnlist=p.map(func,spatial_metric_list)\n p.close()\n p.join()\n # Return list of paths to result files\n if returnlistresult:\n return returnlist", "_____no_output_____" ] ], [ [ "### Function for computation of spatial metrics at class level (multiprocessed)", "_____no_output_____" ] ], [ [ "##### Function that compute different landscape metrics (spatial metrics) at class level. 
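\n### (Hedged aside, not part of the original add-ons: several class-level metrics are closely related; mean patch size is essentially the total class area divided by the patch number, and patch density is the patch number divided by the sampling area; the exact conventions used by r.li may differ slightly.)\n### The minimal sketch below, with hypothetical helper names, can be used to cross-check r.li outputs:\ndef mean_patch_size(total_class_area,patch_count): # Hypothetical helper; areas in map units\n    return float(total_class_area)/patch_count if patch_count else 0.0\ndef patch_density(patch_count,sampling_area): # Hypothetical helper\n    return float(patch_count)/sampling_area if sampling_area else 0.0\n# Example: 12 patches covering 36000 m2 within a 100000 m2 unit gives\n# mean_patch_size(36000,12) -> 3000.0 and patch_density(12,100000) -> 0.00012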
\n### The metric computed are \"patch number (patchnum)\",\"patch density (patchdensity)\",\"mean patch size(mps)\",\n### \"coefficient of variation of patch area (padcv)\",\"range of patch area size (padrange)\",\n### \"standard deviation of patch area (padsd)\", \"shape index (shape)\", \"edge density (edgedensity)\".\n### It is important to set the computation region before runing this script so that it match the extent of the 'raster' layer.\n# 'configfile' wait for the path (string) to the configuration file corresponding to the 'raster' layer.\n# 'raster' wait for the name (string) of the landcover map on which landscape metrics will be computed.\n# 'returnlistresult' wait for a boolean value (True/False) according to the fact that a list containing the path to the result files is desired.\n# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization.\n\n# Import libraries for multiprocessing \nimport multiprocessing\nfrom multiprocessing import Pool\nfrom functools import partial \n\ndef compute_classlevel_metrics(configfile, raster, spatial_metric):\n filename=raster.split(\"@\")[0]+\"_%s\" %spatial_metric\n gscript.run_command('r.li.%s' %spatial_metric, overwrite=True,\n input=raster,config=configfile,output=filename)\n outputfile=os.path.join(os.path.split(configfile)[0],\"output\",filename)\n return outputfile\n \ndef get_classlevel_metrics(configfile, raster, returnlistresult=True, ncores=2):\n # Check if raster exists to avoid error in mutliprocessing \n try:\n mpset=raster.split(\"@\")[1]\n except:\n mpset=\"\"\n if raster not in [x.split(\"@\")[0] for x in gscript.list_strings(type='raster',mapset=mpset)]:\n sys.exit('Raster <%s> not found' %raster)\n # Check if configfile exists to avoid error in mutliprocessing \n if not os.path.exists(configfile):\n sys.exit('Configuration file <%s> not found' %configfile)\n # List of metrics to be computed\n spatial_metric_list=[\"patchnum\",\"patchdensity\",\"mps\",\"padcv\",\"padrange\",\"padsd\",\"shape\",\"edgedensity\"]\n # Check for number of cores doesnt exceed available\n nbcpu=multiprocessing.cpu_count()\n if ncores>=nbcpu:\n ncores=nbcpu-1\n if ncores>len(spatial_metric_list):\n ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute\n # Declare empty list for return\n returnlist=[] \n # Create a new pool\n p=Pool(ncores)\n # Set the two fixed argument of the 'compute_classlevel_metrics' function\n func=partial(compute_classlevel_metrics,configfile, raster)\n # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function\n returnlist=p.map(func,spatial_metric_list)\n p.close()\n p.join()\n # Return list of paths to result files\n if returnlistresult:\n return returnlist", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "<center> <font size=5> <h1>User inputs</h1> </font> </center> ", "_____no_output_____" ] ], [ [ "## Define a empty dictionnary for saving user inputs\nuser={}", "_____no_output_____" ], [ "## Enter the path to GRASSDATA folder\nuser[\"gisdb\"] = \"/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga\"\n\n## Enter the name of the location (existing or for a new one)\nuser[\"location\"] = \"SPIE_subset\"\n\n## Enter the EPSG code for this location \nuser[\"locationepsg\"] = \"32630\"\n\n## Enter the name of the mapset to use for 
segmentation\nuser[\"mapsetname\"] = \"test_rli\"", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "# Compute spatial metrics for deriving land use in street blocks", "_____no_output_____" ], [ "**Launch GRASS GIS working session**", "_____no_output_____" ] ], [ [ "## Set the name of the mapset in which to work\nmapsetname=user[\"mapsetname\"]\n\n## Launch GRASS GIS working session in the mapset\nif os.path.exists(os.path.join(user[\"gisdb\"],user[\"location\"],mapsetname)):\n    gsetup.init(os.environ['GISBASE'], user[\"gisdb\"], user[\"location\"], mapsetname)\n    print \"You are now working in mapset '\"+mapsetname+\"'\" \nelse: \n    print \"'\"+mapsetname+\"' mapset doesn't exist in \"+user[\"gisdb\"]", "You are now working in mapset 'test_rli'\n" ] ], [ [ "**Set the path to the r.li folder for configuration files and for results**", "_____no_output_____" ] ], [ [ "os.environ", "_____no_output_____" ], [ "# Define path of the outputfile (in r.li folder)\nversion=grass.version()['version'].split('.')[0] # Get the version of GRASS GIS \nif sys.platform==\"win32\":\n    rli_config_dir=os.path.join(os.environ['APPDATA'],\"GRASS\"+version,\"r.li\")\n    rli_output_dir=os.path.join(os.environ['APPDATA'],\"GRASS\"+version,\"r.li\",\"output\")\nelse: \n    rli_config_dir=os.path.join(os.environ['HOME'],\".grass\"+version,\"r.li\") # Hidden '.grass' folder, consistent with create_rli_configfile above\n    rli_output_dir=os.path.join(os.environ['HOME'],\".grass\"+version,\"r.li\",\"output\")\nif not os.path.exists(rli_config_dir):\n    os.makedirs(rli_config_dir)\nif not os.path.exists(rli_output_dir):\n    os.makedirs(rli_output_dir)\n# Print\nprint \"GRASS GIS add-on's r.li configuration files will be saved under <%s>.\"%(rli_config_dir,)\nprint \"GRASS GIS add-on's r.li outputs will be saved under <%s>.\"%(rli_output_dir,)", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "### Define the name of the base landcover map and landscape units polygons", "_____no_output_____" ] ], [ [ "# Set the name of the 'base' landcover map\nbaselandcoverraster=\"classif@test_rli\"\n# Set the name of the vector polygon layer containing the landscape units\nlandscape_polygons=\"streetblocks@PERMANENT\"", "_____no_output_____" ] ], [ [ "### Import shapefile containing street blocks polygons", "_____no_output_____" ] ], [ [ "# Set the path to the shapefile containing streetblocks polygons\npathtoshp=\"/media/tais/data/Dropbox/ULB/MAUPP/Landuse_mapping/Test_spatial_metrics_computation/Data/Subset_spatial_metrics.shp\"", "_____no_output_____" ], [ "# Import shapefile\ngscript.run_command('v.in.ogr', quiet=True, overwrite=True, input=pathtoshp, output=landscape_polygons)", "_____no_output_____" ] ], [ [ "### Create binary rasters from the base landcover map", "_____no_output_____" ] ], [ [ "# Save time for computing processing time\nbegintime=time.time()\n\n# Create as many binary raster layers as categorical values existing in the base landcover map\ngscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region\npref=baselandcoverraster.split(\"@\")[0]+\"_cl\" #Set the prefix\n\nraster_list=[] # Initialize an empty list for results\nraster_list=create_binary_raster(baselandcoverraster,\n                                 prefix=pref,setnull=True,returnlistraster=True,\n                                 category_list=None,ncores=15) #Extract binary rasters\n
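\n# Hedged safeguard (not in the original workflow): get_binary() returns None for categories whose binary raster turned out empty and was removed, so 'raster_list' may contain None entries; dropping them avoids errors in the next steps.\nraster_list=[r for r in raster_list if r]\n\n# Compute and print 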
processing time\nprint_processing_time(begintime,\"Extraction of binary rasters achieved in \")", "_____no_output_____" ], [ "# Insert the name of the base landcover map at first position in the list\nraster_list.insert(0,baselandcoverraster)\n# Display the raster to be used for landscape analysis\nraster_list", "_____no_output_____" ] ], [ [ "raster_list=['classif@test_rli',\n 'classif_cl_11',\n 'classif_cl_13',\n 'classif_cl_14',\n 'classif_cl_20',\n 'classif_cl_30',\n 'classif_cl_31',\n 'classif_cl_41',\n 'classif_cl_51']", "_____no_output_____" ] ], [ [ "## Create r.li configuration file for a list of landcover rasters", "_____no_output_____" ] ], [ [ "# Save time for computing processin time\nbegintime=time.time()\n# Run creation of r.li configuration file and associated raster layers\nlist_configfile=create_rli_configfile(raster_list,landscape_polygons,uniqueid='gid',returnlistpath=True,ncores=20)\n# Compute and print processing time\nprint_processing_time(begintime,\"Creation of r.li configuration files achieved in \")", "_____no_output_____" ], [ "# Display the path to the configuration files created\nlist_configfile", "_____no_output_____" ] ], [ [ "list_configfile=[u'/home/tais/.grass7/r.li/classif_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_11_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_13_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_14_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_20_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_30_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_31_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_41_streetblocks',\n u'/home/tais/.grass7/r.li/classif_cl_51_streetblocks']", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "## Compute spatial metrics at landscape level", "_____no_output_____" ] ], [ [ "# Initialize an empty list which will contains the resultfiles \nresultfiles=[]", "_____no_output_____" ], [ "# Save time for computing processin time\nbegintime=time.time()\n# Get the path to the configuration file for the base landcover raster\ncurrentconfigfile=list_configfile[0]\n# Get the name of the base landcover raster\ncurrentraster=raster_list[0]\n# Set the region to match the extent of the base raster\ngscript.run_command('g.region', raster=currentraster, quiet=True)\n# Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function\nresultfiles.append(get_landscapelevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=15))\n# Compute and print processing time\nprint_processing_time(begintime,\"Computation of spatial metric achieved in \")", "_____no_output_____" ], [ "resultfiles", "_____no_output_____" ] ], [ [ "resultfiles=[[u'/home/tais/.grass7/r.li/output/classif_dominance',\n u'/home/tais/.grass7/r.li/output/classif_pielou',\n u'/home/tais/.grass7/r.li/output/classif_renyi',\n u'/home/tais/.grass7/r.li/output/classif_richness',\n u'/home/tais/.grass7/r.li/output/classif_shannon',\n u'/home/tais/.grass7/r.li/output/classif_simpson']]", "_____no_output_____" ] ], [ [ "## Compute spatial metrics at class level", "_____no_output_____" ] ], [ [ "# Save time for computing processin time\nbegintime=time.time()\n# Get a list with paths to the configuration file for class level metrics\nclasslevelconfigfiles=list_configfile[1:]\n# Get a list with name of binary landcover raster 
for class level metrics\nclasslevelrasters=raster_list[1:]\n\nfor x,currentraster in enumerate(classlevelrasters[:]):\n # Get the path to the configuration file for the base landcover raster\n currentconfigfile=classlevelconfigfiles[x]\n # Set the region to match the extent of the base raster\n gscript.run_command('g.region', raster=currentraster, quiet=True)\n # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function\n resultfiles.append(get_classlevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=10))\n\n# Compute and print processing time\nprint_processing_time(begintime,\"Computation of spatial metric achieved in \")", "_____no_output_____" ], [ "resultfiles", "_____no_output_____" ], [ "# Flat the 'resultfiles' list which contains several lists\nresultfiles=[item for sublist in resultfiles for item in sublist]", "_____no_output_____" ], [ "resultfiles", "_____no_output_____" ] ], [ [ "resultfiles=[u'/home/tais/.grass7/r.li/output/classif_dominance',\n u'/home/tais/.grass7/r.li/output/classif_pielou',\n u'/home/tais/.grass7/r.li/output/classif_renyi',\n u'/home/tais/.grass7/r.li/output/classif_richness',\n u'/home/tais/.grass7/r.li/output/classif_shannon',\n u'/home/tais/.grass7/r.li/output/classif_simpson',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_11_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_13_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_14_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_20_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_padrange',\n 
u'/home/tais/.grass7/r.li/output/classif_cl_30_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_30_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_31_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_41_edgedensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_patchnum',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_patchdensity',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_mps',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_padcv',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_padrange',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_padsd',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_shape',\n u'/home/tais/.grass7/r.li/output/classif_cl_51_edgedensity']", "_____no_output_____" ] ], [ [ "# Compute some special metrics", "_____no_output_____" ] ], [ [ "# Set pixel value of 'buildings' on the 'baselandcoverraster'\nbuildpixel=11\n# Set the name of the new layer containing height of buildings\nbuildings_height='buildings_height'\n# Set the name of the nDSM layer\nndsm=\"ndsm\"\n# Set the name of the NDVI layer\nndvi=\"ndvi\"\n# Set the name of the NDWI layer\nndwi=\"ndwi\"\n# Set the prefix of SAR textures layer\nSAR_prefix=\"SAR_w\"", "_____no_output_____" ] ], [ [ "### Create raster with nDSM value of 'buildings' pixels", "_____no_output_____" ] ], [ [ "# Save time for computing processin time\nbegintime=time.time()\n# Create a raster layer with height of pixels classified as 'buildings'\ngscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region\nformula=\"%s=if(%s==%s, %s, null())\"%(buildings_height,baselandcoverraster,buildpixel,ndsm)\ngscript.mapcalc(formula, overwrite=True)\n# Compute and print processing time\nprint_processing_time(begintime,\"Creation of layer in \")", "_____no_output_____" ] ], [ [ "### Mean and standard deviation of building's height, SAR textures, NDVI, NDWI", "_____no_output_____" ] ], [ [ "# Save time for computing processin time\nbegintime=time.time()\n# Create a raster layer with height of pixels classified as 'buildings'\ngscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region\nformula=\"%s=if(%s==%s, %s, null())\"%(buildings_height,baselandcoverraster,buildpixel,ndsm)\ngscript.mapcalc(formula, overwrite=True)\n# Compute and print processing time\nprint_processing_time(begintime,\"Creation of layer in \")", "_____no_output_____" ], [ "# Set up a list with name of raster layer to be used\nancillarylayers=[]\nancillarylayers.append(buildings_height)\nancillarylayers.append(ndvi)\nancillarylayers.append(ndwi)\n[ancillarylayers.append(x) for x in gscript.list_strings(\"rast\", 
pattern=SAR_prefix, flag='r')] #Append SAR textures\nprint \"Layer to be used :\\n\\n\"+'\\n'.join(ancillarylayers)", "Layer to be used :\n\nbuildings_height\nndvi\nndwi\nSAR_w11.1@PERMANENT\nSAR_w11.2@PERMANENT\nSAR_w11.3@PERMANENT\nSAR_w11.4@PERMANENT\nSAR_w11.5@PERMANENT\nSAR_w11.6@PERMANENT\nSAR_w11.7@PERMANENT\nSAR_w7.1@PERMANENT\nSAR_w7.2@PERMANENT\nSAR_w7.3@PERMANENT\nSAR_w7.4@PERMANENT\nSAR_w7.5@PERMANENT\nSAR_w7.6@PERMANENT\nSAR_w7.7@PERMANENT\n" ], [ "# Set the path to the file for i.segment.stats results\nisegmentstatsfile=os.path.join(rli_output_dir,\"ancillary_info\")", "_____no_output_____" ], [ "# Save time for computing processing time\nbegintime=time.time()\n###### Compute shape metrics as well as mean and stddev of ancillary layers for each landscape unit\n## Set number of cores to be used\nncores=len(ancillarylayers) \nnbcpu=multiprocessing.cpu_count()\nif ncores>=nbcpu:\n    ncores=nbcpu-1\nif ncores>len(ancillarylayers):\n    ncores=len(ancillarylayers) #Adapt number of cores to number of metrics to compute\n# Run i.segment.stats\ngscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region\nraster_landscapeunits=\"temp_%s\"%landscape_polygons.split(\"@\")[0]\ngscript.run_command('v.to.rast', overwrite=True, input=landscape_polygons, output=raster_landscapeunits, use='cat') \ngscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits,\n                    raster_statistics='stddev,median',\n                    area_measures='area,perimeter,compact_circle,compact_square,fd',\n                    rasters=','.join(ancillarylayers),\n                    csvfile=isegmentstatsfile,\n                    processes=ncores)\n\n# Compute and print processing time\nprint_processing_time(begintime,\"Metrics computed in \")", "_____no_output_____" ], [ "resultfiles.insert(0,isegmentstatsfile)", "_____no_output_____" ], [ "resultfiles", "_____no_output_____" ] ], [ [ "# Combine all .csv files together", "_____no_output_____" ] ], [ [ "## Function that executes a left join of individual .csv files.\n## This merges the r.li results and the i.segment.stats results into a single table, using the landscape unit ID (first column) as join key.\n# The argument \"fileList\" expects a list of strings containing the paths to the individual .csv files to be joined.\n# The argument \"outfile\" expects a string containing the path to the output file to create.\n# The argument \"overwrite\" expects a True/False value allowing or not the overwriting of an existing outfile.\n# The argument \"pattern\" expects a string containing the pattern of filenames to use; wildcards are possible (*.csv for all .csv files). Note that 'pattern' is currently not used by the function.
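\n# As a hedged illustration (values hypothetical), the two kinds of input joined here look like this:\n#   r.li output files contain one line per landscape unit, such as:  RESULT 12|0.842135\n#   i.segment.stats files contain a header line followed by one row per unit, such as:\n#       cat|ndvi_mean|ndvi_stddev\n#       12|0.35|0.08\n# In both cases the first field carries the landscape unit ID, which serves as the join key below.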
\n\nimport os,sys\nimport glob\n\ndef leftjoin_csv(fileList,outfile,separator=\",\",overwrite=False,pattern=None):\n    # Stop execution if outputfile exists and can not be overwritten\n    if os.path.isfile(outfile) and overwrite==False:\n        print \"File '\"+str(outfile)+\"' already exists and overwrite option is not enabled.\"\n    else:\n        if os.path.isfile(outfile) and overwrite==True: # If outputfile exists and can be overwritten\n            os.remove(outfile)\n            print \"File '\"+str(outfile)+\"' has been overwritten.\"\n        if len(fileList)<=1: #Check if there are at least 2 files in the list\n            sys.exit(\"This function requires at least two .csv files to be joined together.\")\n        # Save all the values in a dictionary with key corresponding to the first column\n        outputdict={}\n        header=[]\n        header.append(\"id\") #set name for first column\n        # Loop through all files:\n        for filenum,f in enumerate([open(f) for f in fileList]):\n            for linenum,line in enumerate(f):\n                firstcolumn=line.split(separator)[0]\n                othercolumns=line.split(\"\\n\")[0].split(separator)[1:]\n                if linenum==0: #If first line\n                    if firstcolumn.split(\" \")[0]==\"RESULT\": #If file comes from r.li.* add-ons \n                        header.append(os.path.split(fileList[filenum])[-1].split(\".\")[0])\n                    else:\n                        [header.append(x) for x in othercolumns] #If file comes from i.segment.stats \n                else:\n                    try:\n                        cat_id=firstcolumn.split(\" \")[1]\n                    except:\n                        cat_id=firstcolumn\n                    try:\n                        [outputdict[cat_id].append(x) for x in othercolumns]\n                    except:\n                        outputdict[cat_id]=othercolumns\n            f.close() # Close each input file once processed\n        # Write the dictionary with header in the output csv file\n        outputcsv=open(outfile,\"w\")\n        outputcsv.write(separator.join(header))\n        outputcsv.write(\"\\n\")\n        for key in outputdict.keys():\n            outputcsv.write(key+separator)\n            outputcsv.write(separator.join(outputdict[key]))\n            outputcsv.write(\"\\n\")\n        outputcsv.close()\n        # Create a .csvt file with the type of each column\n        csvt=open(outfile+\"t\",\"w\")\n        results=open(outfile,\"r\")\n        header=results.next()\n        typecolumn=[]\n        typecolumn.append(\"Integer\")\n        for column in header.strip().split(separator)[1:]: # One type entry per column (not per character)\n            typecolumn.append(\"Real\")\n        csvt.write(separator.join(typecolumn))\n        csvt.close()\n        results.close()\n        # Print what happened\n        print str(len(fileList))+\" individual .csv files were joined together.\"", "_____no_output_____" ], [ "# Join all result files together in a new .csv file\noutfile=os.path.join(rli_output_dir,\"land_use_metrics.csv\")\nleftjoin_csv(resultfiles, outfile, separator=\"|\", overwrite=True)", "File '/home/tais/.grass7/r.li/output/land_use_metrics.csv' has been overwritten.\n71 individual .csv files were joined together.\n" ] ], [ [ "# Importing the NDVI layer", "_____no_output_____" ] ], [ [ "break\n## Saving current time for processing time management\nbegintime_ndvi=time.time()\n\n## Import NDVI imagery \nprint (\"Importing NDVI raster imagery at \" + time.ctime())\ngscript.run_command('r.import', \n                  input=\"/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/NDVI/ndvi_georef_ordre2.TIF\", \n                  output=\"ndvi\", overwrite=True)\n\n# Mask null/nodata values\ngscript.run_command('r.null', map=\"ndvi\")\n\nprint_processing_time(begintime_ndvi, \"NDVI imagery has been imported in \")", "_____no_output_____" ] ], [ [ "# Importing the nDSM layer", "_____no_output_____" ] ], [ [ "break\n## Saving current time for processing time management\nbegintime_ndsm=time.time()\n\n## Import nDSM imagery \nprint (\"Importing nDSM raster imagery at \" + time.ctime())\ngrass.run_command('r.import', \n                  
input=\"/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/nDSM/nDSM_mosaik_georef_ordre2.tif\", \n output=\"ndsm\", overwrite=True)\n\n## Define null value for specific value in nDSM raster. Adapt the value to your own data. \n# If there is no null value in your data, comment the next line\ngrass.run_command('r.null', map=\"ndsm\", setnull=\"-999\")\n\n# Make histogram equalisation on grey color.\ngrass.run_command('r.colors', flags='e', map='ndsm', color='grey')\n\nprint_processing_time(begintime_ndsm, \"nDSM imagery has been imported in \")", "_____no_output_____" ] ], [ [ "### Masking the nDSM artifacts", "_____no_output_____" ] ], [ [ "break\n# Import vector with nDSM artifacts zones\ngrass.run_command('v.in.ogr', overwrite=True, \n input=\"/media/tais/data/MAUPP/WorldView3_Ouagadougou/Masque_artifacts_nDSM/Ouaga_mask_artifacts_nDSM.shp\",\n output=\"mask_artifacts_ndsm\")\n\n## Set computational region to match the default region\ngrass.run_command('g.region', overwrite=True, raster=\"ndsm\")\n# Rasterize the vector layer, with value \"0\" on the artifacts zones\ngrass.run_command('v.to.rast', input='mask_artifacts_ndsm', output='mask_artifacts_ndsm', \n use='val', value='0', memory='5000')\n## Set computational region to match the default region\ngrass.run_command('g.region', overwrite=True, raster=\"ndsm\")\n## Create a new nDSM with artifacts filled with '0' value\nformula='tmp_artifact=nmin(ndsm,mask_artifacts_ndsm)'\ngrass.mapcalc(formula, overwrite=True)\n## Remove the artifact mask\ngrass.run_command('g.remove', flags='f', type='raster', name=\"mask_artifacts_ndsm\")\n\n## Rename the new nDSM\ngrass.run_command('g.rename', raster='tmp_artifact,ndsm', overwrite=True)\n\n## Remove the intermediate nDSM layer\ngrass.run_command('g.remove', flags='f', type='raster', name=\"tmp_artifact\")", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "# Define input raster for computing statistics of segments", "_____no_output_____" ] ], [ [ "## Display the name of rasters available in PERMANENT and CLASSIFICATION mapset\nprint grass.read_command('g.list',type=\"raster\", mapset=\"PERMANENT\", flags='rp')\nprint grass.read_command('g.list',type=\"raster\", mapset=user[\"classificationA_mapsetname\"], flags='rp')", "----------------------------------------------\nraster fichiers disponibles dans le jeu de données <PERMANENT> :\nMASK ndsm ndvi opt_blue opt_green opt_nir opt_red\n\n\n----------------------------------------------\nraster fichiers disponibles dans le jeu de données <CLASSIF> :\nzone_morpho\n\n\n" ], [ "## Define the list of raster layers for which statistics will be computed\ninputstats=[]\ninputstats.append(\"opt_blue\")\ninputstats.append(\"opt_green\")\ninputstats.append(\"opt_red\")\ninputstats.append(\"opt_nir\")\ninputstats.append(\"ndsm\")\ninputstats.append(\"ndvi\")\n\nprint \"Layer to be used to compute raster statistics of segments:\\n\"+'\\n'.join(inputstats)", "Layer to be used to compute raster statistics of segments:\nopt_blue\nopt_green\nopt_red\nopt_nir\nndsm\nndvi\n" ], [ "## Define the list of raster statistics to be computed for each raster layer\nrasterstats=[]\nrasterstats.append(\"min\")\nrasterstats.append(\"max\")\nrasterstats.append(\"range\")\nrasterstats.append(\"mean\")\nrasterstats.append(\"stddev\")\n#rasterstats.append(\"coeff_var\") # Seems that this statistic create null values 
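\n# Hedged alternative (helper name hypothetical, not part of i.segment.stats): the coefficient of variation can be derived afterwards from the 'mean' and 'stddev' columns, guarding against a zero mean:\ndef safe_coeff_var(mean,stddev): # Returns the CV in percent, or None when the mean is 0\n    return (stddev/mean)*100 if mean else None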
\nrasterstats.append(\"median\")\nrasterstats.append(\"first_quart\")\nrasterstats.append(\"third_quart\")\nrasterstats.append(\"perc_90\")\n\nprint \"Raster statistics to be computed:\\n\"+'\\n'.join(rasterstats)", "Raster statistics to be computed:\nmin\nmax\nrange\nmean\nstddev\nmedian\nfirst_quart\nthird_quart\nperc_90\n" ], [ "## Define the list of area measures (segment's shape statistics) to be computed\nareameasures=[]\nareameasures.append(\"area\")\nareameasures.append(\"perimeter\")\nareameasures.append(\"compact_circle\")\nareameasures.append(\"compact_square\")\nareameasures.append(\"fd\")\n\nprint \"Area measures to be computed:\\n\"+'\\n'.join(areameasures)", "Area measures to be computed:\narea\nperimeter\ncompact_circle\ncompact_square\nfd\n" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "<center> <font size=5> <h1>Compute objects' statistics</h1> </font> </center> ", "_____no_output_____" ] ], [ [ "## Saving current time for processing time management\nbegintime_computeobjstat=time.time()", "_____no_output_____" ] ], [ [ "## Define the folder where to save the results and create it if necessary", "_____no_output_____" ], [ "In the next cell, please adapt the path to the directory where you want to save the .csv output of i.segment.uspo.", "_____no_output_____" ] ], [ [ "## Folder in which save processing time output\noutputfolder=\"/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF/stats_optical\"\n\n## Create the folder if does not exists\nif not os.path.exists(outputfolder):\n os.makedirs(outputfolder)\n print \"Folder '\"+outputfolder+\"' created\"", "_____no_output_____" ] ], [ [ "### Copy data from other mapset to the current mapset", "_____no_output_____" ], [ "Some data need to be copied from other mapsets into the current mapset.", "_____no_output_____" ], [ "### Remove current mask", "_____no_output_____" ] ], [ [ "## Check if there is a raster layer named \"MASK\"\nif not grass.list_strings(\"rast\", pattern=\"MASK\", mapset=mapsetname, flag='r'):\n print 'There is currently no MASK'\nelse:\n ## Remove the current MASK layer\n grass.run_command('r.mask',flags='r')\n print 'The current MASK has been removed'", "There is currently no MASK\n" ] ], [ [ "***Copy segmentation raster***", "_____no_output_____" ] ], [ [ "## Copy segmentation raster layer from SEGMENTATION mapset to current mapset\ngrass.run_command('g.copy', overwrite=True, \n raster=\"segmentation_raster@\"+user[\"segmentation_mapsetname\"]+\",segments\")", "_____no_output_____" ] ], [ [ "***Copy morphological zone (raster)***", "_____no_output_____" ] ], [ [ "## Copy segmentation raster layer from SEGMENTATION mapset to current mapset\ngrass.run_command('g.copy', overwrite=True, \n raster=\"zone_morpho@\"+user[\"segmentation_mapsetname\"]+\",zone_morpho\")", "_____no_output_____" ] ], [ [ "***Copy morphological zone (vector)***", "_____no_output_____" ] ], [ [ "## Copy segmentation raster layer from SEGMENTATION mapset to current mapset\ngrass.run_command('g.copy', overwrite=True, \n vector=\"zone_morpho@\"+user[\"segmentation_mapsetname\"]+\",zone_morpho\")", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ], [ "# Compute statistics of segments (Full AOI extend)", "_____no_output_____" ], [ "### Compute 
statistics of segments using i.segment.stats", "_____no_output_____" ], [ "The process is made to compute statistics iteratively for each morphological zone, used here as a tile.", "_____no_output_____" ], [ "This section uses the ['i.segment.stats' add-on](https://grass.osgeo.org/grass70/manuals/addons/i.segment.stats.html) to compute statistics for each object. ", "_____no_output_____" ] ], [ [ "## Save name of the layer to be used as tiles\ntile_layer='zone_morpho'+'@'+mapsetname\n## Save name of the segmentation layer to be used by i.segment.stats\nsegment_layer='segments'+'@'+mapsetname\n## Save name of the column containing area_km value\narea_column='area_km2'\n## Save name of the column containing morphological type value\ntype_column='type'\n## Save the prefix to be used for the outputfiles of i.segment.stats\nprefix=\"Segstat\"", "_____no_output_____" ], [ "## Save the list of polygons to be processed (save the 'cat' value)\nlistofregion=list(grass.parse_command('v.db.select', map=tile_layer, \n                                      columns='cat', flags='c'))[:]", "_____no_output_____" ], [ "for count, cat in enumerate(listofregion):\n    print str(count)+\" cat:\"+str(cat)", "0 cat:344\n1 cat:345\n2 cat:346\n3 cat:347\n4 cat:340\n5 cat:341\n6 cat:342\n7 cat:343\n8 cat:348\n9 cat:349\n[... output truncated: entries 10 through 1417 omitted for brevity, one line per tile ...]\n1418 cat:656\n" ] ], [ [ "## Just a print to identify which cat corresponds to which area and morpho type \n\nfor count, cat in enumerate(listofregion):\n    condition=\"cat=\"+cat\n    typemorpho=(grass.read_command('v.db.select', map=tile_layer, \n                                   columns=type_column, where=condition,flags=\"c\"))\n    area=(grass.read_command('v.db.select', map=tile_layer, \n                             columns=area_column, where=condition,flags=\"c\"))\n    print str(count)+\" cat:\"+str(cat)+\" morpho_type:\"+str(typemorpho)+\" area:\"+str(area)", "_____no_output_____" ] ], [ [ "## Initialize an empty string for saving print outputs\ntxtcontent=\"\"\n\n## Running i.segment.stats\nmessagetoprint=\"Start computing statistics for segments to be classified, using i.segment.stats on \"+time.ctime()+\"\\n\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\nbegintime_isegmentstats=time.time()\n\n## Compute total area to be processed for process progression 
information\nprocessed_area=0\nnbrtile=len(listofregion)\nattributes=grass.parse_command('db.univar', flags='g', table=tile_layer.split(\"@\")[0], column=area_column, driver='sqlite')\ntotal_area=float(attributes['sum'])\n\nmessagetoprint=str(nbrtile)+\" region(s) will be processed, covering an area of \"+str(round(total_area,3))+\" Sqkm.\"+\"\\n\\n\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint\n\n## Save time before looping\nbegintime_isegmentstats=time.time()\n\n## Start loop on morphological zones\ncount=1\nfor cat in listofregion[:]:\n    ## Save current time at loop's start. \n    begintime_current_id=time.time()\n    \n    ## Create a computational region for the current polygon\n    condition=\"cat=\"+cat\n    outputname=\"tmp_\"+cat\n    grass.run_command('v.extract', overwrite=True, quiet=True, \n                      input=tile_layer, type='area', where=condition, output=outputname)\n    grass.run_command('g.region', overwrite=True, vector=outputname, align=segment_layer)\n    grass.run_command('r.mask', overwrite=True, raster=tile_layer, maskcats=cat)\n    grass.run_command('g.remove', quiet=True, type=\"vector\", name=outputname, flags=\"f\")\n\n    ## Save size of the current polygon and add it to the already processed area \n    size=round(float(grass.read_command('v.db.select', map=tile_layer, \n                columns=area_column, where=condition,flags=\"c\")),2)\n    \n    ## Print\n    messagetoprint=\"Computing segments' statistics for tile n°\"+str(cat)\n    messagetoprint+=\" (\"+str(count)+\"/\"+str(len(listofregion))+\")\"\n    messagetoprint+=\" corresponding to \"+str(size)+\" km2\"\n    print (messagetoprint)\n    txtcontent+=messagetoprint+\"\\n\"\n    \n    ## Define the csv output file name, according to the optimization function selected\n    outputcsv=os.path.join(outputfolder,prefix+\"_\"+str(cat)+\".csv\")\n    \n    ## Compute statistics of objects using i.segment.stats only with .csv output (no vectormap output).\n    grass.run_command('i.segment.stats', overwrite=True, map=segment_layer, \n                      rasters=','.join(inputstats), raster_statistics=','.join(rasterstats), \n                      area_measures=','.join(areameasures), csvfile=outputcsv, processes='20')\n\n    ## Add the size of the zone to the already processed area\n    processed_area+=size\n    \n    ## Print\n    messagetoprint=print_processing_time(begintime_current_id, \n                                         \"i.segment.stats finishes to process the current tile in \")\n    print (messagetoprint)\n    txtcontent+=messagetoprint+\"\\n\"\n    remainingtile=nbrtile-count\n    if remainingtile>0:\n        messagetoprint=str(round((processed_area/total_area)*100,2))+\" percent of the total area processed. \"\n        messagetoprint+=\"Still \"+str(remainingtile)+\" zone(s) to process.\"+\"\\n\"\n        print (messagetoprint)\n        txtcontent+=messagetoprint+\"\\n\"\n    else:\n        messagetoprint=\"\\n\"\n        print (messagetoprint)\n        txtcontent+=messagetoprint\n    \n    ## Adapt the count \n    count+=1\n    \n## Remove current mask\ngrass.run_command('r.mask', flags='r')\n\n## Compute processing time and print it\nmessagetoprint=print_processing_time(begintime_isegmentstats, \"Statistics computed in \")\nprint (messagetoprint) \ntxtcontent+=messagetoprint\n \n#### Write text file with log of processing time\n## Create the .txt file for processing time output and begin to write\nf = open(os.path.join(outputfolder,mapsetname+\"_processingtime_isegmentstats.txt\"), 'w')\nf.write(mapsetname+\" processing time information for i.segment.stats\"+\"\\n\\n\")\nf.write(txtcontent)\nf.close()", "Start computing statistics for segments to be classified, using i.segment.stats on Thu Nov 23 17:53:37 2017\n\n1682 region(s) will be processed, covering an area of 615.474 Sqkm.\n\n\nComputing segments' statistics for tile n°344 (1/1682) corresponding to 0.07 km2\ni.segment.stats finishes to process the current tile in 16.5 seconds\n0.01 percent of the total area processed. Still 1681 zone(s) to process.\n\n[... output truncated: analogous per-tile progress messages for the remaining 1681 tiles are omitted ...]\n" ], [ "## print\nprint_processing_time(begintime_computeobjstat,\"Object statistics computed in \")", "_____no_output_____" ] ], [ [ "## Concatenate individual .csv files and replace unwanted values", "_____no_output_____" ], [ "BE CAREFUL! Before running the following cells, please check your data to be sure that it makes sense to replace the 'nan', 'null', or 'inf' values with \"0\"", "_____no_output_____" ] ], [ [ "
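## Editorial sketch (hypothetical re-implementation): concat_findreplace is a helper defined earlier in this notebook.\n## The sketch below only clarifies its intent, assuming it concatenates every per-tile .csv matching 'pat'\n## (keeping a single header line) and performs a naive textual replacement of the values in 'findreplacedict'.\nimport glob\n\ndef concat_findreplace_sketch(folder, pat, sep, replacedict, outfile):\n    # 'sep' is kept for signature parity with the real helper\n    files = sorted(glob.glob(os.path.join(folder, pat)))\n    with open(outfile, 'w') as out:\n        for i, path in enumerate(files):\n            with open(path) as f:\n                header = f.readline()\n                if i == 0:\n                    out.write(header)  # keep the header of the first file only\n                for line in f:\n                    for old, new in replacedict.items():\n                        line = line.replace(old, new)  # e.g. 'nan' -> '0' (naive substring replace)\n                    out.write(line)\n    return 'Going to concatenate '+str(len(files))+' .csv files together and replace unwanted values.'", "_____no_output_____" ], [ "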
## Define the outputfile for .csv containing statistics for all segments\noutputfile=os.path.join(outputfolder,\"all_segments_stats.csv\")\nprint outputfile", "/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF/stats_optical/all_segments_stats.csv\n" ], [ "# Create a dictionary with 'keys' to be replaced by 'values' \nfindreplacedict={}\nfindreplacedict['nan']=\"0\"\nfindreplacedict['null']=\"0\"\nfindreplacedict['inf']=\"0\"\n\n# Define the pattern of files to concatenate\npat=prefix+\"_*.csv\"\nsep=\"|\"", "_____no_output_____" ], [ "## Initialize an empty string for saving print outputs\ntxtcontent=\"\"\n\n## Saving current time for processing time management\nbegintime_concat=time.time()\n\n## Print\nmessagetoprint=\"Start concatenating individual .csv files and replacing unwanted values.\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n# Concatenate and replace unwanted values\nmessagetoprint=concat_findreplace(outputfolder,pat,sep,findreplacedict,outputfile)\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n## Compute processing time and print it\nmessagetoprint=print_processing_time(begintime_concat, \"Process achieved in \")\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n#### Write text file with log of processing time\n## Create the .txt file for processing time output and begin to write\nfilepath=os.path.join(outputfolder,mapsetname+\"_processingtime_concatreplace.txt\")\nf = open(filepath, 'w')\nf.write(mapsetname+\" processing time information for concatenation of individual .csv files and replacing of unwanted values.\"+\"\\n\\n\")\nf.write(txtcontent)\nf.close()", "Start concatenating individual .csv files and replacing unwanted values.\nGoing to concatenate 1682 .csv files together and replace unwanted values. Nothing changed. 
No unwanted values found \nProcess achieved in 11 minutes and 12.3 seconds\n" ] ], [ [ "# Create new database in postgresql", "_____no_output_____" ] ], [ [ "# User for postgresql connection\ndbuser=\"tais\"\n# Password of user\ndbpassword=\"tais\"\n# Host of database\nhost=\"localhost\"\n# Name of the new database\ndbname=\"ouaga_fullaoi_localsegment\"\n# Set name of schema for objects statistics\nstat_schema=\"statistics\"\n# Set name of table with statistics of segments - FOR OPTICAL\nobject_stats_table=\"object_stats_optical\"", "_____no_output_____" ], [ "break\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\n# Connect to postgres database\ndb=None\ndb=pg.connect(dbname='postgres', user=dbuser, password=dbpassword, host=host)\n\n# Switch to autocommit: CREATE DATABASE cannot run inside a transaction block\ndb.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\n# Execute the CREATE DATABASE query\ncur=db.cursor()\n#cur.execute('DROP DATABASE IF EXISTS ' + dbname) #Comment this to avoid deleting existing DB\ncur.execute('CREATE DATABASE ' + dbname)\ncur.close()\ndb.close()", "_____no_output_____" ] ], [ [ "### Create PostGIS Extension in the database", "_____no_output_____" ] ], [ [ "break\n# Connect to the database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Open a cursor to perform database operations\ncur=db.cursor()\n# Execute the query\ncur.execute('CREATE EXTENSION IF NOT EXISTS postgis')\n# Make the changes to the database persistent\ndb.commit()\n# Close connection with database\ncur.close()\ndb.close()", "_____no_output_____" ] ], [ [ "<center> <font size=4> <h2>Import statistics of segments in a Postgresql database</h2> </font> </center> ", "_____no_output_____" ], [ "## Create new schema in the postgresql database ", "_____no_output_____" ] ], [ [ "schema=stat_schema", "_____no_output_____" ], [ "from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\n# Connect to postgres database\ndb=None\ndb=pg.connect(dbname=dbname, user='tais', password='tais', host='localhost')\n\n# Switch to autocommit\ndb.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\n# Execute the CREATE SCHEMA query\ncur=db.cursor()\n#cur.execute('DROP SCHEMA IF EXISTS '+schema+' CASCADE') #Comment this to avoid deleting existing DB\ntry:\n    cur.execute('CREATE SCHEMA '+schema)\nexcept Exception as e:\n    print (\"Exception occurred : \"+str(e))\ncur.close()\ndb.close()", "Exception occurred : ERREUR: le schéma « statistics » existe déjà\n\n" ] ], [ [ "## Create a new table", "_____no_output_____" ] ], [ [ "# Connect to an existing database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Open a cursor to perform database operations\ncur=db.cursor()", "_____no_output_____" ], [ "# Drop table if exists:\ncur.execute(\"DROP TABLE IF EXISTS \"+schema+\".\"+object_stats_table)\n# Make the changes to the database persistent\ndb.commit()", "_____no_output_____" ], [ "
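## Editorial sketch (hypothetical header): illustrate the shape of the CREATE TABLE statement that\n## the next cell derives from the csv header. Column names starting with a digit get a 'W' prefix\n## so that they remain valid SQL identifiers.\nsample_header=['cat','area','b1_mean','2015_ndvi_mean']\nsample_query='CREATE TABLE '+stat_schema+'.'+object_stats_table+' (key_value serial PRIMARY KEY, '+sample_header[0]+' text'\nfor column in sample_header[1:]:\n    if column[0] in ('1','2','3','4','5','6','7','8','9','0'):\n        sample_query+=', W'+column+' double precision'\n    else:\n        sample_query+=', '+column+' double precision'\nsample_query+=')'\nprint (sample_query)", "_____no_output_____" ], [ "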
import csv\n\n# Create an empty list for saving the column names\ncolumn_name=[]\n\n# Create a reader for the first csv file in the stack of csv to be imported\npathtofile=os.path.join(outputfolder, outputfile)\nreadercsvSubset=open(pathtofile)\nreadercsv=csv.reader(readercsvSubset, delimiter='|') \nheaderline=readercsv.next()\nprint \"Create a new table '\"+schema+\".\"+object_stats_table+\"' with header corresponding to the first row of file '\"+pathtofile+\"'\"\n\n## Build a query for creation of a new table with an auto-incremental key value (thus avoiding potential duplicates of 'cat' values)\n# The 'cat' column is kept as 'text' here; the statistics columns are created as 'double precision'\n# ('nan', 'inf' and 'null' values were already replaced by \"0\" during the concatenation step)\n# This table will allow to import all individual csv files into a single Postgres table, which will be cleaned afterwards\nquery=\"CREATE TABLE \"+schema+\".\"+object_stats_table+\" (\"\nquery+=\"key_value serial PRIMARY KEY\"\nquery+=\", \"+str(headerline[0])+\" text\"\ncolumn_name.append(str(headerline[0]))\nfor column in headerline[1:]:\n    if column[0] in ('1','2','3','4','5','6','7','8','9','0'):\n        query+=\",\"\n        query+=\" \"+\"W\"+str(column)+\" double precision\"\n        column_name.append(\"W\"+str(column))\n    else:\n        query+=\",\"\n        query+=\" \"+str(column)+\" double precision\"\n        column_name.append(str(column))\nquery+=\")\"\n\n# Execute the CREATE TABLE query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit()\n\n# Close cursor and communication with the database\ncur.close()\ndb.close()", "Create a new table 'statistics.object_stats_optical' with header corresponding to the first row of file '/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF/stats_optical/all_segments_stats.csv'\n" ] ], [ [ "## Copy objects statistics from csv to Postgresql database", "_____no_output_____" ] ], [ [ "# Connect to an existing database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Open a cursor to perform database operations\ncur=db.cursor()", "_____no_output_____" ], [ "## Initialize an empty string for saving print outputs\ntxtcontent=\"\"\n## Saving current time for processing time management\nbegintime_copy=time.time()\n## Print\nmessagetoprint=\"Start copy of segments' statistics in the postgresql table '\"+schema+\".\"+object_stats_table+\"'\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n# Create a query to copy the data from the csv, skipping the header, and filling only the columns which are in the csv (so that the auto-incremental key value keeps working)\nquery=\"COPY \"+schema+\".\"+object_stats_table+\"(\"+', '.join(column_name)+\") \" \nquery+=\" FROM '\"+str(pathtofile)+\"' HEADER DELIMITER '|' CSV;\" \n\n# Execute the COPY FROM CSV query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit() \n\n## Compute processing time and print it\nmessagetoprint=print_processing_time(begintime_copy, \"Process achieved in \")\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n#### Write text file with log of processing time\n## Create the .txt file for processing time output and begin to write\nfilepath=os.path.join(outputfolder,mapsetname+\"_processingtime_PostGimport.txt\")\nf = open(filepath, 'w')\nf.write(mapsetname+\" processing time information for importation of segments' statistics in the PostGreSQL Database.\"+\"\\n\\n\")\nf.write(txtcontent)\nf.close()", "Start copy of segments' statistics in the postgresql table 'statistics.object_stats_optical'\nProcess achieved in 5 minutes and 7.3 seconds\n" ], [ "# Close cursor and communication with the database\ncur.close()\ndb.close()", "_____no_output_____" ] ], [ [ "# Drop duplicate values of CAT", "_____no_output_____" ], [ "Here, we will find duplicates. Indeed, as statistics are computed for each tile (morphological area) with the computational region aligned to the pixel raster, some objects can appear in two different tiles, resulting in duplicates in the \"CAT\" column. \n\nWe first select the \"CAT\" values of the duplicated objects and put them in a list. Then, for each duplicated \"CAT\", we select the key-value (primary key) of the smallest object (area_min). The rows corresponding to those key-values are then removed using the \"DELETE FROM\" query.", "_____no_output_____" ] ], [ [ "
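## Editorial sketch (hypothetical queries): find_duplicated_cat / find_duplicated_key / remove_duplicated_key\n## are helpers defined earlier in this notebook. The strings below only illustrate the logic described above;\n## the name of the area column ('area') is an assumption.\nsketch_find_cats=\"SELECT cat FROM \"+schema+\".\"+object_stats_table+\" GROUP BY cat HAVING count(*)>1\"\n# key_value of the smallest duplicate of a given cat (the one to drop)\nsketch_find_key=\"SELECT key_value FROM \"+schema+\".\"+object_stats_table+\" WHERE cat=%s ORDER BY area ASC LIMIT 1\"\nsketch_delete=\"DELETE FROM \"+schema+\".\"+object_stats_table+\" WHERE key_value=%s\"", "_____no_output_____" ], [ "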
Then, for each duplicated \"CAT\", we select the key-value (primary key) of the smallest object (area_min). The row corresponding to those key-values are then remoed using the \"DELETE FROM\" query.", "_____no_output_____" ] ], [ [ "# Connect to an existing database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Open a cursor to perform database operations\ncur=db.cursor()", "_____no_output_____" ], [ "## Initialize a empty string for saving print outputs\ntxtcontent=\"\"\n## Saving current time for processing time management\nbegintime_removeduplic=time.time()\n## Print\nmessagetoprint=\"Start removing duplicates in the postgresql table '\"+schema+\".\"+object_stats_table+\"'\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n# Find duplicated 'CAT'\nfind_duplicated_cat()\n\n# Remove duplicated\ncount_pass=1\ncount_removedduplic=0\nwhile len(cattodrop)>0:\n messagetoprint=\"Removing duplicates - Pass \"+str(count_pass)\n print (messagetoprint)\n txtcontent+=messagetoprint+\"\\n\"\n find_duplicated_key()\n remove_duplicated_key()\n messagetoprint=str(len(keytodrop))+\" duplicates removed.\"\n print (messagetoprint)\n txtcontent+=messagetoprint+\"\\n\"\n count_removedduplic+=len(keytodrop)\n # Find again duplicated 'CAT'\n find_duplicated_cat()\n count_pass+=1 \n\nmessagetoprint=\"A total of \"+str(count_removedduplic)+\" duplicates were removed.\"\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n## Compute processing time and print it\nmessagetoprint=print_processing_time(begintime_removeduplic, \"Process achieved in \")\nprint (messagetoprint)\ntxtcontent+=messagetoprint+\"\\n\"\n\n#### Write text file with log of processing time\n## Create the .txt file for processing time output and begin to write\nfilepath=os.path.join(outputfolder,mapsetname+\"_processingtime_RemoveDuplic.txt\")\nf = open(filepath, 'w')\nf.write(mapsetname+\" processing time information for removing duplicated objects.\"+\"\\n\\n\")\nf.write(txtcontent)\nf.close()", "Start removing duplicates in the postgresql table 'statistics.object_stats_optical'\nRemoving duplicates - Pass 1\n31 duplicates removed.\nA total of 31 duplicates were removed.\nProcess achieved in 4 minutes and 8.0 seconds\n" ], [ "# Vacuum the current Postgresql database\nvacuum(db)", "_____no_output_____" ] ], [ [ "# Change the primary key from 'key_value' to 'cat'", "_____no_output_____" ] ], [ [ "# Connect to an existing database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Open a cursor to perform database operations\ncur=db.cursor()", "_____no_output_____" ], [ "# Build a query to drop the current constraint on primary key \nquery=\"ALTER TABLE \"+schema+\".\"+object_stats_table+\" \\\nDROP CONSTRAINT \"+object_stats_table+\"_pkey\"\n# Execute the query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit()\n\n# Build a query to change the datatype of 'cat' to 'integer'\nquery=\"ALTER TABLE \"+schema+\".\"+object_stats_table+\" \\\nALTER COLUMN cat TYPE integer USING cat::integer\"\n# Execute the query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit()\n\n# Build a query to add primary key on 'cat'\nquery=\"ALTER TABLE \"+schema+\".\"+object_stats_table+\" \\\nADD PRIMARY KEY (cat)\"\n# Execute the query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit()\n\n# Build a query to drop column 'key_value'\nquery=\"ALTER TABLE \"+schema+\".\"+object_stats_table+\" \\\nDROP 
COLUMN key_value\"\n# Execute the query \ncur.execute(query)\n# Make the changes to the database persistent\ndb.commit()", "_____no_output_____" ], [ "# Vacuum the current Postgresql database\nvacuum(db)", "_____no_output_____" ], [ "# Close cursor and communication with the database\ncur.close()\ndb.close()", "_____no_output_____" ] ], [ [ "### Show first rows of statistics", "_____no_output_____" ] ], [ [ "# Connect to an existing database\ndb=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)\n# Number of line to show (please limit to 100 for saving computing time)\nnbrow=15\n# Query\nquery=\"SELECT * FROM \"+schema+\".\"+object_stats_table+\" \\\nORDER BY cat \\\nASC LIMIT \"+str(nbrow)\n# Execute query through panda\ndf=pd.read_sql(query, db)\n# Show dataframe\ndf.head(15)", "_____no_output_____" ] ], [ [ "<left> <font size=4> <b> End of classification part </b> </font> </left> ", "_____no_output_____" ] ], [ [ "print(\"The script ends at \"+ time.ctime())\nprint_processing_time(begintime_segmentation_full, \"Entire process has been achieved in \")", "_____no_output_____" ] ], [ [ "**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "raw", "markdown", "code", "raw", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "raw" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6c69e94e7a9ab5adfc57536c8e9e8f0ad7716c
182,672
ipynb
Jupyter Notebook
raw/exploratory_computing_with_python/notebook_s3/py_exp_comp_s3_sol.ipynb
MAMBA-python/raw_material
55029cfb4c64ffb5deebf04015fd85a94a0093be
[ "MIT" ]
1
2018-11-30T21:16:40.000Z
2018-11-30T21:16:40.000Z
raw/exploratory_computing_with_python/notebook_s3/py_exp_comp_s3_sol.ipynb
MAMBA-python/raw_material
55029cfb4c64ffb5deebf04015fd85a94a0093be
[ "MIT" ]
null
null
null
raw/exploratory_computing_with_python/notebook_s3/py_exp_comp_s3_sol.ipynb
MAMBA-python/raw_material
55029cfb4c64ffb5deebf04015fd85a94a0093be
[ "MIT" ]
null
null
null
202.070796
18,499
0.886759
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a6c6d515e63839e09f621da42a19f386b47766a
76,710
ipynb
Jupyter Notebook
additional material for report/create_images_for_emigration_reproduction.ipynb
FelixNoessler/Buffers-or-corridors-for-great-crested-newts
3cf0b412249eb884861429b9c4fe9b42aba86edc
[ "MIT" ]
null
null
null
additional material for report/create_images_for_emigration_reproduction.ipynb
FelixNoessler/Buffers-or-corridors-for-great-crested-newts
3cf0b412249eb884861429b9c4fe9b42aba86edc
[ "MIT" ]
null
null
null
additional material for report/create_images_for_emigration_reproduction.ipynb
FelixNoessler/Buffers-or-corridors-for-great-crested-newts
3cf0b412249eb884861429b9c4fe9b42aba86edc
[ "MIT" ]
null
null
null
440.862069
37,848
0.93654
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as scy", "_____no_output_____" ], [ "inds = np.arange(0, 50, 0.001)\ncapacity = 20\n\ny = 1/ (1 + np.exp( -0.1 * (inds - capacity / 2)) )\ny[inds < 5] = 0\n\nplt.plot(inds, y, label='Juveniles')\ny1 = y\ny1[inds >= 5] = 0.01\nplt.plot(inds, y1, '-.', label='Adults')\nplt.text(20, 0.03, '1 %', size=12, color='tab:orange')\nplt.legend()\n\nplt.ylabel('Emigration probability', size =12)\nplt.xlabel('Number of individuals in pond (adults and juveniles)', size=12)\nplt.title('Emigration')\nplt.savefig('images/emigration.svg');", "_____no_output_____" ], [ "hatchlings = np.arange(0,10)\n\nprob = scy.poisson(5).pmf(hatchlings)\n\nplt.plot(hatchlings, prob, 'bo')\nplt.vlines(hatchlings, 0, prob, colors='b', lw=5, alpha=0.5)\nplt.xlabel('Number of fertile female juveniles per female newt', size=12)\nplt.ylabel('Probability', size=12)\nplt.title('Offspring', size=14)\nplt.savefig('images/offspring.svg');", "_____no_output_____" ], [ "import matplotlib as mpl\n\nmpl.rcParams['axes.spines.right'] = False\nmpl.rcParams['axes.spines.top'] = False\nmpl.rcParams['xtick.labelsize'] = 14\nmpl.rcParams['ytick.labelsize'] = 14\n\nfig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,10), sharex=True)\n\nax[0].fill_between([0.3,0.7], 0, 1/(0.7-0.3), alpha=0.3)\nax[1].fill_between([0.13,0.27], 0, 1/(0.27-0.13), alpha=0.3, color='tab:red')\n\nax[0].plot([0.5, 0.5], [0,1/(0.7-0.3)])\nax[0].plot([0.3, 0.3], [0,1/(0.7-0.3)], '--', color='tab:blue')\nax[0].plot([0.7, 0.7], [0,1/(0.7-0.3)], '--', color='tab:blue')\nax[0].text(0.5, 1/(0.7-0.3)+0.5, 'mean-juvenile-\\nmortality-prob',\n ha='center', va='center', size=14)\nax[0].text(0.3, 1/(0.7-0.3)+0.5, r'60 % $\\cdot$ mean',\n ha='center', va='center', size=14)\nax[0].text(0.7, 1/(0.7-0.3)+0.5, r'140 % $\\cdot$ mean',\n ha='center', va='center', size=14)\n\n\nax[1].plot([0.2, 0.2], [0,1/(0.27-0.13)], color='red')\nax[1].plot([0.13, 0.13], [0,1/(0.27-0.13)], '--', color='red')\nax[1].plot([0.27, 0.27], [0,1/(0.27-0.13)], '--', color='red')\nax[1].text(0.2, 1/(0.27-0.13)+0.5, 'mean-adult-\\nmortality-prob',\n ha='center', va='center', size=14)\nax[1].text(0.13, 1/(0.27-0.13)+1.5, r'65 % $\\cdot$ mean',\n ha='center', va='center', size=14)\nax[1].text(0.27, 1/(0.27-0.13)+1.5, r'135 % $\\cdot$ mean',\n ha='center', va='center', size=14)\n\nax[0].set_ylim(0,10)\nax[1].set_ylim(0,10)\nax[0].set_xlim(0,0.8)\n\nax[0].set_ylabel('density, juveniles', size=16) \nax[1].set_ylabel('density, adults', size=16) \nax[1].set_xlabel('mortality probability', size=16)\n\nplt.tight_layout()\nplt.savefig('images/mortality_prob.svg');", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a6c789b04350f50cf526fef821f5e39d474506c
763,247
ipynb
Jupyter Notebook
test_itikz_from_python.ipynb
ea42gh/itikz
22d5fe72822259469da82f62d7447da9678aee04
[ "MIT" ]
null
null
null
test_itikz_from_python.ipynb
ea42gh/itikz
22d5fe72822259469da82f62d7447da9678aee04
[ "MIT" ]
null
null
null
test_itikz_from_python.ipynb
ea42gh/itikz
22d5fe72822259469da82f62d7447da9678aee04
[ "MIT" ]
1
2021-02-10T20:25:26.000Z
2021-02-10T20:25:26.000Z
105.144924
8,923
0.569702
[ [ [ "%load_ext itikz\nimport itikz\nfrom itikz import nicematrix as nM\nimport jinja2\n\nimport numpy as np\nimport sympy as sym\n\nimport panel as pn\npn.extension()\n\n## Invoke itikz without using cell magic\n# itikz.build_commands?\n# itikz.fetch_or_compile_svg?", "_____no_output_____" ] ], [ [ "# 1. Examples from the Original Itikz Notebook ", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix implicit-demo- --template pic --use-xetex --scale 0.5\n\\draw[help lines] grid (5, 5);\n\\draw[fill=magenta!10] (1, 1) rectangle (2, 2);\n\\draw[fill=magenta!10] (2, 1) rectangle (3, 2);\n\\draw[fill=magenta!10] (3, 1) rectangle (4, 2);\n\\draw[fill=magenta!10] (3, 2) rectangle (4, 3);\n\\draw[fill=magenta!10] (2, 3) rectangle (3, 4);", "_____no_output_____" ], [ "%%itikz --temp-dir --template standalone --tex-packages=smartdiagram,amsfonts\n\\smartdiagramset{uniform sequence color=true,\nsequence item border color=black,\nsequence item font size=\\footnotesize,\nsequence item text color=white\n}\n\\smartdiagram[sequence diagram]{\n $\\mathbb{N}$,\n $\\mathbb{Z}$,\n $\\mathbb{Q}$,\n $\\mathbb{R}$,\n $\\mathbb{I}$,\n $\\mathbb{C}$\n}", "_____no_output_____" ], [ "src = r\"\"\"\n\\documentclass[tikz]{standalone}\n\\usepackage{tikz-cd}\n\\usetikzlibrary{cd}\n\n\\begin{document}\n\\begin{tikzcd}\nT\n\\arrow[drr, bend left, \"x\"]\n\\arrow[ddr, bend right, \"y\"]\n\\arrow[dr, dotted, \"{(x,y)}\" description] & & \\\\\n& X \\times_Z Y \\arrow[r, \"p\"] \\arrow[d, \"q\"]\n& X \\arrow[d, \"f\"] \\\\\n& Y \\arrow[r, \"g\"]\n& Z\n\\end{tikzcd}\n\\end{document}\n\"\"\"\n\nitikz.fetch_or_compile_svg(src, prefix=\"cd_\", working_dir=\"/tmp/itikz\", full_err=False, debug=False)", "_____no_output_____" ] ], [ [ "# 2. Linear Algebra Examples", "_____no_output_____" ], [ "## 2.1 Row-echelon Form", "_____no_output_____" ], [ "### 2.2.1 Row Echelon Form Matrix: Numerical Example", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --template standalone --tex-packages=nicematrix,tikz,relsize,amsmath --tikz-libraries=decorations.pathreplacing\n% --nexec=4 --use-dvi --use-xetex \n\\NiceMatrixOptions{code-for-last-row = \\color{blue}, code-for-first-row = \\color{red}}\n$\\begin{pNiceArray}{*5r|r}[left-margin = 4pt, first-col, last-row,\n code-before =\n {\n % ----------------------------------------------------------------------- Row-echelon form Path\n \\tikz \\draw[red] (row-1-|col-1) -- (row-2-|col-1)\n -- (row-2-|col-2) -- (row-3-|col-2)\n -- (row-3-|col-4) -- (row-4-|col-4)\n -- (row-4-|col-7);\n }\n]\n & \\color{red}{\\mathbf{1}} & 1 & 1 & 2 & 2 & \\; 4 \\\\\n & 0 & \\color{red}{\\mathbf{1}} & -1 & 1 & 0 & \\; 1 \\\\\n & 0 & 0 & 0 & \\color{red}{\\mathbf{1}} & -2 & \\; 2 \\\\\n & 0 & 0 & 0 & 0 & 0 & \\; 0 \\\\\n% ------------------------------------------------------------------------------------ Basic and Free Variables\n\\color{blue}{\\begin{matrix} \\\\ \\text{basic}\\\\ \\text{free} \\end{matrix}}\n & \\begin{matrix} x_1 \\\\ \\end{matrix}\n & \\begin{matrix} x_2 \\\\ \\end{matrix}\n & \\begin{matrix} \\\\ x_3=\\alpha \\end{matrix}\n & \\begin{matrix} x_4 \\\\ \\end{matrix}\n & \\begin{matrix} \\\\ x_5=\\beta \\end{matrix}\n &\n\\end{pNiceArray}$", "_____no_output_____" ] ], [ [ "### 2.1.2 Stack of Matrices", "_____no_output_____" ] ], [ [ "mat_rep = r'''\n && A & b \\\\ \\noalign{\\vskip1.5mm}\nE_1 && E_1 A & E_1 b \\\\ \\noalign{\\vskip1.5mm}\nE_2 && E_2 E_1 A & E_2 E_1 b \\\\ \\noalign{\\vskip1.5mm}\n\\dots && \\dots & \\dots \\\\ \\noalign{\\vskip1.5mm}\nE_k && E_k \\dots E_2 E_1 A & E_k 
### 2.1.2 Stack of Matrices", "_____no_output_____" ] ], [ [ "mat_rep = r'''\n && A & b \\\\ \\noalign{\\vskip1.5mm}\nE_1 && E_1 A & E_1 b \\\\ \\noalign{\\vskip1.5mm}\nE_2 && E_2 E_1 A & E_2 E_1 b \\\\ \\noalign{\\vskip1.5mm}\n\\dots && \\dots & \\dots \\\\ \\noalign{\\vskip1.5mm}\nE_k && E_k \\dots E_2 E_1 A & E_k \\dots E_2 E_1 b\n'''\nsubmatrix_locs=[ ['A1','{1-3}{1-4}'],['A2','{2-3}{2-4}'],['A3','{3-3}{3-4}'],['A4','{5-3}{5-4}'],\n ['A5','{2-1}{2-1}'],['A6','{3-1}{3-1}'],['A7','{5-1}{5-1}']\n ]\npivot_locs=[]\ntxt_with_locs=[]\nmat_format='{ccrIr}'\n\nitikz.fetch_or_compile_svg( jinja2.Template( nM.GE_TEMPLATE ).render( preamble=nM.preamble, extension=nM.extension,fig_scale=None, array_names=None,row_echelonPaths=[],\n mat_rep=mat_rep, mat_format=mat_format, submatrix_locs=submatrix_locs, submatrix_names=pivot_locs, txt_with_locs=txt_with_locs, row_echelon_paths=[]),\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=True), nexec=4, keep_file=\"/tmp/itikz/foo\" )", "_____no_output_____" ] ], [ [ "## 2.2 Systeme", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix foo_ --template article --crop --tex-packages=systeme,amsmath,xcolor\n%--use-dvi --use-xetex\n% -----------------------------------------------------\n\\begin{flalign*}\n(\\xi)\n& \\Leftrightarrow \\sysalign{r,r}\\systeme[xyzw]{\n \\boxed{2 x} + 4 y + 8 z + 6 w = 8 @\\color{red}{R_1},\n 2 x + 5 y + 11 z + 7 w = 10 @\\color{red}{R_2},\n -4 x -9 y -20 z -12 w = -17 @\\color{red}{R_3},\n 4 x + 8 y + 13 z + 18 w = 22 @\\color{red}{R_4}\n } \\\\\n%\n\\sysautonum{\\quad \\color{red}{R_{*}\\; \\leftarrow \\;}}\n& \\Leftrightarrow \\sysautonum{\\quad \\color{red}{R_{*}\\; \\leftarrow \\;}}\n\\sysalign{r,r}\\systeme[xyzw]{\n \\boxed{2 x} + 4 y + 8 z + 6 w = 8 @\\color{red}{\\ \\;\\;1 R_1 + 0 R_2 + 0 R_3 + 0 R_4},\n \\boxed{y} + 3 z + w = 2 @\\color{red}{ -1 R_1 + 1 R_2 + 0 R_3 + 0 R_4},\n - y - 4 z = -1 @\\color{red}{\\ \\;\\;2 R_1 + 0 R_2 + 1 R_3 + 0 R_4},\n - 3 z + 6 w = 6 @\\color{red}{ -2 R_1 + 0 R_2 + 0 R_3 + 1 R_4}\n }\n\\end{flalign*}", "_____no_output_____" ] ], [ [ "## 2.3 Programmatic Calls: GE Layout with SymPy and Jinja2", "_____no_output_____" ] ], [ [ "k = sym.Symbol('k'); h = sym.Symbol('h')\nAb = sym.Matrix([[1,2,4,1],[2,k,8,h],[3,7,3,1]]); matrices = [[None, Ab]]; pivots = []; txt=[]\n# we could use row ops, but we want a computational layout:\n# A=A.elementary_row_op('n->n+km', k=-3, row1=2,row2=0 );A\n# A=A.elementary_row_op('n<->m',row1=1,row2=2);A\n\nE1=sym.eye(3);E1[1:,0]=[-2,-3]; A1=E1*Ab; matrices.append([E1,A1]); pivots.append((1,1));txt.append('Pivot at (1,1)')\nE2=sym.eye(3);E2=E2.elementary_row_op('n<->m',row1=1,row2=2); A2=E2*A1; matrices.append([E2,A2]); pivots.append(None); txt.append('Rows 2 <-> 3')\nE3=sym.eye(3);E3[2,1]=4-k; A3=E3*A2; matrices.append([E3,A3]); pivots.append((2,2));txt.append('Pivot at (2,2)')\npivots.append((3,3)); txt.append('In Row Echelon Form')\n\n\nh,m = nM.ge( matrices, Nrhs=[1],\n formater = sym.latex,\n pivot_list=[ [(0,1), [(0,0)] ],\n [(1,1), [(0,0),(1,1)]],\n [(2,1), [(0,0),(1,1)]],\n [(3,1), [(0,0),(1,1),(2,2)]]\n ],\n ref_path_list = [ [0,1, [(0,0) ],'vv','cyan'],\n [1,1, [(0,0),(1,1) ],'hv','cyan'],\n [2,1, [(0,0),(1,1) ],'vh','cyan'],\n [3,1, [(0,0),(1,1),(2,2)],'hh']\n ],\n comment_list = [\"pivot in (1,1)\",\n r\"possible pivot in (2,2) \\\\ \\qquad provided $k \\ne 4$\",\n r\"pivot in (2,2)\\\\ \\qquad after row exchange\",\"pivot in (3,3)\"], # <===== ???? 
Where are they?\n variable_summary = [True,True,True],\n array_names = ['E', ['A', 'b']],\n tmp_dir=\"tmp\", keep_file=\"tmp/m3\"\n)\nh", "_____no_output_____" ] ], [ [ "## 2.4 Back-Substitution: Row Echelon Form, Back-substitution, Standard Form", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix test_ --template article --tex-packages=amssymb,cascade,systeme,nicematrix,tikz,relsize --crop --tikz-libraries=decorations.pathreplacing\n\n% ==================================================================================== Decorate matrix\n \\NiceMatrixOptions{code-for-last-row = \\color{blue}, code-for-first-row = \\color{red}}\n $\\begin{pNiceArray}{*5r|r}[left-margin = 4pt, first-col, last-row,\n code-before =\n {\n % ----------------------------------------------------------------------- Row-echelon form Path\n \\tikz \\draw[red] (row-1-|col-1) -- (row-2-|col-1)\n -- (row-2-|col-2) -- (row-3-|col-2)\n -- (row-3-|col-4) -- (row-4-|col-4)\n -- (row-4-|col-7);\n }\n ]\n & \\color{red}{\\mathbf{1}} & 1 & 1 & 2 & 2 & \\; 4 \\\\\n & 0 & \\color{red}{\\mathbf{1}} & -1 & 1 & 0 & \\; 1 \\\\\n & 0 & 0 & 0 & \\color{red}{\\mathbf{1}} & -2 & \\; 2 \\\\\n & 0 & 0 & 0 & 0 & 0 & \\; 0 \\\\\n % ------------------------------------------------------------------------------------ Basic and Free Variables\n \\color{blue}{\\begin{matrix} \\\\ \\text{basic}\\\\ \\text{free} \\end{matrix}}\n & \\begin{matrix} x_1 \\\\ \\end{matrix}\n & \\begin{matrix} x_2 \\\\ \\end{matrix}\n & \\begin{matrix} \\\\ x_3=\\alpha \\end{matrix}\n & \\begin{matrix} x_4 \\\\ \\end{matrix}\n & \\begin{matrix} \\\\ x_5=\\beta \\end{matrix}\n &\n \\end{pNiceArray}$\n % ==================================================================================== Solve by Back-substitution\n \\vspace{1cm} % below the figure; inkscape cropping fails otherwise...\n\n % ------------------------------------------------------------------------------------ Solve\n {\\ShortCascade%\n {\\ShortCascade%\n {\\ShortCascade%\n {$\\boxed{x_3 = \\alpha, x_5=\\beta}$}%\n {$x_4 = 2 + 2 x_5$}%\n {$\\;\\Rightarrow\\; \\boxed{x_4 = 2 + 2 \\beta}$}%\n }%\n {$x_2 = 1 +x_3-x_4$}%\n {$\\;\\Rightarrow\\; \\boxed{x_2 = -1+\\alpha-2\\beta}$}%\n }%\n {$x_1 = 4 - x_2 - x_3 - 2 x_4 -2 x_5$}%\n {$\\;\\Rightarrow \\; \\boxed{x_1 = 1-\\alpha+2\\beta}.$}\n }%\n %& % --------------------------------------------------------------------------------- Standard Form\n \\vspace{1cm}\n\n {$\\; \\therefore\\; \\left( \\begin{array}{r} x_1 \\\\ x_2 \\\\ x_3 \\\\ x_4 \\\\ x_5 \\end{array} \\right)\n = \\left( \\begin{array}{r} 1 \\\\ -1 \\\\ 0 \\\\ 2 \\\\ 0 \\end{array} \\right)\n + \\alpha \\left( \\begin{array}{r} -1 \\\\ 1 \\\\ 1 \\\\ 0 \\\\ 0 \\end{array} \\right)\n + \\beta \\left( \\begin{array}{r} 2 \\\\ -2 \\\\ 0 \\\\ 2 \\\\ 1 \\end{array} \\right)\n $\n }", "_____no_output_____" ] ], [ [ "## 2.5 QR Decomposition", "_____no_output_____" ] ], [ [ "A = sym.Matrix([[ 1, 1, -1],\n [ 1, -2, 1],\n [-1, -1, 2],\n [ 1, 1, -1]])\nW = sym.Matrix([[ 1, 1, 1],\n [ 1, -3, 0],\n [-1, -1, 2],\n [ 1, 1, 1]])\n\nWtW = W.T @ W\nWtA = W.T @ A\nS = WtW**(-1)\nfor i in range(S.shape[0]):\n S[i,i]=sym.sqrt(S[i,i])\n\nQt = S*W.T\nR = S*WtA\n\nmatrices = [ [ None, None, A, W ],\n [ None, W.T, WtA, WtW ],\n [ S, Qt, R, None ] ]\n\nh,mz = nM.qr( matrices, formater=sym.latex, array_names=True, tmp_dir=\"tmp\", keep_file='tmp/qr_fancy')\nh", "_____no_output_____" ] ], [ [ "# 3 Geometric Figures", "_____no_output_____" ], [ "## 3.1 Graph with Arrows", "_____no_output_____" ] ], [ [ "%%itikz 
--temp-dir --file-prefix graph- --template pic --use-xetex --scale 0.8 --tex-packages=amsmath\n\n% axes\n \\draw[thick,>=stealth,->] (0,-0.2) -- (0,6);\n \\draw[thick,>=stealth,->] (-0.2,0) -- (7,0);\n\n% grid lines\n \\draw[step=1.0,black,thin,dotted,xshift=1cm,yshift=1cm] (-1,-1) grid (6,5);\n\n% draw the output line\n \\draw[thin,draw=red, dashed] (-0.2,-0.1) -- (7,3.5) node[right, text=blue, text width=5em] {};\n\n% starting vector blue, transformed vector red\n \\draw[thick,>=stealth,->,draw=blue] (0,0) -- (5,1) node[right, text=blue, text width=5em] {\\large $\\mathbf{\\begin{pmatrix} 5 \\\\ 1 \\end{pmatrix}}$};\n \\draw[thick,>=stealth,->,dotted,draw=black] (5,1) -- (2,1);\n \\draw[thick,>=stealth,->,draw=blue] (0,0) -- (1,3) node[text=blue, label={[xshift=0.3cm, yshift=-0.1cm]\\large $\\color{blue}{\\mathbf{\\begin{pmatrix} 1 \\\\ 3 \\end{pmatrix}}}$}] (x2) {};\n \\draw[thick,>=stealth,->,dotted,draw=black] (1,3) -- (6,3);", "_____no_output_____" ] ], [ [ "## 3.2 Parallelograms", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 2 --tex-packages=amsmath --tikz-libraries quotes\n\n \\node (n) [draw, minimum width=3cm, minimum height=2cm, xslant=0.8] {};\n \\draw (n.south west) to [\"$u+y$\",pos=0.7,sloped] (n.north east)\n (n.north west) node[above] {$u$} \n to [\"$u-y$\",pos=0.3,sloped] (n.south east) node[below] {$y$};", "_____no_output_____" ], [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 2 --tex-packages=tkz-euclide --tikz-libraries arrows\n\\tkzDefPoint(0,0){A}\n\\tkzDefPoint(30:3){B}\n\\tkzDefShiftPointCoord[B](10:2){C}\n\\tkzDefShiftPointCoord[A](10:2){D}\n\\tkzDrawPolygon(A,...,D)\n\\tkzDrawPoints(A,...,D)", "_____no_output_____" ], [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tikz-libraries shapes.geometric\n\\tikzstyle{every node}=[trapezium, draw, minimum width=3cm,\ntrapezium left angle=120, trapezium right angle=60]\n\n\\node[trapezium stretches=false,minimum height=1cm]\n at (0,0) {A};\n\n\\node[trapezium stretches=false,minimum height=1cm]\n at (0,1.5) {\\fbox{A long }};\n\n\\node[trapezium stretches=false,minimum height=1cm]\n at (0,3) {\\fbox{A long text}};\n\n\\draw[thick,green,|-|] (-1.5,-.5) -- (1.5,-0.5);\n\\draw[thick,green,|-|] (-1.5,0.5) -- (-1.5,-0.5);\n\n\\draw[thick,blue,|-|] (-1.5,1) -- (1.5,1);\n\\draw[thick,blue,|-|] (-1.5,1) -- (-1.5,2);\n\n\\draw[thick,red,|-|] (-1.5,2.5) -- (1.5,2.5);\n\\draw[thick,red,|-|] (-1.5,2.5) -- (-1.5,3.5);\n", "_____no_output_____" ] ], [ [ "## 3.3 Arcs", "_____no_output_____" ], [ "### 3.3.1 Arcs with pgfplot", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tex-packages=pgfplots --tikz-libraries=calc\n\\def\\centerarc[#1](#2)(#3:#4:#5)% Syntax: [draw options] (center) (initial angle:final angle:radius)\n { \\draw[#1] ($(#2)+({#5*cos(#3)},{#5*sin(#3)})$) arc (#3:#4:#5); }\n\n\\centerarc[red,thick,->](0,0)(5:85:1) ;\n\\centerarc[red,thick,->](1,1)(-160:130:1) ;", "_____no_output_____" ] ], [ [ "### 3.3.2 Arcs with tkz-euclide", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tex-packages=tkz-euclide --tikz-libraries=calc\n\\tkzDefPoint(0,0){O}\n\\tkzDefPoint(2,-1){A}\n\\tkzDefPoint(1,1){B}\n\\tkzDrawArc[color=blue](O,A)(B)\n\\tkzDrawArc[color=brown](O,B)(A)\n\\tkzDrawArc(O,B)(A)\n\\tkzDrawLines[add = 0 and .5](O,A O,B)\n\\tkzDrawPoints(O,A,B)\n\\tkzLabelPoints[below](O,A,B)", 
"_____no_output_____" ] ], [ [ "### 3.1.3 Arcs with Tikz", "_____no_output_____" ] ], [ [ "%%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tex-packages=amsmath --tikz-libraries=calc\n\\newcommand{\\cercle}[4]{\n\\node[circle,inner sep=0,minimum size={2*#2}](a) at (#1) {};\n\\draw[red,thick] (a.#3) arc (#3:{#3+#4}:#2);\n}\n \n\\newcommand{\\mycercle}[6]{\n\\node[circle,inner sep=0,minimum size={2*#2}](a) at (#1) {};\n\\draw[#6,line width=#5] (a.#3) arc (#3:{#3+#4}:#2);\n}\n\n\\coordinate (OR) at (0.00, 0.00);\n \\coordinate (center) at (3,2);\n\n\\cercle{center}{2cm}{25}{-90}\n![\\cercle{1,2}{1cm}{15}{130}][1]\n\n\\mycercle {OR} {0.5cm} {0} {270} {1.00} {blue} ;\n", "_____no_output_____" ], [ "itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=True)", "_____no_output_____" ] ], [ [ "## 3.4 3D Examples", "_____no_output_____" ] ], [ [ "%reload_ext itikz\nimport itikz\nfrom itikz import nicematrix as nM\nfrom itikz.tikz import tikz_source\n\nimport jinja2\n\nimport numpy as np\nimport sympy as sym\n\nimport panel as pn\npn.extension()", "_____no_output_____" ], [ "src = tikz_source(\nr\"\"\"% ======================================================= colors\n\\definecolor{la_white}{RGB}{233,235,223} %#E9EBDF\n\\definecolor{la_dark}{RGB}{59,54,81} %#3B3651\n\\definecolor{la_gray}{RGB}{96,112,139} %#60708B\n\\definecolor{la_tan}{RGB}{152,159,122} %#989F7A\n\n% -------------------------------------------------------- axes\n\\draw[-latex] (0,0,0) -- (4,0,0) node[left] {$x$};\n\\draw[-latex] (0,0,0) -- (0,4,0) node[below] {$y$};\n\\draw[-latex] (0,0,0) -- (0,0,4) node[left] {$z$};\n\n% ---------------------------------------------------------- planes\n\\draw[fill=la_tan,opacity=0.3] (-3,0,-3) -- (-3,0,3) -- (3,0,3) -- (3,0,-3) -- cycle;\n\\draw[fill=la_gray,opacity=0.4] (-3,-3,0) -- (-3,3,0) -- (3,3,0) -- (3,-3,0) -- cycle;\n\\draw[thick](-3,0,0)--(3,0,0); % intersection of the planes\n\n% ---------------------------------------------------------- text decoration\n\\node[anchor=south west,align=center] (line) at (3,3,3) {line of\\\\ intersection};\n\\draw[-latex] (line) to[out=180,in=75] (-2,0,0.05);\n\"\"\",\n class_args=\"border=23.14pt\",\n tex_packages=\"tikz-3dplot\",\n preamble=r\"\\tdplotsetmaincoords{70}{110}\",\n tikz_args=r\"tdplot_main_coords,font=\\sffamily\"\n )\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/foo\" )", "_____no_output_____" ], [ "def tikz_source( code,\n class_args=None, tex_packages=None, tikz_libraries=None, extension=\"% no_extension\", \n preamble=\"% preamble\", tikz_args=None):\n template = r\"\"\"\\documentclass[tikz{% for a in class_args %},{{a}}{% endfor %}]{standalone}\n\\pagestyle{empty}\n{% for p in tex_packages %}\n{{p}}\n{% endfor %}\n{% for p in tikz_libraries %}\n\\usetikzlibrary{{p}}\n{% endfor %}\n{{extension}}\n\n\\begin{document}\n{{preamble}}\n\\begin{tikzpicture}{% for p in tikz_args %}{{p}}{% endfor %}\n {{tikz_code}}\n\\end{tikzpicture}\n\\end{document}\n\"\"\"\n import re\n pattern = re.compile( r'(\\[[^]]*])(.*)' )\n def split(arg):\n if arg is None:\n return []\n l = []\n for a in arg.split(\",\"):\n match = pattern.match( a )\n if match:\n l.append( r\"\\usepackage\" + match.group(1) + \"{\" + match.group(2)+ \"}\" )\n else:\n l.append(r\"\\usepackage{\" + a + \"}\")\n return l\n\n\n class_args = [] if class_args is None else [class_args]\n tex_packages = 
split(tex_packages)\n tikz_libraries = [] if tikz_libraries is None else [\"{\"+tikz_libraries+\"}\"]\n tikz_args = [] if tikz_args is None else [\"[\"+tikz_args+\"]\"]\n\n src=jinja2.Template( template )\\\n .render( class_args=class_args,\n tex_packages=tex_packages,\n tikz_libraries=tikz_libraries,\n extension=extension,\n preamble=preamble,\n tikz_args=tikz_args,\n tikz_code=code\n )\n\n return src\n\nsrc = tikz_source(\nr\"\"\"% ======================================================= colors\n\\definecolor{la_white}{RGB}{233,235,223} %#E9EBDF\n\\definecolor{la_dark}{RGB}{59,54,81} %#3B3651\n\\definecolor{la_gray}{RGB}{96,112,139} %#60708B\n\\definecolor{la_tan}{RGB}{152,159,122} %#989F7A\n\n% -------------------------------------------------------- axes\n\\draw[-latex] (0,0,0) -- (4,0,0) node[left] {$x$};\n\\draw[-latex] (0,0,0) -- (0,4,0) node[below] {$y$};\n\\draw[-latex] (0,0,0) -- (0,0,4) node[left] {$z$};\n\n% ---------------------------------------------------------- planes\n\\draw[fill=la_tan,opacity=0.3] (-3,0,-3) -- (-3,0,3) -- (3,0,3) -- (3,0,-3) -- cycle;\n\\draw[fill=la_gray,opacity=0.4] (-3,-3,0) -- (-3,3,0) -- (3,3,0) -- (3,-3,0) -- cycle;\n\\draw[thick](-3,0,0)--(3,0,0); % intersection of the planes\n\n% ---------------------------------------------------------- text decoration\n\\node[anchor=south west,align=center] (line) at (3,3,3) {line of\\\\ intersection};\n\\draw[-latex] (line) to[out=180,in=75] (-2,0,0.05);\n\"\"\",\n class_args=\"border=23.14pt\",\n tex_packages=\"tikz-3dplot\",\n preamble=r\"\\tdplotsetmaincoords{70}{110}\",\n tikz_args=r\"tdplot_main_coords,font=\\sffamily\"\n )\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/foo\" )", "_____no_output_____" ], [ "src = tikz_source(\n r\"\"\"% ======================================================= colors\n \\definecolor{la_white}{RGB}{233,235,223} %#E9EBDF\n \\definecolor{la_dark}{RGB}{59,54,81} %#3B3651\n \\definecolor{la_gray}{RGB}{96,112,139} %#60708B\n \\definecolor{la_tan}{RGB}{152,159,122} %#\n \\definecolor{la_red}{RGB}{152,0,0} %#\n\n \\draw[thin,->] (0,0,0) -- (1,0,0) node[anchor=north east]{$x$};\n \\draw[thin,->] (0,0,0) -- (0,1,0) node[anchor=north west]{$y$};\n \\draw[thin,->] (0,0,0) -- (0,0,1) node[anchor=south]{$z$};\n\n \\tdplotsetcoord{O}{0}{0}{0}\n \\tdplotsetcoord{P}{1.}{90}{-45}\n \\tdplotsetcoord{Q}{1.}{80}{-10}\n \\tdplotsetcoord{W}{1.}{-30}{60}\n\n \\tdplotsetcoord{Pn}{-1.}{90}{-45}\n \\tdplotsetcoord{Qn}{-1.}{80}{-10}\n \n \\foreach \\x in {-0.2,0,...,0.8}\n \\foreach \\y in {-0.2,0,...,0.8}\n {\n \\draw[very thin,gray] (\\x,-0.2) -- (\\x,0.8);\n \\draw[very thin,gray] (-0.2,\\y) -- (0.8,\\y);\n }\n\n %\\def\\x{.5}\n %\\filldraw[\n % draw=la_tan!10,%\n % fill=la_gray!20,%\n %] (0,0,0)\n % -- (\\x,{sqrt(3)*\\x},0)\n % -- (\\x,{sqrt(3)*\\x},1)\n % -- (0,0,1)\n % -- cycle;\n\n %\\draw[color=la_dark!10,fill=la_gray!60, nearly transparent] (O) -- (P) -- (Q) -- cycle;\n \\draw[color=la_dark!10,fill=la_tan!80, nearly transparent] (Pn) -- (Qn) -- (P) -- (Q) -- cycle;\n\n %draw a vector from origin to point (P)\n \\draw[thick,-stealth,color=la_gray] (O) -- (P);\n \\draw[thick,-stealth,color=la_gray] (O) -- (Q);\n \\draw[thick,-stealth,color=la_red] (O) -- (W);\n \"\"\",\n #class_args=\"border=23.14pt\",\n tex_packages=\"ifthen,tikz-3dplot\",\n preamble=r\"\"\"% -----------------------------------------------\n\\tdplotsetmaincoords{70}{70}\n\"\"\",\n 
tikz_args=r\"tdplot_main_coords,font=\\sffamily,scale=3.\"\n)\n\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/bah\" )", "_____no_output_____" ], [ "src = tikz_source(\nr\"\"\"\n% ======================================================= colors\n\\definecolor{la_white}{RGB}{233,235,223} %#E9EBDF\n\\definecolor{la_dark}{RGB}{59,54,81} %#3B3651\n\\definecolor{la_gray}{RGB}{96,112,139} %#60708B\n\\definecolor{la_tan}{RGB}{152,159,122} %#989F7A\n\n\\coordinate (A) at (0.95,3.41);\n\\coordinate (B) at (1.95,0.23);\n\\coordinate (C) at (3.95,1.23);\n\\coordinate (D) at (2.95,4.41);\n\n\\coordinate (E) at (1.90,3.30);\n\\coordinate (F) at (0.25,0.45);\n\\coordinate (G) at (2.25,1.45);\n\\coordinate (H) at (3.90,4.30);\n\n\\coordinate (I) at (-0.2,1.80);\n\\coordinate (J) at (2.78,1.00);\n\\coordinate (K) at (4.78,2.00);\n\\coordinate (L) at (1.80,2.80);\n\n\\path[name path=AB] (A) -- (B);\n\\path[name path=CD] (C) -- (D);\n\\path[name path=EF] (E) -- (F);\n\\path[name path=IJ] (I) -- (J);\n\\path[name path=KL] (K) -- (L);\n\\path[name path=HG] (H) -- (G);\n\\path[name path=IL] (I) -- (L);\n\n\\path [name intersections={of=AB and EF,by=M}];\n\\path [name intersections={of=EF and IJ,by=N}];\n\\path [name intersections={of=AB and IJ,by=O}];\n\\path [name intersections={of=AB and IL,by=P}];\n\\path [name intersections={of=CD and KL,by=Q}];\n\\path [name intersections={of=CD and HG,by=R}];\n\\path [name intersections={of=KL and HG,by=S}];\n\\path [name path=NS] (N) -- (S);\n\\path [name path=FG] (F) -- (G);\n\\path [name intersections={of=NS and AB,by=T}];\n\\path [name intersections={of=FG and AB,by=U}];\n\n\\draw[thick, color=la_dark, fill=la_tan!60] (A) -- (B) -- (C) -- (D) -- cycle;\n%\\draw[thick, color=la_dark, fill=la_tan!60] (E) -- (F) -- (G) -- (H) -- cycle;\n%\\draw[thick, color=la_dark, fill=la_tan!60] (I) -- (J) -- (K) -- (L) -- cycle;\n\n\\draw[thick, color=la_dark, fill=la_gray!50] (P) -- (O) -- (I) -- cycle;\n\\draw[thick, color=la_dark, fill=la_gray!50] (O) -- (J) -- (K) -- (Q) -- cycle;\n\n\\draw[thick, color=la_dark, fill=la_tan!10] (H) -- (E) -- (M) -- (R) -- cycle;\n\\draw[thick, color=la_dark, fill=la_tan!10] (M) -- (N) -- (T) -- cycle;\n\\draw[thick, color=la_dark, fill=la_tan!10] (N) -- (F) -- (U) -- (O) -- cycle;\n\"\"\",\nclass_args=\"border=23.14pt\",\n#tex_packages=\"tikz-3dplot\",\ntikz_libraries=\"positioning,calc,intersections\",\n#preamble=r\"\\tdplotsetmaincoords{70}{110}\",\ntikz_args=r\"scale=1.6\"\n)\n\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/bah\" )", "_____no_output_____" ], [ "src = tikz_source(\nr\"\"\"% ======================================================= colors\n\\definecolor{la_white}{RGB}{233,235,223}\n\\definecolor{la_dark}{RGB}{59,54,81}\n\\definecolor{la_gray}{RGB}{96,112,139}\n\\definecolor{la_tan}{RGB}{152,159,122}\n\\definecolor{la_red}{RGB}{152,0,0}\n\n\\tdplotsetrotatedcoords{00}{30}{0}\n \\begin{scope}[tdplot_rotated_coords]\n \\begin{scope}[canvas is xy plane at z=0]\n \\fill[la_gray,fill opacity=0.3] (-2,-3.5) rectangle (2,3.5); % =============== the plane\n \\draw[very thick] (-2,0) -- (2,0); % line on the plane\n\n \\path (-150:2) coordinate (H) (-1.5,0) coordinate(X);\n \\pgflowlevelsynccm\n \\draw[very thick,-stealth,la_red] (0,0) -- (-30:2.5); % vector 
on the plane?\n \\draw[very thick,-stealth,la_red] (0,0) -- (50:2.5); % vector on the plane?\n \\end{scope} \n \\draw[stealth-] (H) -- ++ (-1,0,0.2) node[pos=1.3]{$E_1$}; % ================= decorate eigenspace E_1\n\n \\draw[stealth-] (X) -- ++ (0,1,0.2) node[pos=1.3] {$X$};\n \\draw[very thick,-stealth,color=la_red] (0,0,0) coordinate (O) -- (1,1,4) node[right]{$p$}; % coords are (y,x,z) ?!\n \\end{scope}\n\n \\pgfmathsetmacro{\\Radius}{1.5}\n \\draw[-stealth] (O) -- (2.5*\\Radius,0,0) node[pos=1.15] {$y$};\n \\draw[-stealth] (O) -- (0,3.5*\\Radius,0) node[pos=1.15] {$x$};\n \\draw[-stealth] (O) -- (0,0,2.5*\\Radius) node[pos=1.05] {$z$};\n\"\"\",\n #class_args=\"border=23.14pt\",\n tex_packages=\"ifthen,tikz-3dplot\",\n preamble=r\"\"\"% -----------------------------------------------\n\\tdplotsetmaincoords{105}{-30}\n\"\"\",\n tikz_args=r\"tdplot_main_coords,font=\\sffamily,scale=1.\"\n)\n\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/bah\" )", "_____no_output_____" ], [ "src = tikz_source(\nr\"\"\"% ======================================================= colors\n \\definecolor{la_white}{RGB}{233,235,223}\n \\definecolor{la_dark}{RGB}{59,54,81}\n \\definecolor{la_gray}{RGB}{96,112,139}\n \\definecolor{la_tan}{RGB}{152,159,122}\n \\definecolor{la_red}{RGB}{152,10,10}\n\n % variables\n \\def\\rvec{.8}\n \\def\\thetavec{30}\n \\def\\phivec{60}\n \n % ------------------------------------------------------------------------ axes\n \\coordinate (O) at (0,0,0);\n \\draw[thick,->] (0,0,0) -- (1,0,0) node[anchor=north east]{$x$};\n \\draw[thick,->] (0,0,0) -- (0,1,0) node[anchor=north west]{$y$};\n \\draw[thick,->] (0,0,0) -- (0,0,1) node[anchor=south] {$z$};\n\n \\filldraw[fill=la_dark, nearly transparent] (-1,1,0) -- (1,1,0) -- (1,-1,0) \n -- (-1,-1,0) -- cycle ; % x-y plane\n\n % a= s(1,2,0), b=t(1,0,.1)\n % 0,OA,0A+OB,0B (0,0,0) -- (s,2s,0) -- (s+t,2s,.1t) -- (t,0,.1t) -- cycle;\n % (0,0,0) -- (-s,-2s,0) -- (-s+t,-2s,.1t) -- (t,0,.1t) -- cycle;\n % take t=.5,s=.5 \n\n \\filldraw[fill=la_tan, nearly transparent] (0,0,0) -- (.5,1,0) -- (1,1,.05)\n -- (.5,0,.05) -- cycle;\n \\filldraw[fill=la_tan, nearly transparent] (0,0,0) -- (-.5,-1,0) -- (0,-1,.05)\n -- (.5,0,.05) -- cycle;\n \n % ------------------------------------------------------------------------ vectors\n \\tdplotsetcoord{P}{\\rvec}{\\thetavec}{\\phivec} % P\n\n \\draw[-stealth,la_red,very thick] (O) -- (P) node[above right] {$P$};\n \\draw[dashed,red] (O) -- (Pxy);\n \\draw[dashed,red] (P) -- (Pxy);\n \\draw[dashed,red] (Py) -- (Pxy);\n \n % ------------------------------------------------------------------------- arcs\n \\tdplotdrawarc[->]{(O)}{0.2}{0}{\\phivec} {anchor=north}{$\\phi$}\n\n \\tdplotsetthetaplanecoords{\\phivec}\n \\tdplotdrawarc[->,tdplot_rotated_coords]{(0,0,0)}{0.5}{0}{\\thetavec} {anchor=south west}{$\\theta$}\n\"\"\",\n# =============================================================================================\n#class_args=\"border=23.14pt\",\ntex_packages=\"ifthen,tikz-3dplot\",\nextension=\nr\"\\tikzset{>=latex} % for LaTeX arrow head\",\npreamble =\nr\"\"\"\n\\tdplotsetmaincoords{70}{120}\n\"\"\",\ntikz_args=r\"tdplot_main_coords,font=\\sffamily,scale=3.\"\n)\n\nitikz.fetch_or_compile_svg( src,\n prefix='test_', working_dir='/tmp/itikz', debug=False,\n **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False),\n nexec=1, keep_file=\"/tmp/bah1\" )", 
"_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a6ca1da692d0f988ccafb72c16e79935394a711
62,762
ipynb
Jupyter Notebook
Code/Clase1_SintaxisFuncionesBooleanos.ipynb
LucaFalchini/prope_programacion
85ab8dd731e9965f8fe694a17e3ee520bb51915c
[ "MIT" ]
6
2022-02-08T22:54:47.000Z
2022-02-08T23:25:08.000Z
Code/Clase1_SintaxisFuncionesBooleanos.ipynb
LucaFalchini/prope_programacion
85ab8dd731e9965f8fe694a17e3ee520bb51915c
[ "MIT" ]
null
null
null
Code/Clase1_SintaxisFuncionesBooleanos.ipynb
LucaFalchini/prope_programacion
85ab8dd731e9965f8fe694a17e3ee520bb51915c
[ "MIT" ]
75
2022-02-04T23:11:23.000Z
2022-03-02T19:43:39.000Z
23.872955
1,172
0.498231
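The `avg_line_length` and `alphanum_fraction` fields in these records can plausibly be computed as below; the exact formulas used to build the dataset are an assumption inferred from the field names.

```python
# Sketch: plausible definitions of two of the per-record statistics above.
def text_stats(text):
    lines = text.splitlines()
    avg_line_length = sum(len(line) for line in lines) / max(len(lines), 1)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / max(len(text), 1)
    return avg_line_length, alphanum_fraction
```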
[ [ [ "# Introducción a Python: Sintaxis, Funciones y Booleanos\n\n<img style=\"float: right; margin: 0px 0px 15px 15px;\" src=\"https://www.python.org/static/community_logos/python-logo.png\" width=\"200px\" height=\"200px\" />\n\n> Bueno, ya que sabemos qué es Python, y que ya tenemos las herramientas para trabajarlo, veremos cómo usarlo. \n\nReferencias:\n- https://www.kaggle.com/learn/python\n___", "_____no_output_____" ], [ "# 1. Sintaxis básica", "_____no_output_____" ], [ "## 1.1 Hello, Python!\n\n¿Qué mejor para empezar que analizar el siguiente pedazo de código?", "_____no_output_____" ] ], [ [ "work_hours = 0\nprint(work_hours)\n\n# ¡A trabajar! Como una hora, no menos, como cinco\nwork_hours = work_hours + 5\n\nif work_hours > 0:\n print(\"Mucho trabajo!\")\n\nrihanna_song = \"Work \" * work_hours\nprint(rihanna_song)", "0\nMucho trabajo!\nWork Work Work Work Work \n" ] ], [ [ "¿Alguien adivina qué salida produce el código anterior?", "_____no_output_____" ], [ "Bueno, veamos línea por línea qué está pasando:", "_____no_output_____" ] ], [ [ "work_hours = 0", "_____no_output_____" ] ], [ [ "**Asignación de variable:** la línea anterior crea una variable llamada `work_hours` y le asigna el valor de `0` usando el símbolo `=`.\n\nA diferencia de otros lenguajes (como Java o `C++`), la asignación de variables en Python:\n- no necesita que la variable `work_hours` sea declarada antes de asignarle un valor;\n- no necesitamos decirle a Python qué tipo de valor tendrá la variable `work_hours` (int, float, str, list...). De hecho, podríamos luego asignarle a `work_hours` otro tipo de valor como un string (cadena de caracteres) o un booleano (`True` o `False`).", "_____no_output_____" ] ], [ [ "print(work_hours)", "0\n" ] ], [ [ "**Llamado a una función**: print es una función de Python que imprime el valor pasado a su argumento. Las funciones son llamadas poniendo paréntesis luego de su nombre, y escribiendo sus argumentos (entradas) dentro de dichos paréntesis.", "_____no_output_____" ] ], [ [ "# ¡A trabajar! Como una hora, no menos, como cinco\nwork_hours = work_hours + 5\n# work_hours += 5 # Esto es completamente equivalente a la linea de arriba", "_____no_output_____" ], [ "print(work_hours)", "5\n" ] ], [ [ "La primer línea es un **comentario**, los cuales en Python comienzan con el símbolo `#`.\n\nA continuación se hace una reasignación. En este caso, estamos asignando a la variable `work_hours` un nuevo valor que involucra una operación aritmética en su propio valor previo.", "_____no_output_____" ] ], [ [ "if work_hours > 0:\n print(\"Mucho trabajo!\")", "Mucho trabajo!\n" ], [ "if work_hours > 10:\n print(\"Mucho trabajo!\")", "_____no_output_____" ] ], [ [ "Todavía no es tiempo de ver **condicionales**, sin embargo, se puede adivinar fácilmente lo que este pedazo de código hace, ya que se puede leer casi literal.\n\nNotemos que la *indentación* es muy importante acá, y especifica qué parte del código pertenece al `if`. Lo que pertenece al `if` empieza por los dos puntos (`:`) y debe ir indentado en el renglón de abajo. Así que mucho cuidado con la indentación, sobretodo si han programado en otros lenguajes en los que este detalle no implica nada.", "_____no_output_____" ], [ "Acá vemos un tipo de variable string (cadena de caracteres). 
Se especifica a Python un objeto tipo string poniendo doble comilla (\"\") o comilla simple ('').", "_____no_output_____" ] ], [ [ "\"Work \" == 'Work '", "_____no_output_____" ], [ "rihanna_song = \"Work \" * work_hours\nprint(rihanna_song)", "Work Work Work Work Work \n" ], [ "a = 5\na", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ], [ "a *= \"A \"\na", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ] ], [ [ "El operador `*` puede ser usado para multiplicar dos números (`3 * 4 evalua en 12`), pero también podemos multiplicar strings por números enteros, y obtenemos un nuevo string que repite el primero esa cantidad de veces.\n\nEn Python suceden muchas cosas de este estilo, muchos \"truquillos\" que ahorran mucho tiempo.", "_____no_output_____" ], [ "## 1.2 Tipos de números en Python y operaciones aritméticas\n\nYa vimos un ejemplo de una variable que contenía un número:", "_____no_output_____" ] ], [ [ "work_hours = 0", "_____no_output_____" ] ], [ [ "Sin embargo, hay varios tipos de \"números\". Si queremos ser más tecnicos, preguntémosle a Python qué tipo de variable es `work_hours`:", "_____no_output_____" ] ], [ [ "type(work_hours)", "_____no_output_____" ] ], [ [ "Vemos que es un entero (`int`). Hay otro tipo de número que encontramos en Python:", "_____no_output_____" ] ], [ [ "type(0.5)", "_____no_output_____" ] ], [ [ "Un número de punto flotante (float) es un número con decimales.\n\nYa conocemos dos funciones estándar de Python: `print()` y `type()`. La última es bien útil para preguntarle a Python \"¿Qué es esto?\".", "_____no_output_____" ], [ "Ahora veamos operaciones aritméticas:", "_____no_output_____" ] ], [ [ "# Operación suma(+)/resta(-)\n5 + 8, 9 - 3", "_____no_output_____" ], [ "# Operación multiplicación(*)\n5 * 8", "_____no_output_____" ], [ "# Operación división(/)\n6 / 7", "_____no_output_____" ], [ "# Operación división entera(//)\n5 // 2", "_____no_output_____" ], [ "# Operación módulo(%)\n5 % 2", "_____no_output_____" ], [ "# Exponenciación(**)\n2**5", "_____no_output_____" ], [ "# Bitwise XOR (^)\n## 2 == 010\n## 5 == 101\n## 2^5 == 111 == 1 * 2**2 + 1 * 2**1 + 1 * 2**0 == 7\n2^5", "_____no_output_____" ] ], [ [ "El orden en que se efectúan las operaciones es justo como nos lo enseñaron en primaria/secundaria:\n\n- PEMDAS: Parentesis, Exponentes, Multiplicación/División, Adición/Sustracción.\n\nAnte la duda siempre usar paréntesis.", "_____no_output_____" ] ], [ [ "# Ejemplo de altura con sombrero\naltura_sombrero_cm = 20\nmi_altura_cm = 183", "_____no_output_____" ], [ "# Que tan alto soy cuando me pongo sombrero?\naltura_total_metros = altura_sombrero_cm + mi_altura_cm / 100\nprint(\"Altura total en metros =\", altura_total_metros, \"?\")", "Altura total en metros = 21.83 ?\n" ], [ "# Que tan alto soy cuando me pongo sombrero?\naltura_total_metros = (altura_sombrero_cm + mi_altura_cm) / 100\nprint(\"Altura total en metros =\", altura_total_metros)", "Altura total en metros = 2.03\n" ], [ "import this", "The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do 
it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n" ] ], [ [ "### 1.2.1 Funciones para trabajar con números", "_____no_output_____" ], [ "`min()` y `max()` devuelven el mínimo y el máximo de sus argumentos, respectivamente...", "_____no_output_____" ] ], [ [ "# min\nmin(1, 8, -5, 4.4, 4.89)", "_____no_output_____" ], [ "# max\nmax(1, 8, -5, 4.4, 4.89)", "_____no_output_____" ] ], [ [ "`abs()` devuelve el valor absoluto de su argumeto:", "_____no_output_____" ] ], [ [ "# abs\nabs(5), abs(-5)", "_____no_output_____" ] ], [ [ "Aparte de ser tipos de variable, `float()` e `int()` pueden ser usados como función para convertir su argumento al tipo especificado (esto lo veremos mejor cuando veamos programación orientada a objetos):", "_____no_output_____" ] ], [ [ "print(float(10))\nprint(int(3.33))\n# They can even be called on strings!\nprint(int('807') + 1)", "10.0\n3\n808\n" ], [ "int(8.99999)", "_____no_output_____" ] ], [ [ "___", "_____no_output_____" ], [ "# 2. Funciones y ayuda en Python", "_____no_output_____" ], [ "## 2.1 Pidiendo ayuda", "_____no_output_____" ], [ "Ya vimos algunas funciones en la sección anterior (`print()`, `abs()`, `min()`, `max()`), pero, ¿y si se nos olvida que hace alguna de ellas?\n\nQue no pande el cúnico, ahí estará siempre la función `help()` para venir al rescate...", "_____no_output_____" ] ], [ [ "# Usar la función help sobre la función round\nhelp(round)", "Help on built-in function round in module builtins:\n\nround(number, ndigits=None)\n Round a number to a given precision in decimal digits.\n \n The return value is an integer if ndigits is omitted or None. Otherwise\n the return value has the same type as the number. ndigits may be negative.\n\n" ], [ "help(max)", "Help on built-in function max in module builtins:\n\nmax(...)\n max(iterable, *[, default=obj, key=func]) -> value\n max(arg1, arg2, *args, *[, key=func]) -> value\n \n With a single iterable argument, return its biggest item. The\n default keyword-only argument specifies an object to return if\n the provided iterable is empty.\n With two or more arguments, return the largest argument.\n\n" ], [ "# Función round\nround(8.99999)", "_____no_output_____" ], [ "round(8.99999, 2)", "_____no_output_____" ], [ "round(146, -2)", "_____no_output_____" ] ], [ [ "### ¡CUIDADO!\nA la función `help()` se le pasa como argumento el nombre de la función, **no la función evaluada**.\n\nSi se le pasa la función evaluada, `help()` dará la ayuda sobre el resultado de la función y no sobre la función como tal.\n\nPor ejemplo,", "_____no_output_____" ] ], [ [ "# Help de una función\nhelp(round)", "Help on built-in function round in module builtins:\n\nround(number, ndigits=None)\n Round a number to a given precision in decimal digits.\n \n The return value is an integer if ndigits is omitted or None. Otherwise\n the return value has the same type as the number. ndigits may be negative.\n\n" ], [ "a = round(10.85)", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ], [ "# Help de una función evaluada\nhelp(round(10.85))", "Help on int object:\n\nclass int(object)\n | int([x]) -> integer\n | int(x, base=10) -> integer\n | \n | Convert a number or string to an integer, or return 0 if no arguments\n | are given. 
If x is a number, return x.__int__(). For floating point\n | numbers, this truncates towards zero.\n | \n | If x is not a number or if base is given, then x must be a string,\n | bytes, or bytearray instance representing an integer literal in the\n | given base. The literal can be preceded by '+' or '-' and be surrounded\n | by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\n | Base 0 means to interpret the base from the string as an integer literal.\n | >>> int('0b100', base=0)\n | 4\n | \n | Built-in subclasses:\n | bool\n | \n | Methods defined here:\n | \n | __abs__(self, /)\n | abs(self)\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __and__(self, value, /)\n | Return self&value.\n | \n | __bool__(self, /)\n | self != 0\n | \n | __ceil__(...)\n | Ceiling of an Integral returns itself.\n | \n | __divmod__(self, value, /)\n | Return divmod(self, value).\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __float__(self, /)\n | float(self)\n | \n | __floor__(...)\n | Flooring an Integral returns itself.\n | \n | __floordiv__(self, value, /)\n | Return self//value.\n | \n | __format__(self, format_spec, /)\n | Default object formatter.\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __getnewargs__(self, /)\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __hash__(self, /)\n | Return hash(self).\n | \n | __index__(self, /)\n | Return self converted to an integer, if self is suitable for use as an index into a list.\n | \n | __int__(self, /)\n | int(self)\n | \n | __invert__(self, /)\n | ~self\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __lshift__(self, value, /)\n | Return self<<value.\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mod__(self, value, /)\n | Return self%value.\n | \n | __mul__(self, value, /)\n | Return self*value.\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __neg__(self, /)\n | -self\n | \n | __or__(self, value, /)\n | Return self|value.\n | \n | __pos__(self, /)\n | +self\n | \n | __pow__(self, value, mod=None, /)\n | Return pow(self, value, mod).\n | \n | __radd__(self, value, /)\n | Return value+self.\n | \n | __rand__(self, value, /)\n | Return value&self.\n | \n | __rdivmod__(self, value, /)\n | Return divmod(value, self).\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __rfloordiv__(self, value, /)\n | Return value//self.\n | \n | __rlshift__(self, value, /)\n | Return value<<self.\n | \n | __rmod__(self, value, /)\n | Return value%self.\n | \n | __rmul__(self, value, /)\n | Return value*self.\n | \n | __ror__(self, value, /)\n | Return value|self.\n | \n | __round__(...)\n | Rounding an Integral returns itself.\n | Rounding with an ndigits argument also returns an integer.\n | \n | __rpow__(self, value, mod=None, /)\n | Return pow(value, self, mod).\n | \n | __rrshift__(self, value, /)\n | Return value>>self.\n | \n | __rshift__(self, value, /)\n | Return self>>value.\n | \n | __rsub__(self, value, /)\n | Return value-self.\n | \n | __rtruediv__(self, value, /)\n | Return value/self.\n | \n | __rxor__(self, value, /)\n | Return value^self.\n | \n | __sizeof__(self, /)\n | Returns size in memory, in bytes.\n | \n | __sub__(self, value, /)\n | Return self-value.\n | \n | __truediv__(self, value, /)\n | Return self/value.\n | \n | __trunc__(...)\n | Truncating an Integral returns itself.\n | \n | __xor__(self, value, /)\n | Return self^value.\n | 
\n | as_integer_ratio(self, /)\n | Return integer ratio.\n | \n | Return a pair of integers, whose ratio is exactly equal to the original int\n | and with a positive denominator.\n | \n | >>> (10).as_integer_ratio()\n | (10, 1)\n | >>> (-10).as_integer_ratio()\n | (-10, 1)\n | >>> (0).as_integer_ratio()\n | (0, 1)\n | \n | bit_length(self, /)\n | Number of bits necessary to represent self in binary.\n | \n | >>> bin(37)\n | '0b100101'\n | >>> (37).bit_length()\n | 6\n | \n | conjugate(...)\n | Returns self, the complex conjugate of any int.\n | \n | to_bytes(self, /, length, byteorder, *, signed=False)\n | Return an array of bytes representing an integer.\n | \n | length\n | Length of bytes object to use. An OverflowError is raised if the\n | integer is not representable with the given number of bytes.\n | byteorder\n | The byte order used to represent the integer. If byteorder is 'big',\n | the most significant byte is at the beginning of the byte array. If\n | byteorder is 'little', the most significant byte is at the end of the\n | byte array. To request the native byte order of the host system, use\n | `sys.byteorder' as the byte order value.\n | signed\n | Determines whether two's complement is used to represent the integer.\n | If signed is False and a negative integer is given, an OverflowError\n | is raised.\n | \n | ----------------------------------------------------------------------\n | Class methods defined here:\n | \n | from_bytes(bytes, byteorder, *, signed=False) from builtins.type\n | Return the integer represented by the given array of bytes.\n | \n | bytes\n | Holds the array of bytes to convert. The argument must either\n | support the buffer protocol or be an iterable object producing bytes.\n | Bytes and bytearray are examples of built-in objects that support the\n | buffer protocol.\n | byteorder\n | The byte order used to represent the integer. If byteorder is 'big',\n | the most significant byte is at the beginning of the byte array. If\n | byteorder is 'little', the most significant byte is at the end of the\n | byte array. To request the native byte order of the host system, use\n | `sys.byteorder' as the byte order value.\n | signed\n | Indicates whether two's complement is used to represent the integer.\n | \n | ----------------------------------------------------------------------\n | Static methods defined here:\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. 
See help(type) for accurate signature.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | denominator\n | the denominator of a rational number in lowest terms\n | \n | imag\n | the imaginary part of a complex number\n | \n | numerator\n | the numerator of a rational number in lowest terms\n | \n | real\n | the real part of a complex number\n\n" ] ], [ [ "Intenten llamar la función `help()` sobre otras funciones a ver si se encuentran algo interesante...", "_____no_output_____" ] ], [ [ "# Help sobre print\nhelp(print)", "Help on built-in function print in module builtins:\n\nprint(...)\n print(value, ..., sep=' ', end='\\n', file=sys.stdout, flush=False)\n \n Prints the values to a stream, or to sys.stdout by default.\n Optional keyword arguments:\n file: a file-like object (stream); defaults to the current sys.stdout.\n sep: string inserted between values, default a space.\n end: string appended after the last value, default a newline.\n flush: whether to forcibly flush the stream.\n\n" ], [ "# Print\nprint(1, 'a', \"Hola, ¿Cómo están?\", sep=\"_este es un separador_\", end=\" \")\nprint(56)", "1_este es un separador_a_este es un separador_Hola, ¿Cómo están? 56\n" ] ], [ [ "## 2.2 Definiendo funciones\n\nLas funciones por defecto de Python son de mucha utilidad. Sin embargo, pronto nos daremos cuenta que sería más útil aún definir nuestras propias funciones para reutilizarlas cada vez que las necesitemos.\n\nPor ejemplo, creemos una función que dados tres números, devuelva la mínima diferencia absoluta entre ellos", "_____no_output_____" ] ], [ [ "# Explicar acá la forma de definir una función\ndef diferencia_minima(a, b, c):\n diff1 = abs(a - b)\n diff2 = abs(a - c)\n diff3 = abs(b - c)\n return min(diff1, diff2, diff3)", "_____no_output_____" ] ], [ [ "Las funciones comienzan con la palabra clave `def`, y el código indentado luego de los dos puntos `:` se corre cuando la función es llamada.\n\n`return` es otra parablra clave que sólo se asocia con funciones. Cuando Python se encuentra un `return`, termina la función inmediatamente y devuelve el valor que hay seguido del `return`.\n\n¿Qué hace específicamente la función que escribimos?", "_____no_output_____" ] ], [ [ "# Ejemplo: llamar la función unas 3 veces\ndiferencia_minima(7, -5, 8)", "_____no_output_____" ], [ "diferencia_minima(7.4, 7, 0)", "_____no_output_____" ], [ "diferencia_minima(7, 6, 8)", "_____no_output_____" ], [ "type(diferencia_minima)", "_____no_output_____" ] ], [ [ "Intentemos llamar `help` sobre la función", "_____no_output_____" ] ], [ [ "help(diferencia_minima)", "Help on function diferencia_minima in module __main__:\n\ndiferencia_minima(a, b, c)\n # Explicar acá la forma de definir una función\n\n" ] ], [ [ "Bueno, Python tampoco es tan listo como para leer código y entregar una buena descripción de la función. Esto es trabajo del diseñador de la función: incluir la documentación.\n\n¿Cómo se hace? 
(Recordar añadir un ejemplo)", "_____no_output_____" ] ], [ [ "# Copiar y pegar la función, pero esta vez, incluir documentación de la misma\ndef diferencia_minima(a, b, c):\n \"\"\"\n This function determines the minimum difference between the\n three arguments passed a, b, c.\n Example:\n >>> diferencia_minima(7, -5, 8)\n 1\n \"\"\"\n diff1 = abs(a - b)\n diff2 = abs(a - c)\n diff3 = abs(b - c)\n return min(diff1, diff2, diff3)", "_____no_output_____" ], [ "# Volver a llamar el help\nhelp(diferencia_minima)", "Help on function diferencia_minima in module __main__:\n\ndiferencia_minima(a, b, c)\n This function determines the minimum difference between the\n three arguments passed a, b, c.\n Example:\n >>> diferencia_minima(7, -5, 8)\n 1\n\n" ] ], [ [ "Muy bien. Ahora, podemos observar que podemos llamar esta función sobre diferentes números, incluso de diferentes tipos:\n\n- Si todos son enteros, entonces nos retornará un entero.\n- Si hay algún float, nos retornará un float.", "_____no_output_____" ] ], [ [ "# Todos enteros\ndiferencia_minima(1, 1, 4)", "_____no_output_____" ], [ "# Uno o más floats\ndiferencia_minima(0., 0., 1)", "_____no_output_____" ] ], [ [ "Sin embargo, no todas las entradas son válidas:", "_____no_output_____" ] ], [ [ "# String: TypeError\ndiferencia_minima('a', 'b', 'c')", "_____no_output_____" ] ], [ [ "### 2.2.1 Funciones que no devuelven\n\n¿Qué pasa si no incluimos el `return` en nuestra función?", "_____no_output_____" ] ], [ [ "# Ejemplo de función sin return\ndef imprimir(a):\n print(a)", "_____no_output_____" ], [ "# Llamar la función un par de veces\nimprimir('Hola a todos')", "Hola a todos\n" ], [ "var = imprimir(\"Hola a todos\")", "Hola a todos\n" ], [ "print(var)", "None\n" ], [ "def write_file(a):\n with open(\"file.txt\", 'w') as f:\n f.write(a)", "_____no_output_____" ], [ "write_file(\"Hola a todos\")", "_____no_output_____" ] ], [ [ "### 2.2.2 Argumentos por defecto\n\nModificar la función `saludo` para que tenga un argumento por defecto.", "_____no_output_____" ] ], [ [ "# Función saludo con argumento por defecto\ndef greetings(name=\"Ashwin\"):\n # print(f\"Welcome, {name}!\")\n # print(\"Welcome, \" + name + \"!\")\n # print(\"Welcome, \", name, \"!\", sep=\"\")\n print(\"Welcome, {}!\".format(name))\n # print(\"Welcome, %s!\" %name)", "_____no_output_____" ], [ "greetings(\"Alejandro\")", "Welcome, Alejandro!\n" ], [ "greetings()", "Welcome, Ashwin!\n" ] ], [ [ "___\n\n# 3. Booleanos y condicionales", "_____no_output_____" ], [ "## 3.1 Booleanos\n\nPython tiene un tipo de objetos de tipo `bool` los cuales pueden tomar uno de dos valores: `True` o `False`.\n\nEjemplo:", "_____no_output_____" ] ], [ [ "x = True\nprint(x)\nprint(type(x))", "True\n<class 'bool'>\n" ] ], [ [ "Normalmente no ponemos `True` o `False` directamente en nuestro código, sino que más bien los obtenemos luego de una operación booleana (operaciones que dan como resultado `True` o `False`).\n\nEjemplos de operaciones:", "_____no_output_____" ] ], [ [ "# ==\n3 == 3.", "_____no_output_____" ], [ "# !=\n2.99999 != 3", "_____no_output_____" ], [ "# <\n8 < 5", "_____no_output_____" ], [ "# >\n8 > 5", "_____no_output_____" ], [ "# <=\n4 <= 4", "_____no_output_____" ], [ "# >=\n5 >= 8", "_____no_output_____" ] ], [ [ "**Nota:** hay una diferencia enorme entre `==` e `=`. 
Con el primero estamos preguntando acerca del valor (`n==2`: ¿es `n` igual a `2`?), mientras que con el segundo asignamos un valor (`n=2`: `n` guarda el valor de `2`).", "_____no_output_____" ], [ "Ejemplo: escribir una función que dado un número nos diga si es impar", "_____no_output_____" ] ], [ [ "# Función para encontrar números impares\ndef odd(num_int):\n return (num_int % 2) != 0", "_____no_output_____" ], [ "def odd(num_int):\n if (num_int % 2) != 0:\n return True\n return False", "_____no_output_____" ], [ "# Probar la función\nodd(5), odd(32)", "_____no_output_____" ], [ "(5, 4, 3) == ((5, 4, 3))", "_____no_output_____" ] ], [ [ "### 3.1.1 Combinando valores booleanos\n\nPython también nos provee operadores básicos para operar con valores booleanos: `and`, `or`, y `not`.\n\nPor ejemplo, podemos definir una función para ver si vale la pena llegar a la taquería de la esquina:", "_____no_output_____" ] ], [ [ "# Función: ¿vale la pena ir a la taquería? distancia, clima, paraguas ...\ndef vale_la_pena_ir_taqueria(distancia, clima, paraguas):\n return (distancia <= 100) and (clima != 'lluvioso' or paraguas == True)", "_____no_output_____" ], [ "# Probar función\nvale_la_pena_ir_taqueria(distancia=50,\n clima=\"soleado\",\n paraguas=False)", "_____no_output_____" ], [ "vale_la_pena_ir_taqueria(distancia=50,\n clima=\"lluvioso\",\n paraguas=False)", "_____no_output_____" ] ], [ [ "También podemos combinar más de dos valores: ¿cuál es el resultado de la siguiente expresión?", "_____no_output_____" ] ], [ [ "(True or True) and False", "_____no_output_____" ] ], [ [ "Uno puede tratar de memorizarse el orden de las operaciones lógicas, así como el de las aritméticas. Sin embargo, en línea con la filosofía de Python, el uso de paréntesis enriquece mucho la legibilidad y no quedan lugares a dudas.\n\nLos siguientes códigos son equivalentes, pero, ¿cuál se lee mejor?", "_____no_output_____" ] ], [ [ "have_umbrella = True\nrain_level = 4\nhave_hood = True\nis_workday = False", "_____no_output_____" ], [ "prepared_for_weather = have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday\nprepared_for_weather", "_____no_output_____" ], [ "prepared_for_weather = have_umbrella or (rain_level < 5 and have_hood) or not (rain_level > 0 and is_workday)\nprepared_for_weather", "_____no_output_____" ], [ "prepared_for_weather = have_umbrella or ((rain_level < 5) and have_hood) or (not (rain_level > 0 and is_workday))\nprepared_for_weather", "_____no_output_____" ], [ "prepared_for_weather = (\n have_umbrella \n or ((rain_level < 5) and have_hood) \n or (not (rain_level > 0 and is_workday))\n)\nprepared_for_weather", "_____no_output_____" ] ], [ [ "___\n\n## 3.2 Condicionales\n\nAunque los booleanos son útiles en si, dan su verdadero salto a la fama cuando se combinan con cláusulas condicionales, usando las palabras clave `if`, `elif`, y `else`.\n\nLos condicionales nos permiten ejecutar ciertas partes de código dependiendo de alguna condición booleana:", "_____no_output_____" ] ], [ [ "# Función de inspección de un número\ndef inspeccion(num):\n if num == 0:\n print('El numero', num, 'es cero')\n elif num > 0:\n print('El numero', num, 'es positivo')\n elif num < 0:\n print('El numero', num, 'es negativo')\n else:\n print('Nunca he visto un numero como', num)", "_____no_output_____" ], [ "# Probar la función\ninspeccion(1), inspeccion(-1), inspeccion(0)", "El numero 1 es positivo\nEl numero -1 es negativo\nEl numero 0 es cero\n" ] ], [ [ "- `if` y `else` se utilizan justo como en 
otros lenguajes. \n\n- Por otra parte, la palabra clave `elif` es una contracción de \"else if\". \n\n- El uso de `elif` y de `else` son opcionales.\n\n- Adicionalmente, se pueden incluir tantos `elif` como se requieran.\n\nComo en las funciones, el bloque de código correspondiente al condicional empieza luego de los dos puntos (`:`), y lo que sigue está indentado 4 espacios (tabulador). Pertenece al condicional todo lo que esté indentado hasta que encontremos una línea sin indentación.\n\nPor ejemplo, analicemos la siguiente función:", "_____no_output_____" ] ], [ [ "def f(x):\n if x > 0:\n print(\"Only printed when x is positive; x =\", x)\n print(\"Also only printed when x is positive; x =\", x)\n print(\"Always printed, regardless of x's value; x =\", x)", "_____no_output_____" ], [ "f(-1)", "Always printed, regardless of x's value; x = -1\n" ] ], [ [ "### 3.2.1 Conversión a booleanos\n\nYa vimos que la función `int()` convierte sus argumentos en enteros, y `float()` los convierte en números de punto flotante.\n\nDe manera similar `bool()` convierte sus argumentos en booleanos.", "_____no_output_____" ] ], [ [ "print(bool(1)) # Todos los números excepto el cero 0 se tratan como True\nprint(bool(0))\nprint(bool(\"asf\")) # Todos los strings excepto el string vacío \"\" se tratan como True\nprint(bool(\"\")) # No confundir el string vacío \"\" con un espacio \" \"", "True\nFalse\nTrue\nFalse\n" ], [ "bool(\" \")", "_____no_output_____" ] ], [ [ "Por ejemplo, ¿qué imprime el siguiente código?", "_____no_output_____" ] ], [ [ "if 0:\n print(0)\nelif \"tocino\":\n print(\"tocino\")", "tocino\n" ] ], [ [ "Las siguientes celdas son equivalentes. Sin embargo, por la legibilidad preferimos la primera:", "_____no_output_____" ] ], [ [ "x = 10", "_____no_output_____" ], [ "if x != 0:\n print('Estoy contento')\nelse:\n print('No estoy tan contento')", "_____no_output_____" ], [ "if x:\n print('Estoy contento')\nelse:\n print('No estoy tan contento')", "_____no_output_____" ] ], [ [ "### 3.2.2 Expresiones condicionales\n\nEs muy común que una variable pueda tener dos valores, dependiendo de alguna condición:", "_____no_output_____" ] ], [ [ "# Función para ver si pasó o no dependiendo de la nota\ndef mensaje_calificacion(nota):\n \"\"\"\n Esta función imprime si pasaste o no de acuerdo a la nota obtenida.\n La minima nota aprobatoria es de 6.\n \n >>> mensaje_calificacion(9)\n Pasaste la materia, con una nota de 9\n \n >>> mensaje_calificacion(5)\n Reprobaste la materia, con una nota de 5\n \"\"\"\n if nota >= 6:\n print('Pasaste la materia, con una nota de', nota)\n else:\n print('Reprobaste la materia, con una nota de', nota)", "_____no_output_____" ], [ "mensaje_calificacion(5)", "_____no_output_____" ], [ "mensaje_calificacion(7)", "_____no_output_____" ], [ "mensaje_calificacion(10)", "_____no_output_____" ] ], [ [ "Por otra parte, Python permite escribir este tipo de expresiones en una sola línea, lo que resulta muy últil y muy legible:", "_____no_output_____" ] ], [ [ "# Función para ver si pasó o no dependiendo de la nota\ndef mensaje_calificacion(nota):\n \"\"\"\n Esta función imprime si pasaste o no de acuerdo a la nota obtenida.\n \n >>> mensaje_calificacion(9)\n Pasaste la materia, con una nota de 9\n \n >>> mensaje_calificacion(5)\n Reprobaste la materia, con una nota de 5\n \"\"\"\n resultado = 'Pasaste' if nota >= 6 else 'Reprobaste'\n print(resultado + ' la materia, con una nota de', nota)", "_____no_output_____" ], [ "mensaje_calificacion(5)", "_____no_output_____" ], [ 
"mensaje_calificacion(7)", "_____no_output_____" ] ], [ [ "___\nHoy vimos:\n- La sintaxis básica de Python, los tipos de variable int, float y str, y algunas funciones básicas.\n- Cómo pedir ayuda de las funciones, y como construir nuestras propias funciones.\n- Variables Booleanas y condicionales.\n\nPara la próxima clase:\n- Tarea 1 para el miércoles (23:59).", "_____no_output_____" ], [ "<script>\n $(document).ready(function(){\n $('div.prompt').hide();\n $('div.back-to-top').hide();\n $('nav#menubar').hide();\n $('.breadcrumb').hide();\n $('.hidden-print').hide();\n });\n</script>\n\n<footer id=\"attribution\" style=\"float:right; color:#808080; background:#fff;\">\nCreated with Jupyter by jfraustro.\n</footer>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a6ca420d18915e248453d21f966e761e6a9bc91
88,081
ipynb
Jupyter Notebook
fnmtf.ipynb
lucasbrunialti/biclustering-experiments
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
[ "BSD-2-Clause" ]
3
2017-11-21T08:21:32.000Z
2020-03-10T14:57:06.000Z
fnmtf.ipynb
lucasbrunialti/biclustering-experiments
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
[ "BSD-2-Clause" ]
null
null
null
fnmtf.ipynb
lucasbrunialti/biclustering-experiments
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
[ "BSD-2-Clause" ]
4
2017-01-18T18:10:37.000Z
2021-12-15T02:23:15.000Z
147.046745
34,018
0.815272
[ [ [ "import numpy as np\nimport random\nimport pandas as pd\nimport sklearn\n\nfrom matplotlib import pyplot as plt\nplt.rcParams['figure.figsize'] = (10.0, 8.0)\n\nfrom sklearn.datasets import make_biclusters\nfrom sklearn.datasets import samples_generator as sg\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn import preprocessing\n# from sklearn.cluster.bicluster import SpectralCoclustering\nfrom sklearn.metrics import consensus_score\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nfrom sklearn.metrics.cluster import adjusted_rand_score\nfrom biclustering import DeltaBiclustering, MSR", "_____no_output_____" ], [ "%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "def generate_dataset(option, noise=1, noise_background=True, shuffle=False):\n \"\"\"\n This function generates syntetic datasets as described in the paper\n (http://cs-people.bu.edu/panagpap/Research/Bio/bicluster_survey.pdf)\n - Figure 4.\n \n Params\n option (str): bicluster structure ('a' to 'i')\n noise (int): value of the noise in the matrix\n noise_background (bool): positions where is not a bicluster should contain noise\n if this parameter is set to True\n shuffle (bool): shuffle lines and columns of the matrix if this parameter is set\n to True\n\n Returns\n data (array_like): matrix generated\n \"\"\"\n shape = (150,150)\n n,m = shape\n \n # values shouldn't be a lot far...\n centers = [20, 40, 60, 80, 100]\n \n y_row = np.zeros(150)\n y_col = np.zeros(150)\n\n if noise_background:\n data = np.random.rand(n, m)*100\n else:\n data = np.zeros(n*m).reshape(shape)\n\n if option == 'a':\n data[60:110][:,70:140] = np.random.rand(50,70)*noise + centers[0]\n y_row[60:110] += 1\n y_col[70:140] += 1\n elif option == 'd':\n data[0:50][:,0:70] = np.random.rand(50,70)*noise + centers[0]\n y_row[0:50] += 1\n y_col[0:70] += 1\n\n data[50:100][:,50:100] = np.random.rand(50,50)*noise + centers[2]\n y_row[50:100] += 2\n y_col[50:100] += 2\n\n data[100:150][:,80:150] = np.random.rand(50,70)*noise + centers[1]\n y_row[100:150] += 3\n y_col[80:150] += 3\n elif option == 'e':\n data[0:70][:,0:50] = np.random.rand(70,50)*noise + centers[3]\n y_row[0:70] += 1\n y_col[0:50] += 1\n\n data[50:100][:,50:100] = np.random.rand(50,50)*noise + centers[1]\n y_row[50:100] += 2\n y_col[50:100] += 2\n\n data[80:150][:,100:150] = np.random.rand(70,50)*noise + centers[2]\n y_row[80:150] += 3\n y_col[100:150] += 3\n elif option == 'f':\n data[0:50][:,0:40] = np.random.rand(50,40)*noise + centers[4]\n y_row[0:50] += 1\n y_col[0:40] += 1\n\n data[50:150][:,0:40] = np.random.rand(100,40)*noise + centers[0]\n y_row[50:150] += 2\n\n data[110:150][:,40:95] = np.random.rand(40,55)*noise + centers[2]\n y_row[110:150] += 3\n y_col[40:95] += 2\n\n data[110:150][:,95:150] = np.random.rand(40,55)*noise + centers[1]\n y_row[110:150] += 3\n y_col[95:150] += 3\n elif option == 'g':\n data[0:110][:,0:40] = np.random.rand(110,40)*noise + centers[0]\n data[110:150][:,0:110] = np.random.rand(40,110)*noise + centers[2]\n data[40:150][:,110:150] = np.random.rand(110,40)*noise + centers[1]\n data[0:40][:,40:150] = np.random.rand(40,110)*noise + centers[3]\n elif option == 'h':\n data[0:90][:,0:90] = np.random.rand(90,90)*noise + centers[0]\n data[35:55][:,35:55] = (np.random.rand(20,20)*noise + centers[1]) + data[35:55][:,35:55]\n data[110:140][:,35:90] = np.random.rand(30,55)*noise + 
centers[4]\n data[0:140][:,110:150] = np.random.rand(140,40)*noise + centers[2]\n data[0:55][:,130:150] = (np.random.rand(55,20)*noise + centers[3]) + data[0:55][:,130:150]\n elif option == 'i':\n data[20:70][:,20:70] = np.random.rand(50,50)*noise + centers[0]\n data[20:70][:,100:150] = np.random.rand(50,50)*noise + centers[1]\n data[50:110][:,50:120] = np.random.rand(60,70)*noise + centers[2]\n data[120:150][:,20:100] = np.random.rand(30,80)*noise + centers[3]\n\n if shuffle:\n np.random.shuffle(data)\n np.random.shuffle(data.T)\n\n return data, y_row, y_col", "_____no_output_____" ], [ "from numba import jit\n\n@jit(nopython=True)\ndef compute_U(X, S, V, m, k):\n # X is passed explicitly: numba freezes globals at compile time, so reading\n # a global X would silently reuse whichever matrix triggered compilation\n V_tilde = np.dot(S, V.T)\n U_new = np.zeros((m, k)) # zeros, not empty: only the winning entry per row is set below\n for i in range(m):\n errors = np.empty(k)\n for row_clust_ind in range(k):\n errors[row_clust_ind] = np.sum((X[i][:] - V_tilde[row_clust_ind][:])**2)\n ind = np.argmin(errors)\n U_new[i][ind] = 1\n return U_new\n\ndef fnmtf(X, k, l, num_iter=10, norm=True):\n m, n = X.shape\n\n U = np.random.rand(m,k)\n S = np.random.rand(k,l)\n V = np.random.rand(n,l)\n\n if norm:\n X = preprocessing.normalize(X)\n for i in range(num_iter):\n S = pinv(U.T.dot(U)).dot(U.T).dot(X).dot(V).dot(pinv(V.T.dot(V)))\n\n # solve subproblem to update V\n U_tilde = U.dot(S)\n V_new = np.zeros(n*l).reshape(n, l)\n for j in range(n):\n errors = np.zeros(l)\n for col_clust_ind in range(l):\n errors[col_clust_ind] = ((X[:][:, j] - U_tilde[:][:, col_clust_ind])**2).sum()\n ind = np.argmin(errors)\n V_new[j][ind] = 1\n V = V_new\n\n# while np.linalg.det(V.T.dot(V)) <= 0:\n# erros = (X - U.dot(S).dot(V.T)) ** 2\n# erros = np.sum(erros.dot(V), axis=0) / np.sum(V, axis=0)\n# erros[np.where(np.sum(V, axis=0) <= 1)] = -inf\n# quantidade = np.sum(V, axis=0)\n# indexMin = np.argmin(quantidade)\n# indexMax = np.argmax(erros)\n# indexes = np.nonzero(V[:, indexMax])[0]\n# for j in indexes:\n# if np.random.rand(1) > 0.5:\n# V[j, indexMax] = 0\n# V[j, indexMin] = 1\n\n # solve subproblem to update U\n U = compute_U(X, S, V, m, k)\n\n# while np.linalg.det(U.T.dot(U)) <= 0:\n# erros = (X - U.dot(V_tilde)) ** 2\n# erros = np.sum(U.T.dot(erros), axis=1) / np.sum(U, axis=0)\n# erros[np.where(np.sum(U, axis=0) <= 1)] = -np.inf\n# quantidade = np.sum(U, axis=0)\n# indexMin = np.argmin(quantidade)\n# indexMax = np.argmax(erros)\n# indexes = np.nonzero(U[:, indexMax])[0]\n\n# end = len(indexes)\n# indexes_p = np.random.permutation(end)\n# U[indexes[indexes_p[0:np.floor(end/2.0)]], indexMax] = 0.0\n# U[indexes[indexes_p[0:np.floor(end/2.0)]], indexMin] = 1.0\n\n rows_ind = np.argmax(U, axis=1)\n cols_ind = np.argmax(V, axis=1)\n\n return U, S, V, rows_ind, cols_ind", "_____no_output_____" ], [ "# m, n = (40, 35)\n# X = .01 * np.random.rand(m,n)\n# X[0:10][:, 0:10] = 1 + .01 * np.random.random()\n# X[30:40][:, 20:35] = 1 + .01 * np.random.random()\n# X[20:30][:, 20:35] = .6 + .01 * np.random.random()\n# X[30:40][:, 36:40] = 1 + .01 * np.random.random()\n\n# the (6, 8) toy matrix is built here (uncommented) so that the matshow,\n# fnmtf and scores cells below have an X matching their hard-coded labels\nm, n = (6, 8)\nX = .01 * np.random.rand(m,n)\nX[0:2][:, 0:4] = 1 + .01 * np.random.random()\nX[2:4][:, 4:8] = .6 + .01 * np.random.random()\nX[4:6][:, 0:8] = .8 + .01 * np.random.random()\n\nplt.matshow(X, cmap=plt.cm.Blues)\nplt.title('Original data')\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "U, S, V, rows_ind, cols_ind = fnmtf(X, 3, 2, norm=False)\n\ndef plot_factorization_result(U, S, V):\n fig = plt.figure()\n \n ax = fig.add_subplot(2, 2, 1)\n ax.matshow(U.dot(S).dot(V.T), cmap=plt.cm.Blues)\n ax.set_title('reconstruction')\n ax.grid()\n \n ax2 = 
fig.add_subplot(2, 2, 2)\n ax2.matshow(U, cmap=plt.cm.Blues)\n ax2.set_title('U*S')\n ax2.grid()\n\n ax3 = fig.add_subplot(2, 2, 3)\n ax3.matshow(S, cmap=plt.cm.Blues)\n ax3.set_title('S')\n ax3.grid()\n\n ax4 = fig.add_subplot(2, 2, 4)\n ax4.matshow(V.T, cmap=plt.cm.Blues)\n ax4.set_title('S*V\\'')\n ax4.grid()\n \n plt.show()\n\n\ndef scores(labels_true, labels_pred, row=True):\n if row:\n print('Rows scores')\n else:\n print('Cols scores')\n print('Random score: %s' % adjusted_rand_score(labels_true, labels_pred))\n print('Normalized mutual information score: %s' % normalized_mutual_info_score(labels_true, labels_pred))\n print('')\n\nplot_factorization_result(U, S, V)\nscores(rows_ind, [0, 0, 1, 1, 2, 2])\nscores(cols_ind, [0, 0, 0, 0, 1, 1, 1, 1], row=False)", "_____no_output_____" ], [ "X, x_labels, y_labels = generate_dataset('d', noise_background=False, shuffle=False)\ntemp, _, _ = generate_dataset('d', noise_background=False)\n\nfig = plt.figure()\n \nax1 = fig.add_subplot(1, 2, 1)\nax1.matshow(temp, cmap=plt.cm.Blues)\nax1.set_title('Original data')\nax1.grid()\n\nax2 = fig.add_subplot(1, 2, 2)\nax2.matshow(X, cmap=plt.cm.Blues)\nax2.set_title('Shuffled data')\nax2.grid()\n\nplt.show()", "_____no_output_____" ], [ "import time\nt1 = time.time()\nU, S, V, rows_ind, cols_ind = fnmtf(X, 3, 3, norm=False)\nt2 = time.time()\nprint ('dt: {} secs'.format(t2-t1))\nplot_factorization_result(U, S, V)\nscores(rows_ind, x_labels)\nscores(cols_ind, y_labels, row=False)", "dt: 0.0820469856262 secs\n" ], [ "%load_ext Cython", "_____no_output_____" ], [ "%%cython\n\nimport cython\ncimport cython\n\nimport numpy as np\ncimport numpy as np\n\[email protected](False)\[email protected](False)\[email protected](False)\ndef fnmtf_improved(double[:, ::1] X, int k, int l, int num_iter=100, int norm=0):\n cdef int m = X.shape[0]\n cdef int n = X.shape[1]\n\n cdef unsigned int i = 0\n cdef unsigned int j = 0\n cdef unsigned int iter_index = 0\n cdef unsigned int row_clust_ind = 0\n cdef unsigned int col_clust_ind = 0\n cdef unsigned int ind = 0\n\n cdef double[:, ::1] U = np.random.rand(m, k).astype(np.float64)\n cdef double[:, ::1] U_best = np.random.rand(m, k).astype(np.float64)\n cdef double[:, ::1] S = np.random.rand(k, l).astype(np.float64)\n cdef double[:, ::1] S_best = np.random.rand(k, l).astype(np.float64)\n cdef double[:, ::1] V = np.random.rand(n, l).astype(np.float64)\n cdef double[:, ::1] V_best = np.random.rand(n, l).astype(np.float64)\n\n cdef double[:, ::1] U_tilde = np.empty((m, l), dtype=np.float64)\n cdef double[:, ::1] V_new = np.empty((n, l), dtype=np.float64)\n\n cdef double[:, ::1] V_tilde = np.empty((l, n), dtype=np.float64)\n cdef double[:, ::1] U_new = np.empty((m, k), dtype=np.float64)\n\n cdef double error_best = 10e9999\n cdef double error = 10e9999\n cdef double[:] errors_v = np.zeros(l, dtype=np.float64)\n cdef double[:] errors_u = np.zeros(k, dtype=np.float64)\n\n for iter_index in range(num_iter):\n S[:, :] = np.dot( np.dot(np.linalg.pinv(np.dot(U.T, U)), np.dot(np.dot(U.T, X), V)), np.linalg.pinv(np.dot(V.T, V)) )\n\n # solve subproblem to update V\n U_tilde[:, :] = np.dot(U, S)\n V_new[:, :] = 0.0 # reset hard assignments (np.empty left stale values and its int dtype did not match the float64 view)\n for j in range(n):\n errors_v = np.zeros(l, dtype=np.float64)\n for col_clust_ind in range(l):\n errors_v[col_clust_ind] = np.sum(np.square(np.subtract(X[:, j], U_tilde[:, col_clust_ind])))\n ind = np.argmin(errors_v)\n V_new[j, ind] = 1.0\n V[:, :] = V_new\n\n # solve subproblem to update U\n V_tilde[:, :] = np.dot(S, V.T)\n U_new[:, :] = 0.0 # reset hard assignments for the same reason\n for i in range(m):\n errors_u = np.zeros(k, dtype=np.float64)\n for row_clust_ind in range(k):\n errors_u[row_clust_ind] = np.sum(np.square(np.subtract(X[i, :], V_tilde[row_clust_ind, :])))\n ind = np.argmin(errors_u)\n U_new[i, ind] = 1.0\n U[:, :] = U_new\n\n error = np.sum(np.square(np.subtract(X, np.dot(np.dot(U, S), V.T))))\n\n if error < error_best:\n U_best[:, :] = U\n S_best[:, :] = S\n V_best[:, :] = V\n error_best = error\n\n # return the best factorization found, plus hard row/column cluster labels;\n # the original cell ended without a return, so the caller below unpacked None\n return (np.asarray(U_best), np.asarray(S_best), np.asarray(V_best),\n np.asarray(U_best).argmax(axis=1), np.asarray(V_best).argmax(axis=1))", "_____no_output_____" ], [ "import time\nX, x_labels, y_labels = generate_dataset('d', noise_background=False, shuffle=False)\nt1 = time.time()\nU, S, V, rows_ind, cols_ind = fnmtf_improved(X, 3, 3)\nt2 = time.time()\nprint ('dt: {} secs'.format(t2-t1))\nplot_factorization_result(U, S, V)\nscores(rows_ind, x_labels)\nscores(cols_ind, y_labels, row=False)", "_____no_output_____" ] ] ]
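The explicit per-column assignment loops in both `fnmtf` variants above can be vectorized with broadcasting; a NumPy-only sketch of the column-update subproblem (illustrative, not part of the original record) is:

```python
# Sketch: vectorized version of the V-update loop (assign each column of X
# to its nearest column-cluster prototype U*S).
import numpy as np

def update_V_vectorized(X, U, S):
    n = X.shape[1]
    l = S.shape[1]
    U_tilde = U.dot(S)  # (m, l) column-cluster prototypes
    # squared distance from every column of X to every prototype column
    errors = ((X[:, :, None] - U_tilde[:, None, :]) ** 2).sum(axis=0)  # (n, l)
    V = np.zeros((n, l))
    V[np.arange(n), errors.argmin(axis=1)] = 1.0  # hard assignment per column
    return V
```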
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6cb70457dedd621b4526e5f076151a0305a0b9
39,175
ipynb
Jupyter Notebook
Notebooks/Uncategorized/Gender Names.ipynb
nealcaren/Text-Mining-with-Python-Notebooks
aebcd5b0ae515dbb34e9c4de34aa6dc265517638
[ "MIT" ]
2
2020-05-15T17:02:48.000Z
2021-12-14T23:22:38.000Z
Notebooks/name-genders.ipynb
nealcaren/Text-Mining-with-Python-Notebooks
aebcd5b0ae515dbb34e9c4de34aa6dc265517638
[ "MIT" ]
null
null
null
Notebooks/name-genders.ipynb
nealcaren/Text-Mining-with-Python-Notebooks
aebcd5b0ae515dbb34e9c4de34aa6dc265517638
[ "MIT" ]
null
null
null
56.044349
16,472
0.696056
[ [ [ "## Assigning gender based on first name\n\nA straightforward task in natural language processing is to assign gender based on first name. Social scientists are often interested in gender inequalities and may have a dataset that lists name but not gender, such as a list of journal articles with authors in a study of gendered citation practices. \n\nAssigning gender based on name is usually done by comparing a given name with the name's gender distribution on official records, such as the US Social Security baby name list. While this works for most names, some names, such as Gershun or Hunna, are too rare to have reliable estimates based on most available official records. Other names, such as Jian or Blake, are common among both men and women. A fourth category of names are those which are dispropriately one gender or another, but do have non-trivial numbers of a different gender, such as Cody or Kyle. For both these names and androgynous names, their are often generational differences in the gendered distribution. \n\nThe most efficient way to gender names in Python is with the `gender_guesser` library, which is based on Jörg Michael's multinational list of more than 48,000 names. The first time you use the library, you may need to install it:\n\n`%pip install gender_guesser`", "_____no_output_____" ], [ "The `gender_guesser` library is set up so that first you import the gender function and then create a detector. In my case, the detector is named `d` and one parameter is passed, which instructors the detector to ignore capitalization.", "_____no_output_____" ] ], [ [ "import gender_guesser.detector as gender", "_____no_output_____" ], [ "d = gender.Detector(case_sensitive=False)", "_____no_output_____" ] ], [ [ "When passed a name, the detector's `get_gender` returns either 'male', 'female', 'mostly_male', 'mostly_female', 'andy' (for androgenous names), or 'unknown' (for names not in the dataset).", "_____no_output_____" ] ], [ [ "d.get_gender(\"Barack\")", "_____no_output_____" ], [ "d.get_gender(\"Theresa\")", "_____no_output_____" ], [ "d.get_gender(\"JAMIE\")", "_____no_output_____" ], [ "d.get_gender(\"sidney\")", "_____no_output_____" ], [ "d.get_gender(\"Tal\")", "_____no_output_____" ] ], [ [ "In almost all cases, you will want to analyze a large list of names, rather than a single name. For example, the University of North Carolina, Chapel Hill makes available salary information on employees. The dataset includes name, department, position salary and years of employment, but not gender. ", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndf = pd.read_csv(\"data/unc_salaries.csv\")", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ] ], [ [ "A column with name-based gender assignment can be created by applying `d.get_gender` to the first name column.", "_____no_output_____" ] ], [ [ "df[\"Gender\"] = df[\"First Name\"].apply(d.get_gender)", "_____no_output_____" ], [ "df[\"Gender\"].value_counts(normalize=True)", "_____no_output_____" ] ], [ [ "For this dataset, the majority of the names can be gendered, while less than ten percent of names are not in the dataset. 
", "_____no_output_____" ], [ "Selecting the rows in the dataframe where gender is unknown and the listing the values can be useful for inspecting cases and evaluating the gender-name assignment process.", "_____no_output_____" ] ], [ [ "cases = df[\"Gender\"] == \"unknown\"\n\ndf[cases][\"First Name\"].values", "_____no_output_____" ] ], [ [ "My quick interpreation of this list is that it names that are certainly rare in the US, and some are likely transliterated using a non-common English spelling. The name with missing gender are not-random and the process of creating missingness is likely correlated with other variables of interest, such as salary. This might impact a full-analysis of gender patterns, but I'll ignore that in the preliminary analysis.", "_____no_output_____" ], [ "If you were conducted your analysis in another statistical package, you could export your dataframe with the new gender column.", "_____no_output_____" ] ], [ [ "df.to_csv(\"unc_salaries_gendered.csv\")", "_____no_output_____" ] ], [ [ "You could also produce some summary statistics in your notebook. For example, the pandas `groupby` method can be used to estimate median salary by gender.", "_____no_output_____" ] ], [ [ "df.groupby(\"Gender\")[\"Salary\"].median()", "_____no_output_____" ] ], [ [ "Comparing the male and female-coded names, this shows evidence of a large salary gap based on gender. The \"mostly\" and unknown categories are in the middle, but interesting the androgynous names are associated with the lowest salaries. ", "_____no_output_____" ], [ "Grouping by gender and position may be useful in understanding the mechanisms that produce the gender gap. I also focus on just the individuals with names that are coded as male or female. ", "_____no_output_____" ] ], [ [ "subset = df[\"Gender\"].isin([\"male\", \"female\"])\n\ndf[subset].groupby([\"Position\", \"Gender\"])[\"Salary\"].median()", "_____no_output_____" ] ], [ [ "This summary dataframe can also be plotted, which clearly shows that the median salary for male Assistant Professors is higher than the median salary of the higher ranked female Associate Professors.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\ndf[subset].groupby(['Position','Gender'])['Salary'].median().plot(kind='barh');", "_____no_output_____" ] ], [ [ "Sometimes the first name will not be it's own field, but included as part of the name column that includes the full name. In that case, you will need to create a function that extracts the first name. \n\nIn this dataframe, the `name` column is the last name, followed by a comma, and then the first name and possibly a middle name or initial. A brief function extracts the first name,", "_____no_output_____" ] ], [ [ "def gender_name(name):\n \"\"\"\n Extracts and genders first name when the original name is formatted \"Last, First M\". \n Assumes a gender.Detector named `d` is already declared. \n \"\"\"\n\n first_name = name.split(\", \")[-1] # grab the slide after the comma\n first_name = first_name.split(\" \")[0] # remove middle name/initial\n gender = d.get_gender(first_name)\n return gender", "_____no_output_____" ] ], [ [ "This function can now be applied to the full name column.", "_____no_output_____" ] ], [ [ "df[\"Gender\"] = df[\"Full Name\"].apply(gender_name)", "_____no_output_____" ], [ "df[\"Gender\"].value_counts()", "_____no_output_____" ] ], [ [ "The results are the same as original gender column.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a6cbb375d030adcbed8595eb033cca41078d676
1,657
ipynb
Jupyter Notebook
git_tutorials/git_tutorial.ipynb
a13x00v/gradient_nbs
2ffc21c598dc51c07f19f8bf38504ce6ca337afe
[ "Apache-2.0" ]
null
null
null
git_tutorials/git_tutorial.ipynb
a13x00v/gradient_nbs
2ffc21c598dc51c07f19f8bf38504ce6ca337afe
[ "Apache-2.0" ]
null
null
null
git_tutorials/git_tutorial.ipynb
a13x00v/gradient_nbs
2ffc21c598dc51c07f19f8bf38504ce6ca337afe
[ "Apache-2.0" ]
null
null
null
17.082474
65
0.471334
[ [ [ "# git clone https://github.com/noallynoclan/gradient_nbs\n# cd gradient_nbs\n# git status\n# git add -A\n# git commit -m \"...\"\n# git status\n# git log --graph --oneline\n# git push", "_____no_output_____" ], [ "# made some changes", "_____no_output_____" ], [ "# experimental changes", "_____no_output_____" ], [ "3 * 5", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a6cbb73cabe817187b7ccca5ab5cd60cc6faa8e
155,164
ipynb
Jupyter Notebook
Week 4 - Data Quality, Exploratory Data Analysis, and Machine Learning/lab3_text_analysis_and_entity_resolution_student.ipynb
moritzmeister/BigDataWithApacheSpark
67d88ae76ded1a296f9bb69e0607409bd3adb0fc
[ "MIT" ]
117
2015-09-14T05:07:48.000Z
2021-05-26T17:03:22.000Z
Week 4 - Data Quality, Exploratory Data Analysis, and Machine Learning/lab3_text_analysis_and_entity_resolution_student.ipynb
moritzmeister/BigDataWithApacheSpark
67d88ae76ded1a296f9bb69e0607409bd3adb0fc
[ "MIT" ]
null
null
null
Week 4 - Data Quality, Exploratory Data Analysis, and Machine Learning/lab3_text_analysis_and_entity_resolution_student.ipynb
moritzmeister/BigDataWithApacheSpark
67d88ae76ded1a296f9bb69e0607409bd3adb0fc
[ "MIT" ]
129
2015-09-02T18:42:29.000Z
2021-02-18T08:33:03.000Z
76.247666
56,344
0.754866
[ [ [ "version 1.0.3\n#![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png)\n# **Text Analysis and Entity Resolution**\n####Entity resolution is a common, yet difficult problem in data cleaning and integration. This lab will demonstrate how we can use Apache Spark to apply powerful and scalable text analysis techniques and perform entity resolution across two datasets of commercial products.", "_____no_output_____" ], [ "#### Entity Resolution, or \"[Record linkage][wiki]\" is the term used by statisticians, epidemiologists, and historians, among others, to describe the process of joining records from one data source with another that describe the same entity. Our terms with the same meaning include, \"entity disambiguation/linking\", duplicate detection\", \"deduplication\", \"record matching\", \"(reference) reconciliation\", \"object identification\", \"data/information integration\", and \"conflation\".\n#### Entity Resolution (ER) refers to the task of finding records in a dataset that refer to the same entity across different data sources (e.g., data files, books, websites, databases). ER is necessary when joining datasets based on entities that may or may not share a common identifier (e.g., database key, URI, National identification number), as may be the case due to differences in record shape, storage location, and/or curator style or preference. A dataset that has undergone ER may be referred to as being cross-linked.\n[wiki]: https://en.wikipedia.org/wiki/Record_linkage", "_____no_output_____" ], [ "### Code\n#### This assignment can be completed using basic Python, pySpark Transformations and actions, and the plotting library matplotlib. Other libraries are not allowed.\n### Files\n#### Data files for this assignment are from the [metric-learning](https://code.google.com/p/metric-learning/) project and can be found at:\n`cs100/lab3`\n#### The directory contains the following files:\n* **Google.csv**, the Google Products dataset\n* **Amazon.csv**, the Amazon dataset\n* **Google_small.csv**, 200 records sampled from the Google data\n* **Amazon_small.csv**, 200 records sampled from the Amazon data\n* **Amazon_Google_perfectMapping.csv**, the \"gold standard\" mapping\n* **stopwords.txt**, a list of common English words\n#### Besides the complete data files, there are \"sample\" data files for each dataset - we will use these for **Part 1**. In addition, there is a \"gold standard\" file that contains all of the true mappings between entities in the two datasets. Every row in the gold standard file has a pair of record IDs (one Google, one Amazon) that belong to two record that describe the same thing in the real world. We will use the gold standard to evaluate our algorithms.", "_____no_output_____" ], [ "### **Part 0: Preliminaries**\n#### We read in each of the files and create an RDD consisting of lines.\n#### For each of the data files (\"Google.csv\", \"Amazon.csv\", and the samples), we want to parse the IDs out of each record. The IDs are the first column of the file (they are URLs for Google, and alphanumeric strings for Amazon). 
Omitting the headers, we load these data files into pair RDDs where the *mapping ID* is the key, and the value is a string consisting of the name/title, description, and manufacturer from the record.\n#### The file format of an Amazon line is:\n `\"id\",\"title\",\"description\",\"manufacturer\",\"price\"`\n#### The file format of a Google line is:\n `\"id\",\"name\",\"description\",\"manufacturer\",\"price\"`", "_____no_output_____" ] ], [ [ "import re\nDATAFILE_PATTERN = '^(.+),\"(.+)\",(.*),(.*),(.*)'\n\ndef removeQuotes(s):\n \"\"\" Remove quotation marks from an input string\n Args:\n s (str): input string that might have the quote \"\" characters\n Returns:\n str: a string without the quote characters\n \"\"\"\n return ''.join(i for i in s if i!='\"')\n\n\ndef parseDatafileLine(datafileLine):\n \"\"\" Parse a line of the data file using the specified regular expression pattern\n Args:\n datafileLine (str): input string that is a line from the data file\n Returns:\n str: a string parsed using the given regular expression and without the quote characters\n \"\"\"\n match = re.search(DATAFILE_PATTERN, datafileLine)\n if match is None:\n print 'Invalid datafile line: %s' % datafileLine\n return (datafileLine, -1)\n elif match.group(1) == '\"id\"':\n print 'Header datafile line: %s' % datafileLine\n return (datafileLine, 0)\n else:\n product = '%s %s %s' % (match.group(2), match.group(3), match.group(4))\n return ((removeQuotes(match.group(1)), product), 1)", "_____no_output_____" ], [ "import sys\nimport os\nfrom test_helper import Test\n\nbaseDir = os.path.join('data')\ninputPath = os.path.join('cs100', 'lab3')\n\nGOOGLE_PATH = 'Google.csv'\nGOOGLE_SMALL_PATH = 'Google_small.csv'\nAMAZON_PATH = 'Amazon.csv'\nAMAZON_SMALL_PATH = 'Amazon_small.csv'\nGOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv'\nSTOPWORDS_PATH = 'stopwords.txt'\n\ndef parseData(filename):\n \"\"\" Parse a data file\n Args:\n filename (str): input file name of the data file\n Returns:\n RDD: a RDD of parsed lines\n \"\"\"\n return (sc\n .textFile(filename, 4, 0)\n .map(parseDatafileLine)\n .cache())\n\ndef loadData(path):\n \"\"\" Load a data file\n Args:\n path (str): input file name of the data file\n Returns:\n RDD: a RDD of parsed valid lines\n \"\"\"\n filename = os.path.join(baseDir, inputPath, path)\n raw = parseData(filename).cache()\n failed = (raw\n .filter(lambda s: s[1] == -1)\n .map(lambda s: s[0]))\n for line in failed.take(10):\n print '%s - Invalid datafile line: %s' % (path, line)\n valid = (raw\n .filter(lambda s: s[1] == 1)\n .map(lambda s: s[0])\n .cache())\n print '%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path,\n raw.count(),\n valid.count(),\n failed.count())\n assert failed.count() == 0\n assert raw.count() == (valid.count() + 1)\n return valid\n\ngoogleSmall = loadData(GOOGLE_SMALL_PATH)\ngoogle = loadData(GOOGLE_PATH)\namazonSmall = loadData(AMAZON_SMALL_PATH)\namazon = loadData(AMAZON_PATH)", "Google_small.csv - Read 201 lines, successfully parsed 200 lines, failed to parse 0 lines\nGoogle.csv - Read 3227 lines, successfully parsed 3226 lines, failed to parse 0 lines\nAmazon_small.csv - Read 201 lines, successfully parsed 200 lines, failed to parse 0 lines\nAmazon.csv - Read 1364 lines, successfully parsed 1363 lines, failed to parse 0 lines\n" ] ], [ [ "#### Let's examine the lines that were just loaded in the two subset (small) files - one from Google and one from Amazon", "_____no_output_____" ] ], [ [ "for line in googleSmall.take(3):\n print 
'google: %s: %s\\n' % (line[0], line[1])\n\nfor line in amazonSmall.take(3):\n print 'amazon: %s: %s\\n' % (line[0], line[1])", "google: http://www.google.com/base/feeds/snippets/11448761432933644608: spanish vocabulary builder \"expand your vocabulary! contains fun lessons that both teach and entertain you'll quickly find yourself mastering new terms. includes games and more!\" \n\ngoogle: http://www.google.com/base/feeds/snippets/8175198959985911471: topics presents: museums of world \"5 cd-rom set. step behind the velvet rope to examine some of the most treasured collections of antiquities art and inventions. includes the following the louvre - virtual visit 25 rooms in full screen interactive video detailed map of the louvre ...\" \n\ngoogle: http://www.google.com/base/feeds/snippets/18445827127704822533: sierrahome hse hallmark card studio special edition win 98 me 2000 xp \"hallmark card studio special edition (win 98 me 2000 xp)\" \"sierrahome\"\n\namazon: b000jz4hqo: clickart 950 000 - premier image pack (dvd-rom) \"broderbund\"\n\namazon: b0006zf55o: ca international - arcserve lap/desktop oem 30pk \"oem arcserve backup v11.1 win 30u for laptops and desktops\" \"computer associates\"\n\namazon: b00004tkvy: noah's ark activity center (jewel case ages 3-8) \"victory multimedia\"\n\n" ] ], [ [ "### **Part 1: ER as Text Similarity - Bags of Words**\n#### A simple approach to entity resolution is to treat all records as strings and compute their similarity with a string distance function. In this part, we will build some components for performing bag-of-words text-analysis, and then use them to compute record similarity.\n#### [Bag-of-words][bag-of-words] is a conceptually simple yet powerful approach to text analysis.\n#### The idea is to treat strings, a.k.a. **documents**, as *unordered collections* of words, or **tokens**, i.e., as bags of words.\n> #### **Note on terminology**: a \"token\" is the result of parsing the document down to the elements we consider \"atomic\" for the task at hand. Tokens can be things like words, numbers, acronyms, or other exotica like word-roots or fixed-length character strings.\n> #### Bag of words techniques all apply to any sort of token, so when we say \"bag-of-words\" we really mean \"bag-of-tokens,\" strictly speaking.\n#### Tokens become the atomic unit of text comparison. If we want to compare two documents, we count how many tokens they share in common. If we want to search for documents with keyword queries (this is what Google does), then we turn the keywords into tokens and find documents that contain them. The power of this approach is that it makes string comparisons insensitive to small differences that probably do not affect meaning much, for example, punctuation and word order.\n[bag-of-words]: https://en.wikipedia.org/wiki/Bag-of-words_model", "_____no_output_____" ], [ "### **1(a) Tokenize a String**\n#### Implement the function `simpleTokenize(string)` that takes a string and returns a list of non-empty tokens in the string. `simpleTokenize` should split strings using the provided regular expression. Since we want to make token-matching case insensitive, make sure all tokens are turned lower-case. Give an interpretation, in natural language, of what the regular expression, `split_regex`, matches.\n#### If you need help with Regular Expressions, try the site [regex101](https://regex101.com/) where you can interactively explore the results of applying different regular expressions to strings. 
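For example (a quick illustration, not one of the graded exercises), applying the split pattern used below to a sample string shows both why empty tokens can appear and how underscores are handled:\n\n```python\nimport re\nprint re.split(r'\\W+', 'Hello, world_2!')  # ['Hello', 'world_2', '']\n```\n#### 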
*Note that \\W includes the \"_\" character*. You should use [re.split()](https://docs.python.org/2/library/re.html#re.split) to perform the string split. Also, make sure you remove any empty tokens.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nquickbrownfox = 'A quick brown fox jumps over the lazy dog.'\nsplit_regex = r'\\W+'\n\ndef simpleTokenize(string):\n \"\"\" A simple implementation of input string tokenization\n Args:\n string (str): input string\n Returns:\n list: a list of tokens\n \"\"\"\n return [item for item in re.split(split_regex, string.lower()) if item]\n\nprint simpleTokenize(quickbrownfox) # Should give ['a', 'quick', 'brown', ... ]", "['a', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']\n" ], [ "# TEST Tokenize a String (1a)\nTest.assertEquals(simpleTokenize(quickbrownfox),\n ['a','quick','brown','fox','jumps','over','the','lazy','dog'],\n 'simpleTokenize should handle sample text')\nTest.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')\nTest.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],\n 'simpleTokenize should handle puntuations and lowercase result')\nTest.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],\n 'simpleTokenize should not remove duplicates')", "1 test passed.\n1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(1b) Removing stopwords**\n#### *[Stopwords][stopwords]* are common (English) words that do not contribute much to the content or meaning of a document (e.g., \"the\", \"a\", \"is\", \"to\", etc.). Stopwords add noise to bag-of-words comparisons, so they are usually excluded.\n#### Using the included file \"stopwords.txt\", implement `tokenize`, an improved tokenizer that does not emit stopwords.\n[stopwords]: https://en.wikipedia.org/wiki/Stop_words", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nstopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH)\nstopwords = set(sc.textFile(stopfile).collect())\nprint 'These are the stopwords: %s' % stopwords\n\ndef tokenize(string):\n \"\"\" An implementation of input string tokenization that excludes stopwords\n Args:\n string (str): input string\n Returns:\n list: a list of tokens without stopwords\n \"\"\"\n return [token for token in simpleTokenize(string) if token not in stopwords]\n\nprint tokenize(quickbrownfox) # Should give ['quick', 'brown', ... 
]", "These are the stopwords: set([u'all', u'just', u'being', u'over', u'both', u'through', u'yourselves', u'its', u'before', u'with', u'had', u'should', u'to', u'only', u'under', u'ours', u'has', u'do', u'them', u'his', u'very', u'they', u'not', u'during', u'now', u'him', u'nor', u'did', u'these', u't', u'each', u'where', u'because', u'doing', u'theirs', u'some', u'are', u'our', u'ourselves', u'out', u'what', u'for', u'below', u'does', u'above', u'between', u'she', u'be', u'we', u'after', u'here', u'hers', u'by', u'on', u'about', u'of', u'against', u's', u'or', u'own', u'into', u'yourself', u'down', u'your', u'from', u'her', u'whom', u'there', u'been', u'few', u'too', u'themselves', u'was', u'until', u'more', u'himself', u'that', u'but', u'off', u'herself', u'than', u'those', u'he', u'me', u'myself', u'this', u'up', u'will', u'while', u'can', u'were', u'my', u'and', u'then', u'is', u'in', u'am', u'it', u'an', u'as', u'itself', u'at', u'have', u'further', u'their', u'if', u'again', u'no', u'when', u'same', u'any', u'how', u'other', u'which', u'you', u'who', u'most', u'such', u'why', u'a', u'don', u'i', u'having', u'so', u'the', u'yours', u'once'])\n['quick', 'brown', 'fox', 'jumps', 'lazy', 'dog']\n" ], [ "# TEST Removing stopwords (1b)\nTest.assertEquals(tokenize(\"Why a the?\"), [], 'tokenize should remove all stopwords')\nTest.assertEquals(tokenize(\"Being at the_?\"), ['the_'], 'tokenize should handle non-stopwords')\nTest.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],\n 'tokenize should handle sample text')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(1c) Tokenizing the small datasets**\n#### Now let's tokenize the two *small* datasets. For each ID in a dataset, `tokenize` the values, and then count the total number of tokens.\n#### How many tokens, total, are there in the two datasets?", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\namazonRecToToken = amazonSmall.map(lambda x: (x[0], tokenize(x[1])))\ngoogleRecToToken = googleSmall.map(lambda x: (x[0], tokenize(x[1])))\n\ndef countTokens(vendorRDD):\n \"\"\" Count and return the number of tokens\n Args:\n vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output\n Returns:\n count: count of all tokens\n \"\"\"\n return vendorRDD.map(lambda x: len(x[1])).sum()\n\ntotalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken)\nprint 'There are %s tokens in the combined datasets' % totalTokens", "There are 22520 tokens in the combined datasets\n" ], [ "# TEST Tokenizing the small datasets (1c)\nTest.assertEquals(totalTokens, 22520, 'incorrect totalTokens')", "1 test passed.\n" ] ], [ [ "### **(1d) Amazon record with the most tokens**\n#### Which Amazon record has the biggest number of tokens?\n#### In other words, you want to sort the records and get the one with the largest count of tokens.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef findBiggestRecord(vendorRDD):\n \"\"\" Find and return the record with the largest number of tokens\n Args:\n vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens\n Returns:\n list: a list of 1 Pair Tuple of record ID and tokens\n \"\"\"\n return vendorRDD.takeOrdered(1, lambda x: -len(x[1]))\n\nbiggestRecordAmazon = findBiggestRecord(amazonRecToToken)\nprint 'The Amazon record with ID \"%s\" has the most tokens (%s)' % (biggestRecordAmazon[0][0],\n len(biggestRecordAmazon[0][1]))", "The Amazon 
record with ID \"b000o24l3q\" has the most tokens (1547)\n" ], [ "# TEST Amazon record with the most tokens (1d)\nTest.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')\nTest.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')", "1 test passed.\n1 test passed.\n" ] ], [ [ "### **Part 2: ER as Text Similarity - Weighted Bag-of-Words using TF-IDF**\n#### Bag-of-words comparisons are not very good when all tokens are treated the same: some tokens are more important than others. Weights give us a way to specify which tokens to favor. With weights, when we compare documents, instead of counting common tokens, we sum up the weights of common tokens. A good heuristic for assigning weights is called \"Term-Frequency/Inverse-Document-Frequency,\" or [TF-IDF][tfidf] for short.\n#### **TF**\n#### TF rewards tokens that appear many times in the same document. It is computed as the frequency of a token in a document, that is, if document *d* contains 100 tokens and token *t* appears in *d* 5 times, then the TF weight of *t* in *d* is *5/100 = 1/20*. The intuition for TF is that if a word occurs often in a document, then it is more important to the meaning of the document.\n#### **IDF**\n#### IDF rewards tokens that are rare overall in a dataset. The intuition is that it is more significant if two documents share a rare word than a common one. IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:\n* #### Let *N* be the total number of documents in *U*\n* #### Find *n(t)*, the number of documents in *U* that contain *t*\n* #### Then *IDF(t) = N/n(t)*.\n#### Note that *n(t)/N* is the frequency of *t* in *U*, and *N/n(t)* is the inverse frequency.\n> #### **Note on terminology**: Sometimes token weights depend on the document the token belongs to, that is, the same token may have a different weight when it's found in different documents. We call these weights *local* weights. TF is an example of a local weight, because it depends on the length of the source. On the other hand, some token weights only depend on the token, and are the same everywhere that token is found. We call these weights *global*, and IDF is one such weight.\n#### **TF-IDF**\n#### Finally, to bring it all together, the total TF-IDF weight for a token in a document is the product of its TF and IDF weights.\n[tfidf]: https://en.wikipedia.org/wiki/Tf%E2%80%93idf", "_____no_output_____" ], [ "### **(2a) Implement a TF function**\n#### Implement `tf(tokens)` that takes a list of tokens and returns a Python [dictionary](https://docs.python.org/2/tutorial/datastructures.html#dictionaries) mapping tokens to TF weights.\n#### The steps your function should perform are:\n* #### Create an empty Python dictionary\n* #### For each of the tokens in the input `tokens` list, count 1 for each occurance and add the token to the dictionary\n* #### For each of the tokens in the dictionary, divide the token's count by the total number of tokens in the input `tokens` list", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nfrom collections import Counter\ndef tf(tokens):\n \"\"\" Compute TF\n Args:\n tokens (list of str): input list of tokens from tokenize\n Returns:\n dictionary: a dictionary of tokens to its TF values\n \"\"\"\n count = len(tokens)\n word_freq = Counter(tokens)\n return {key: float(value)/count for key, value in word_freq.items()}\n\nprint tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... 
}", "{'brown': 0.16666666666666666, 'lazy': 0.16666666666666666, 'jumps': 0.16666666666666666, 'fox': 0.16666666666666666, 'dog': 0.16666666666666666, 'quick': 0.16666666666666666}\n" ], [ "# TEST Implement a TF function (2a)\ntf_test = tf(tokenize(quickbrownfox))\nTest.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,\n 'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,\n 'dog': 0.16666666666666666, 'quick': 0.16666666666666666},\n 'incorrect result for tf on sample text')\ntf_test2 = tf(tokenize('one_ one_ two!'))\nTest.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},\n 'incorrect result for tf test')", "1 test passed.\n1 test passed.\n" ] ], [ [ "### **(2b) Create a corpus**\n#### Create a pair RDD called `corpusRDD`, consisting of a combination of the two small datasets, `amazonRecToToken` and `googleRecToToken`. Each element of the `corpusRDD` should be a pair consisting of a key from one of the small datasets (ID or URL) and the value is the associated value for that key from the small datasets.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ncorpusRDD = amazonRecToToken.union(googleRecToToken)", "_____no_output_____" ], [ "# TEST Create a corpus (2b)\nTest.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')", "1 test passed.\n" ] ], [ [ "### **(2c) Implement an IDFs function**\n#### Implement `idfs` that assigns an IDF weight to every unique token in an RDD called `corpus`. The function should return an pair RDD where the `key` is the unique token and value is the IDF weight for the token.\n#### Recall that the IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:\n* #### Let *N* be the total number of documents in *U*.\n* #### Find *n(t)*, the number of documents in *U* that contain *t*.\n* #### Then *IDF(t) = N/n(t)*.\n#### The steps your function should perform are:\n* #### Calculate *N*. Think about how you can calculate *N* from the input RDD.\n* #### Create an RDD (*not a pair RDD*) containing the unique tokens from each document in the input `corpus`. For each document, you should only include a token once, *even if it appears multiple times in that document.*\n* #### For each of the unique tokens, count how many times it appears in the document and then compute the IDF for that token: *N/n(t)*\n#### Use your `idfs` to compute the IDF weights for all tokens in `corpusRDD` (the combined small datasets).\n#### How many unique tokens are there?", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef idfs(corpus):\n \"\"\" Compute IDF\n Args:\n corpus (RDD): input corpus\n Returns:\n RDD: a RDD of (token, IDF value)\n \"\"\"\n N = corpus.count()\n uniqueTokens = corpus.flatMap(lambda x: list(set(x[1])))\n tokenCountPairTuple = uniqueTokens.map(lambda x: (x, 1))\n tokenSumPairTuple = tokenCountPairTuple.reduceByKey(lambda a, b: a + b)\n return tokenSumPairTuple.map(lambda x: (x[0], float(N)/x[1]))\n\nidfsSmall = idfs(amazonRecToToken.union(googleRecToToken))\nuniqueTokenCount = idfsSmall.count()\n\nprint 'There are %s unique tokens in the small datasets.' 
% uniqueTokenCount", " There are 4772 unique tokens in the small datasets.\n" ], [ "# TEST Implement an IDFs function (2c)\nTest.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')\ntokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]\nTest.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')\nTest.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,\n 'incorrect smallest IDF value')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(2d) Tokens with the smallest IDF**\n#### Print out the 11 tokens with the smallest IDF in the combined small dataset.", "_____no_output_____" ] ], [ [ "smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1])\nprint smallIDFTokens", "[('software', 4.25531914893617), ('new', 6.896551724137931), ('features', 6.896551724137931), ('use', 7.017543859649122), ('complete', 7.2727272727272725), ('easy', 7.6923076923076925), ('create', 8.333333333333334), ('system', 8.333333333333334), ('cd', 8.333333333333334), ('1', 8.51063829787234), ('windows', 8.51063829787234)]\n" ] ], [ [ "### **(2e) IDF Histogram**\n#### Plot a histogram of IDF values. Be sure to use appropriate scaling and bucketing for the data.\n#### First plot the histogram using `matplotlib`", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nsmall_idf_values = idfsSmall.map(lambda s: s[1]).collect()\nfig = plt.figure(figsize=(8,3))\nplt.hist(small_idf_values, 50, log=True)\npass", "_____no_output_____" ] ], [ [ "### **(2f) Implement a TF-IDF function**\n#### Use your `tf` function to implement a `tfidf(tokens, idfs)` function that takes a list of tokens from a document and a Python dictionary of IDF weights and returns a Python dictionary mapping individual tokens to total TF-IDF weights.\n#### The steps your function should perform are:\n* #### Calculate the token frequencies (TF) for `tokens`\n* #### Create a Python dictionary where each token maps to the token's frequency times the token's IDF weight\n#### Use your `tfidf` function to compute the weights of Amazon product record 'b000hkgj8k'. To do this, we need to extract the record for the token from the tokenized small Amazon dataset and we need to convert the IDFs for the small dataset into a Python dictionary. We can do the first part, by using a `filter()` transformation to extract the matching record and a `collect()` action to return the value to the driver. 
For the second part, we use the [`collectAsMap()` action](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap) to return the IDFs to the driver as a Python dictionary.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef tfidf(tokens, idfs):\n \"\"\" Compute TF-IDF\n Args:\n tokens (list of str): input list of tokens from tokenize\n idfs (dictionary): record to IDF value\n Returns:\n dictionary: a dictionary of records to TF-IDF values\n \"\"\"\n tfs = tf(tokens)\n tfIdfDict = dict((k, tfs[k] * idfs[k]) for k in tokens if k in idfs)\n return tfIdfDict\n\nrecb000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1]\nidfsSmallWeights = idfsSmall.collectAsMap()\nrec_b000hkgj8k_weights = tfidf(recb000hkgj8k, idfsSmallWeights)\n\nprint 'Amazon record \"b000hkgj8k\" has tokens and weights:\\n%s' % rec_b000hkgj8k_weights", "Amazon record \"b000hkgj8k\" has tokens and weights:\n{'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}\n" ], [ "# TEST Implement a TF-IDF function (2f)\nTest.assertEquals(rec_b000hkgj8k_weights,\n {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332,\n 'courseware': 66.66666666666666, 'psg': 33.33333333333333,\n '2007': 3.5087719298245617, 'customizing': 16.666666666666664,\n 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')", "1 test passed.\n" ] ], [ [ "### **Part 3: ER as Text Similarity - Cosine Similarity**\n#### Now we are ready to do text comparisons in a formal way. The metric of string distance we will use is called **[cosine similarity][cosine]**. We will treat each document as a vector in some high dimensional space. Then, to compare two documents we compute the cosine of the angle between their two document vectors. This is *much* easier than it sounds.\n#### The first question to answer is how do we represent documents as vectors? The answer is familiar: bag-of-words! We treat each unique token as a dimension, and treat token weights as magnitudes in their respective token dimensions. For example, suppose we use simple counts as weights, and we want to interpret the string \"Hello, world! Goodbye, world!\" as a vector. Then in the \"hello\" and \"goodbye\" dimensions the vector has value 1, in the \"world\" dimension it has value 2, and it is zero in all other dimensions.\n#### The next question is: given two vectors how do we find the cosine of the angle between them? Recall the formula for the dot product of two vectors:\n#### $$ a \\cdot b = \\| a \\| \\| b \\| \\cos \\theta $$\n#### Here $ a \\cdot b = \\sum a_i b_i $ is the ordinary dot product of two vectors, and $ \\|a\\| = \\sqrt{ \\sum a_i^2 } $ is the norm of $ a $.\n#### We can rearrange terms and solve for the cosine to find it is simply the normalized dot product of the vectors. With our vector model, the dot product and norm computations are simple functions of the bag-of-words document representations, so we now have a formal way to compute similarity:\n#### $$ similarity = \\cos \\theta = \\frac{a \\cdot b}{\\|a\\| \\|b\\|} = \\frac{\\sum a_i b_i}{\\sqrt{\\sum a_i^2} \\sqrt{\\sum b_i^2}} $$\n#### Setting aside the algebra, the geometric interpretation is more intuitive. The angle between two document vectors is small if they share many tokens in common, because they are pointing in roughly the same direction. 
For that case, the cosine of the angle will be large. Otherwise, if the angle is large (and they have few words in common), the cosine is small. Therefore, cosine similarity scales proportionally with our intuitive sense of similarity.\n[cosine]: https://en.wikipedia.org/wiki/Cosine_similarity", "_____no_output_____" ], [ "### **(3a) Implement the components of a `cosineSimilarity` function**\n#### Implement the components of a `cosineSimilarity` function.\n#### Use the `tokenize` and `tfidf` functions, and the IDF weights from Part 2 for extracting tokens and assigning them weights.\n#### The steps you should perform are:\n* #### Define a function `dotprod` that takes two Python dictionaries and produces the dot product of them, where the dot product is defined as the sum of the product of values for tokens that appear in *both* dictionaries\n* #### Define a function `norm` that returns the square root of the dot product of a dictionary and itself\n* #### Define a function `cossim` that returns the dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nimport math\n\ndef dotprod(a, b):\n \"\"\" Compute dot product\n Args:\n a (dictionary): first dictionary of record to value\n b (dictionary): second dictionary of record to value\n Returns:\n dotProd: result of the dot product with the two input dictionaries\n \"\"\"\n return sum(a[k] * b[k] for k in a if k in b)\n\ndef norm(a):\n \"\"\" Compute square root of the dot product\n Args:\n a (dictionary): a dictionary of record to value\n Returns:\n norm: the square root of the dot product of the dictionary with itself\n \"\"\"\n return math.sqrt(dotprod(a,a))\n\ndef cossim(a, b):\n \"\"\" Compute cosine similarity\n Args:\n a (dictionary): first dictionary of record to value\n b (dictionary): second dictionary of record to value\n Returns:\n cossim: dot product of two dictionaries divided by the norm of the first dictionary and\n then by the norm of the second dictionary\n \"\"\"\n return dotprod(a,b)/(norm(a) * norm(b))\n\ntestVec1 = {'foo': 2, 'bar': 3, 'baz': 5 }\ntestVec2 = {'foo': 1, 'bar': 0, 'baz': 20 }\ndp = dotprod(testVec1, testVec2)\nnm = norm(testVec1)\nprint dp, nm", "102 6.16441400297\n" ], [ "# TEST Implement the components of a cosineSimilarity function (3a)\nTest.assertEquals(dp, 102, 'incorrect dp')\nTest.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrect nm')", "1 test passed.\n1 test passed.\n" ] ], [ [ "### **(3b) Implement a `cosineSimilarity` function**\n#### Implement a `cosineSimilarity(string1, string2, idfsDictionary)` function that takes two strings and a dictionary of IDF weights, and computes their cosine similarity in the context of some global IDF weights.\n#### The steps you should perform are:\n* #### Apply your `tfidf` function to the tokenized first and second strings, using the dictionary of IDF weights\n* #### Compute and return your `cossim` function applied to the results of the two `tfidf` functions", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef cosineSimilarity(string1, string2, idfsDictionary):\n \"\"\" Compute cosine similarity between two strings\n Args:\n string1 (str): first string\n string2 (str): second string\n idfsDictionary (dictionary): a dictionary of IDF values\n Returns:\n cossim: cosine similarity value\n \"\"\"\n w1 = tfidf(tokenize(string1), idfsDictionary)\n w2 = tfidf(tokenize(string2), 
idfsDictionary)\n return cossim(w1, w2)\n\ncossimAdobe = cosineSimilarity('Adobe Photoshop',\n 'Adobe Illustrator',\n idfsSmallWeights)\n\nprint cossimAdobe", "0.0577243382163\n" ], [ "# TEST Implement a cosineSimilarity function (3b)\nTest.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')", "1 test passed.\n" ] ], [ [ "### **(3c) Perform Entity Resolution**\n#### Now we can finally do some entity resolution!\n#### For *every* product record in the small Google dataset, use your `cosineSimilarity` function to compute its similarity to every record in the small Amazon dataset. Then, build a dictionary mapping `(Google URL, Amazon ID)` tuples to similarity scores between 0 and 1.\n#### We'll do this computation two different ways: first we'll do it without a broadcast variable, and then we'll use a broadcast variable\n#### The steps you should perform are:\n* #### Create an RDD that is a combination of the small Google and small Amazon datasets that has as elements all pairs of elements (a, b) where a is in self and b is in other. The result will be an RDD of the form: `[ ((Google URL1, Google String1), (Amazon ID1, Amazon String1)), ((Google URL1, Google String1), (Amazon ID2, Amazon String2)), ((Google URL2, Google String2), (Amazon ID1, Amazon String1)), ... ]`\n* #### Define a worker function that given an element from the combination RDD computes the cosine similarity for the two records in the element\n* #### Apply the worker function to every element in the RDD\n#### Now, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ncrossSmall = (googleSmall\n .cartesian(amazonSmall)\n .cache())\n\ndef computeSimilarity(record):\n \"\"\" Compute similarity on a combination record\n Args:\n record: a pair, (google record, amazon record)\n Returns:\n tuple: a tuple of (google URL, amazon ID, cosine similarity value)\n \"\"\"\n googleRec = record[0]\n amazonRec = record[1]\n googleURL = googleRec[0]\n amazonID = amazonRec[0]\n googleValue = googleRec[1]\n amazonValue = amazonRec[1]\n cs = cosineSimilarity(googleValue, amazonValue, idfsSmallWeights)\n return (googleURL, amazonID, cs)\n\nsimilarities = (crossSmall\n .map(computeSimilarity)\n .cache())\n\ndef similar(amazonID, googleURL):\n \"\"\" Return similarity value\n Args:\n amazonID: amazon ID\n googleURL: google URL\n Returns:\n similar: cosine similarity value\n \"\"\"\n return (similarities\n .filter(lambda record: (record[0] == googleURL and record[1] == amazonID))\n .collect()[0][2])\n\nsimilarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')\nprint 'Requested similarity is %s.' % similarityAmazonGoogle", "Requested similarity is 0.000303171940451.\n" ], [ "# TEST Perform Entity Resolution (3c)\nTest.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,\n 'incorrect similarityAmazonGoogle')", "1 test passed.\n" ] ], [ [ "### **(3d) Perform Entity Resolution with Broadcast Variables**\n#### The solution in (3c) works well for small datasets, but it requires Spark to (automatically) send the `idfsSmallWeights` variable to all the workers. If we didn't `cache()` similarities, then it might have to be recreated if we run `similar()` multiple times. 
This would cause Spark to send `idfsSmallWeights` every time.\n#### Instead, we can use a broadcast variable - we define the broadcast variable in the driver and then we can refer to it in each worker. Spark saves the broadcast variable at each worker, so it is only sent once.\n#### The steps you should perform are:\n* #### Define a `computeSimilarityBroadcast` function that given an element from the combination RDD computes the cosine similarity for the two records in the element. This will be the same as the worker function `computeSimilarity` in (3c) except that it uses a broadcast variable.\n* #### Apply the worker function to every element in the RDD\n#### Again, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef computeSimilarityBroadcast(record):\n \"\"\" Compute similarity on a combination record, using Broadcast variable\n Args:\n record: a pair, (google record, amazon record)\n Returns:\n tuple: a tuple of (google URL, amazon ID, cosine similarity value)\n \"\"\"\n googleRec = record[0]\n amazonRec = record[1]\n googleURL = googleRec[0]\n amazonID = amazonRec[0]\n googleValue = googleRec[1]\n amazonValue = amazonRec[1]\n cs = cosineSimilarity(googleValue, amazonValue, idfsSmallBroadcast.value)\n return (googleURL, amazonID, cs)\n\nidfsSmallBroadcast = sc.broadcast(idfsSmallWeights)\nsimilaritiesBroadcast = (crossSmall\n .map(computeSimilarityBroadcast)\n .cache())\n\ndef similarBroadcast(amazonID, googleURL):\n \"\"\" Return similarity value, computed using Broadcast variable\n Args:\n amazonID: amazon ID\n googleURL: google URL\n Returns:\n similar: cosine similarity value\n \"\"\"\n return (similaritiesBroadcast\n .filter(lambda record: (record[0] == googleURL and record[1] == amazonID))\n .collect()[0][2])\n\nsimilarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')\nprint 'Requested similarity is %s.' % similarityAmazonGoogleBroadcast", "Requested similarity is 0.000303171940451.\n" ], [ "# TEST Perform Entity Resolution with Broadcast Variables (3d)\nfrom pyspark import Broadcast\nTest.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')\nTest.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')\nTest.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,\n 'incorrect similarityAmazonGoogle')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(3e) Perform a Gold Standard evaluation**\n#### First, we'll load the \"gold standard\" data and use it to answer several questions. We read and parse the Gold Standard data, where the format of each line is \"Amazon Product ID\",\"Google URL\". 
The resulting RDD has elements of the form (\"AmazonID GoogleURL\", 'gold')", "_____no_output_____" ] ], [ [ "GOLDFILE_PATTERN = '^(.+),(.+)'\n\n# Parse each line of a data file using the specified regular expression pattern\ndef parse_goldfile_line(goldfile_line):\n \"\"\" Parse a line from the 'gold standard' data file\n Args:\n goldfile_line: a line of data\n Returns:\n pair: ((key, 'gold'), 1) if the line parses, or (line, 0/-1) for header/invalid lines\n \"\"\"\n match = re.search(GOLDFILE_PATTERN, goldfile_line)\n if match is None:\n print 'Invalid goldfile line: %s' % goldfile_line\n return (goldfile_line, -1)\n elif match.group(1) == '\"idAmazon\"':\n print 'Header datafile line: %s' % goldfile_line\n return (goldfile_line, 0)\n else:\n key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2)))\n return ((key, 'gold'), 1)\n\ngoldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH)\ngsRaw = (sc\n .textFile(goldfile)\n .map(parse_goldfile_line)\n .cache())\n\ngsFailed = (gsRaw\n .filter(lambda s: s[1] == -1)\n .map(lambda s: s[0]))\nfor line in gsFailed.take(10):\n print 'Invalid goldfile line: %s' % line\n\ngoldStandard = (gsRaw\n .filter(lambda s: s[1] == 1)\n .map(lambda s: s[0])\n .cache())\n\nprint 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(),\n goldStandard.count(),\n gsFailed.count())\nassert (gsFailed.count() == 0)\nassert (gsRaw.count() == (goldStandard.count() + 1))", "Read 1301 lines, successfully parsed 1300 lines, failed to parse 0 lines\n" ] ], [ [ "### Using the \"gold standard\" data we can answer the following questions:\n* #### How many true duplicate pairs are there in the small datasets?\n* #### What is the average similarity score for true duplicates?\n* #### What about for non-duplicates?\n#### The steps you should perform are:\n* #### Create a new `sims` RDD from the `similaritiesBroadcast` RDD, where each element consists of a pair of the form (\"AmazonID GoogleURL\", cosineSimilarityScore). An example entry from `sims` is: ('b000bi7uqs http://www.google.com/base/feeds/snippets/18403148885652932189', 0.40202896125621296)\n* #### Combine the `sims` RDD with the `goldStandard` RDD by creating a new `trueDupsRDD` RDD that has just the cosine similarity scores for those \"AmazonID GoogleURL\" pairs that appear in both the `sims` RDD and `goldStandard` RDD. Hint: you can do this using the join() transformation.\n* #### Count the number of true duplicate pairs in the `trueDupsRDD` dataset\n* #### Compute the average similarity score for true duplicates in the `trueDupsRDD` dataset. Remember to use `float` for calculation\n* #### Create a new `nonDupsRDD` RDD that has just the cosine similarity scores for those \"AmazonID GoogleURL\" pairs from the `similaritiesBroadcast` RDD that **do not** appear in both the *sims* RDD and gold standard RDD.\n* #### Compute the average similarity score for non-duplicates in the `nonDupsRDD` dataset. 
Remember to use `float` for calculation", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nsims = similaritiesBroadcast.map(lambda x: (x[1] + \" \" + x[0], x[2]))\n\ntrueDupsRDD = (sims\n .join(goldStandard).map(lambda x: (x[0], x[1][0])))\n\ntrueDupsCount = trueDupsRDD.count()\navgSimDups = trueDupsRDD.map(lambda x: x[1]).sum()/float(trueDupsCount)\n\nnonDupsRDD = (sims\n .leftOuterJoin(goldStandard).filter(lambda x: x[1][1] == None).map(lambda x: (x[0], x[1][0])))\navgSimNon = nonDupsRDD.map(lambda x: x[1]).sum()/float(nonDupsRDD.count())\n\nprint 'There are %s true duplicates.' % trueDupsCount\nprint 'The average similarity of true duplicates is %s.' % avgSimDups\nprint 'And for non duplicates, it is %s.' % avgSimNon", "There are 146 true duplicates.\nThe average similarity of true duplicates is 0.264332573435.\nAnd for non duplicates, it is 0.00123476304656.\n" ], [ "# TEST Perform a Gold Standard evaluation (3e)\nTest.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')\nTest.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')\nTest.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **Part 4: Scalable ER**\n#### In the previous parts, we built a text similarity function and used it for small scale entity resolution. Our implementation is limited by its quadratic run time complexity, and is not practical for even modestly sized datasets. In this part, we will implement a more scalable algorithm and use it to do entity resolution on the full dataset.\n### Inverted Indices\n#### To improve our ER algorithm from the earlier parts, we should begin by analyzing its running time. In particular, the algorithm above is quadratic in two ways. First, we did a lot of redundant computation of tokens and weights, since each record was reprocessed every time it was compared. Second, we made quadratically many token comparisons between records.\n#### The first source of quadratic overhead can be eliminated with precomputation and look-up tables, but the second source is a little more tricky. In the worst case, every token in every record in one dataset exists in every record in the other dataset, and therefore every token makes a non-zero contribution to the cosine similarity. In this case, token comparison is unavoidably quadratic.\n#### But in reality most records have nothing (or very little) in common. Moreover, it is typical for a record in one dataset to have at most one duplicate record in the other dataset (this is the case assuming each dataset has been de-duplicated against itself). In this case, the output is linear in the size of the input and we can hope to achieve linear running time.\n#### An [**inverted index**](https://en.wikipedia.org/wiki/Inverted_index) is a data structure that will allow us to avoid making quadratically many token comparisons. It maps each token in the dataset to the list of documents that contain the token. So, instead of comparing, record by record, each token to every other token to see if they match, we will use inverted indices to *look up* records that match on a particular token.\n> #### **Note on terminology**: In text search, a *forward* index maps documents in a dataset to the tokens they contain. 
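For instance (an illustrative toy example, not from the lab), here is a two-document forward index alongside its inverted counterpart:\n\n```python\n# The same two documents, first keyed by document (forward), then keyed by token (inverted).\nforward = {'doc1': ['spark', 'fast'], 'doc2': ['spark']}\ninverted = {'spark': ['doc1', 'doc2'], 'fast': ['doc1']}\n```\n> #### 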
An *inverted* index supports the inverse mapping.\n> #### **Note**: For this section, use the complete Google and Amazon datasets, not the samples", "_____no_output_____" ], [ "### **(4a) Tokenize the full dataset**\n#### Tokenize each of the two full datasets for Google and Amazon.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\namazonFullRecToToken = amazon.map(lambda x: (x[0], tokenize(x[1])))\ngoogleFullRecToToken = google.map(lambda x: (x[0], tokenize(x[1])))\nprint 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(),\n googleFullRecToToken.count())", "Amazon full dataset is 1363 products, Google full dataset is 3226 products\n" ], [ "# TEST Tokenize the full dataset (4a)\nTest.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')\nTest.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')", "1 test passed.\n1 test passed.\n" ] ], [ [ "### **(4b) Compute IDFs and TF-IDFs for the full datasets**\n#### We will reuse your code from above to compute IDF weights for the complete combined datasets.\n#### The steps you should perform are:\n* #### Create a new `fullCorpusRDD` that contains the tokens from the full Amazon and Google datasets.\n* #### Apply your `idfs` function to the `fullCorpusRDD`\n* #### Create a broadcast variable containing a dictionary of the IDF weights for the full dataset.\n* #### For each of the Amazon and Google full datasets, create weight RDDs that map IDs/URLs to TF-IDF weighted token vectors.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\nfullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken)\nidfsFull = idfs(fullCorpusRDD)\nidfsFullCount = idfsFull.count()\nprint 'There are %s unique tokens in the full datasets.' % idfsFullCount\n\n# Recompute IDFs for full dataset\nidfsFullWeights = idfsFull.collectAsMap()\nidfsFullBroadcast = sc.broadcast(idfsFullWeights)\n\n# Pre-compute TF-IDF weights. Build mappings from record ID weight vector.\namazonWeightsRDD = amazonFullRecToToken.map(lambda x: (x[0], tfidf(x[1],idfsFullBroadcast.value)))\ngoogleWeightsRDD = googleFullRecToToken.map(lambda x: (x[0], tfidf(x[1],idfsFullBroadcast.value)))\n\nprint 'There are %s Amazon weights and %s Google weights.' 
% (amazonWeightsRDD.count(),\n googleWeightsRDD.count())", "There are 17078 unique tokens in the full datasets.\nThere are 1363 Amazon weights and 3226 Google weights.\n" ], [ "# TEST Compute IDFs and TF-IDFs for the full datasets (4b)\nTest.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')\nTest.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')\nTest.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(4c) Compute Norms for the weights from the full datasets**\n#### We will reuse your code from above to compute norms of the IDF weights for the complete combined dataset.\n#### The steps you should perform are:\n* #### Create two collections, one for each of the full Amazon and Google datasets, where IDs/URLs map to the norm of the associated TF-IDF weighted token vectors.\n* #### Convert each collection into a broadcast variable, containing a dictionary of the norm of IDF weights for the full dataset", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\namazonNorms = amazonWeightsRDD.map(lambda x: (x[0], norm(x[1])))\namazonNormsBroadcast = sc.broadcast(amazonNorms.collectAsMap())\ngoogleNorms = googleWeightsRDD.map(lambda x: (x[0], norm(x[1])))\ngoogleNormsBroadcast = sc.broadcast(googleNorms.collectAsMap())", "_____no_output_____" ], [ "# TEST Compute Norms for the weights from the full datasets (4c)\nTest.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')\nTest.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')\nTest.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')\nTest.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')", "1 test passed.\n1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(4d) Create inverted indices from the full datasets**\n#### Build inverted indices of both data sources.\n#### The steps you should perform are:\n* #### Create an invert function that, given a pair of (ID/URL, TF-IDF weighted token vector), returns a list of pairs of (token, ID/URL). Recall that the TF-IDF weighted token vector is a Python dictionary with keys that are tokens and values that are weights.\n* #### Use your invert function to convert the full Amazon and Google TF-IDF weighted token vector datasets into two RDDs where each element is a pair of a token and an ID/URL that contains that token. These are inverted indices.", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef invert(record):\n \"\"\" Invert (ID, tokens) to a list of (token, ID)\n Args:\n record: a pair, (ID, token vector)\n Returns:\n pairs: a list of pairs of token to ID\n \"\"\"\n pairs = [(token, record[0]) for token in record[1]]\n return pairs\n\namazonInvPairsRDD = (amazonWeightsRDD\n .flatMap(invert)\n .cache())\n\ngoogleInvPairsRDD = (googleWeightsRDD\n .flatMap(invert)\n .cache())\n\n\nprint 'There are %s Amazon inverted pairs and %s Google inverted pairs.' 
% (amazonInvPairsRDD.count(),\n googleInvPairsRDD.count())", "There are 111387 Amazon inverted pairs and 77678 Google inverted pairs.\n" ], [ "# TEST Create inverted indices from the full datasets (4d)\ninvertedPair = invert((1, {'foo': 2}))\nTest.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')\nTest.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')\nTest.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **(4e) Identify common tokens from the full dataset**\n#### We are now in position to efficiently perform ER on the full datasets. Implement the following algorithm to build an RDD that maps a pair of (ID, URL) to a list of tokens they share in common:\n* #### Using the two inverted indices (RDDs where each element is a pair of a token and an ID or URL that contains that token), create a new RDD that contains only tokens that appear in both datasets. This will yield an RDD of pairs of (token, iterable(ID, URL)).\n* #### We need a mapping from (ID, URL) to token, so create a function that will swap the elements of the RDD you just created to create this new RDD consisting of ((ID, URL), token) pairs.\n* #### Finally, create an RDD consisting of pairs mapping (ID, URL) to all the tokens the pair shares in common", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\ndef swap(record):\n \"\"\" Swap (token, (ID, URL)) to ((ID, URL), token)\n Args:\n record: a pair, (token, (ID, URL))\n Returns:\n pair: ((ID, URL), token)\n \"\"\"\n token = record[0]\n keys = record[1]\n return (keys, token)\n\ncommonTokens = (amazonInvPairsRDD\n .join(googleInvPairsRDD).map(swap).groupByKey()\n .cache())\n\nprint 'Found %d common tokens' % commonTokens.count()", "Found 2441100 common tokens\n" ], [ "# TEST Identify common tokens from the full dataset (4e)\nTest.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')", "1 test passed.\n" ] ], [ [ "### **(4f) Compute cosine similarities using the common tokens**\n#### Use the data structures from parts **(4b)** and **(4e)** to build a dictionary to map record pairs to cosine similarity scores.\n#### The steps you should perform are:\n* #### Create two broadcast dictionaries from the amazonWeights and googleWeights RDDs\n* #### Create a `fastCosineSimilarity` function that takes in a record consisting of the pair ((Amazon ID, Google URL), tokens list) and computes, over the tokens in the token list, the sum of the products of the token's Amazon weight and its Google weight. The sum should then be divided by the norm for the Google URL and then divided by the norm for the Amazon ID. The function should return this value in a pair with the key being the (Amazon ID, Google URL). 
*Make sure you use broadcast variables you created for both the weights and norms*\n* #### Apply your `fastCosineSimilarity` function to the common tokens from the full dataset", "_____no_output_____" ] ], [ [ "# TODO: Replace <FILL IN> with appropriate code\namazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap())\ngoogleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap())\n\ndef fastCosineSimilarity(record):\n \"\"\" Compute Cosine Similarity using Broadcast variables\n Args:\n record: ((ID, URL), tokens)\n Returns:\n pair: ((ID, URL), cosine similarity value)\n \"\"\"\n amazonRec = record[0][0]\n googleRec = record[0][1]\n tokens = record[1]\n s = sum(amazonWeightsBroadcast.value[amazonRec][i] * googleWeightsBroadcast.value[googleRec][i] for i in tokens)\n value = s/(amazonNormsBroadcast.value[amazonRec] * googleNormsBroadcast.value[googleRec])\n key = (amazonRec, googleRec)\n return (key, value)\n\nsimilaritiesFullRDD = (commonTokens\n .map(fastCosineSimilarity)\n .cache())\n\nprint similaritiesFullRDD.count()", "2441100\n" ], [ "# TEST Compute cosine similarities from the full dataset (4f)\nsimilarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()\nTest.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')\nTest.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')\nTest.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')", "1 test passed.\n1 test passed.\n1 test passed.\n" ] ], [ [ "### **Part 5: Analysis**\n#### Now we have an authoritative list of record-pair similarities, but we need a way to use those similarities to decide if two records are duplicates or not. The simplest approach is to pick a **threshold**. Pairs whose similarity is above the threshold are declared duplicates, and pairs below the threshold are declared distinct.\n#### To decide where to set the threshold we need to understand what kind of errors result at different levels. If we set the threshold too low, we get more **false positives**, that is, record-pairs we say are duplicates that in reality are not. If we set the threshold too high, we get more **false negatives**, that is, record-pairs that really are duplicates but that we miss.\n#### ER algorithms are evaluated by the common metrics of information retrieval and search called **precision** and **recall**. Precision asks of all the record-pairs marked duplicates, what fraction are true duplicates? Recall asks of all the true duplicates in the data, what fraction did we successfully find? As with false positives and false negatives, there is a trade-off between precision and recall. A third metric, called **F-measure**, takes the harmonic mean of precision and recall to measure overall goodness in a single value:\n#### $$ Fmeasure = 2 \times \frac{precision \times recall}{precision + recall} $$\n> #### **Note**: In this part, we use the \"gold standard\" mapping from the included file to look up true duplicates, and the results of Part 4.\n> #### **Note**: In this part, you will not be writing any code. We've written all of the code for you. 
Run each cell and then answer the quiz questions on Studio.", "_____no_output_____" ], [ "### **(5a) Counting True Positives, False Positives, and False Negatives**\n#### We need functions that count True Positives (true duplicates above the threshold), and False Positives and False Negatives:\n* #### We start with creating the `simsFullRDD` from our `similaritiesFullRDD` that consists of a pair of ((Amazon ID, Google URL), similarity score)\n* #### From this RDD, we create an RDD consisting of only the similarity scores\n* #### To look up the similarity scores for true duplicates, we perform a left outer join using the `goldStandard` RDD and `simsFullRDD` and extract the similarity scores, using a helper function that returns 0 for true duplicates missing from `simsFullRDD`", "_____no_output_____" ] ], [ [ "# Create an RDD of ((Amazon ID, Google URL), similarity score)\nsimsFullRDD = similaritiesFullRDD.map(lambda x: (\"%s %s\" % (x[0][0], x[0][1]), x[1]))\nassert (simsFullRDD.count() == 2441100)\n\n# Create an RDD of just the similarity scores\nsimsFullValuesRDD = (simsFullRDD\n .map(lambda x: x[1])\n .cache())\nassert (simsFullValuesRDD.count() == 2441100)\n\n# Look up all similarity scores for true duplicates\n\n# This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives).\ndef gs_value(record):\n if (record[1][1] is None):\n return 0\n else:\n return record[1][1]\n\n# Join the gold standard and simsFullRDD, and then extract the similarity scores using the helper function\ntrueDupSimsRDD = (goldStandard\n .leftOuterJoin(simsFullRDD)\n .map(gs_value)\n .cache())\nprint 'There are %s true duplicates.' % trueDupSimsRDD.count()\nassert(trueDupSimsRDD.count() == 1300)", "There are 1300 true duplicates.\n" ] ], [ [ "#### The next step is to pick a threshold between 0 and 1 for the count of True Positives (true duplicates above the threshold). However, we would like to explore many different thresholds. To do this, we divide the space of thresholds into 100 bins, and take the following actions:\n* #### We use Spark Accumulators to implement our counting function. We define a custom accumulator type, `VectorAccumulatorParam`, along with functions to initialize the accumulator's vector to zero, and to add two vectors. Note that we have to use the += operator because you can only add to an accumulator.\n* #### We create a helper function to create a list with one entry (bit) set to a value and all others set to 0.\n* #### We create 101 bins for the 100 threshold values between 0 and 1.\n* #### Now, for each similarity score, we can compute the false positives. We do this by adding each similarity score to the appropriate bin of the vector. 
Then we remove true positives from the vector by using the gold standard data.\n* #### We define functions for computing the false positives, false negatives, and true positives for a given threshold.", "_____no_output_____" ] ], [ [ "from pyspark.accumulators import AccumulatorParam\nclass VectorAccumulatorParam(AccumulatorParam):\n # Initialize the VectorAccumulator to 0\n def zero(self, value):\n return [0] * len(value)\n\n # Add two VectorAccumulator variables\n def addInPlace(self, val1, val2):\n for i in xrange(len(val1)):\n val1[i] += val2[i]\n return val1\n\n# Return a list with entry x set to value and all other entries set to 0\ndef set_bit(x, value, length):\n bits = []\n for y in xrange(length):\n if (x == y):\n bits.append(value)\n else:\n bits.append(0)\n return bits\n\n# Pre-bin counts of false positives for different threshold ranges\nBINS = 101\nnthresholds = 100\ndef bin(similarity):\n return int(similarity * nthresholds)\n\n# fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i\nzeros = [0] * BINS\nfpCounts = sc.accumulator(zeros, VectorAccumulatorParam())\n\ndef add_element(score):\n global fpCounts\n b = bin(score)\n fpCounts += set_bit(b, 1, BINS)\n\nsimsFullValuesRDD.foreach(add_element)\n\n# Remove true positives from FP counts\ndef sub_element(score):\n global fpCounts\n b = bin(score)\n fpCounts += set_bit(b, -1, BINS)\n\ntrueDupSimsRDD.foreach(sub_element)\n\ndef falsepos(threshold):\n fpList = fpCounts.value\n return sum([fpList[b] for b in range(0, BINS) if float(b) / nthresholds >= threshold])\n\ndef falseneg(threshold):\n return trueDupSimsRDD.filter(lambda x: x < threshold).count()\n\ndef truepos(threshold):\n return trueDupSimsRDD.count() - falsenegDict[threshold]", "_____no_output_____" ] ], [ [ "### **(5b) Precision, Recall, and F-measures**\n#### We define functions so that we can compute the [Precision][precision-recall], [Recall][precision-recall], and [F-measure][f-measure] as a function of threshold value:\n* #### Precision = true-positives / (true-positives + false-positives)\n* #### Recall = true-positives / (true-positives + false-negatives)\n* #### F-measure = 2 x Recall x Precision / (Recall + Precision)\n[precision-recall]: https://en.wikipedia.org/wiki/Precision_and_recall\n[f-measure]: https://en.wikipedia.org/wiki/Precision_and_recall#F-measure", "_____no_output_____" ] ], [ [ "# Precision = true-positives / (true-positives + false-positives)\n# Recall = true-positives / (true-positives + false-negatives)\n# F-measure = 2 x Recall x Precision / (Recall + Precision)\n\ndef precision(threshold):\n tp = trueposDict[threshold]\n return float(tp) / (tp + falseposDict[threshold])\n\ndef recall(threshold):\n tp = trueposDict[threshold]\n return float(tp) / (tp + falsenegDict[threshold])\n\ndef fmeasure(threshold):\n r = recall(threshold)\n p = precision(threshold)\n return 2 * r * p / (r + p)", "_____no_output_____" ] ], [ [ "### **(5c) Line Plots**\n#### We can make line plots of precision, recall, and F-measure as a function of threshold value, for thresholds between 0.0 and 1.0. 
You can change `nthresholds` (above in part **(5a)**) to change the threshold values to plot.", "_____no_output_____" ] ], [ [ "thresholds = [float(n) / nthresholds for n in range(0, nthresholds)]\nfalseposDict = dict([(t, falsepos(t)) for t in thresholds])\nfalsenegDict = dict([(t, falseneg(t)) for t in thresholds])\ntrueposDict = dict([(t, truepos(t)) for t in thresholds])\n\nprecisions = [precision(t) for t in thresholds]\nrecalls = [recall(t) for t in thresholds]\nfmeasures = [fmeasure(t) for t in thresholds]\n\nprint precisions[0], fmeasures[0]\nassert (abs(precisions[0] - 0.000532546802671) < 0.0000001)\nassert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001)\n\n\nfig = plt.figure()\nplt.plot(thresholds, precisions)\nplt.plot(thresholds, recalls)\nplt.plot(thresholds, fmeasures)\nplt.legend(['Precision', 'Recall', 'F-measure'])\npass", "0.000532546802671 0.00106452669505\n" ] ], [ [ "### Discussion\n#### State-of-the-art tools can get an F-measure of about 60% on this dataset. In this lab exercise, our best F-measure is closer to 40%. Look at some examples of errors (both False Positives and False Negatives) and think about what went wrong.\n### There are several ways we might improve our simple classifier, including:\n#### * Using additional attributes\n#### * Performing better featurization of our textual data (e.g., stemming, n-grams, etc.)\n#### * Using different similarity functions", "_____no_output_____" ] ] ]
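The cosine arithmetic inside `fastCosineSimilarity` can be prototyped without Spark. A minimal Python sketch of the same computation follows; the IDs, URLs, and weights below are invented for illustration and are not values from the lab data:

```python
import math

# Toy stand-ins for the broadcast weight dictionaries (all values invented).
amazon_weights = {'a1': {'clickart': 0.8, 'image': 0.1, 'pack': 0.3}}
google_weights = {'http://g/1': {'clickart': 0.5, 'image': 0.2, 'broderbund': 0.4}}

def vec_norm(weights):
    """Euclidean norm of a sparse token -> weight vector."""
    return math.sqrt(sum(w * w for w in weights.values()))

def fast_cosine_similarity(amazon_id, google_url, common_tokens):
    """Dot product over the shared tokens, divided by both vector norms."""
    a = amazon_weights[amazon_id]
    g = google_weights[google_url]
    dot = sum(a[t] * g[t] for t in common_tokens)
    return dot / (vec_norm(a) * vec_norm(g))

common = set(amazon_weights['a1']) & set(google_weights['http://g/1'])
print(fast_cosine_similarity('a1', 'http://g/1', common))  # sums over {'clickart', 'image'}
```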
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6cc3deda26cee8a67d015c26e4086c26e9ffdc
17,934
ipynb
Jupyter Notebook
notebooks-spanish/03-representacion_datos_aa.ipynb
jagalvis348/tutorial-sklearn
e0a4eee6b074b35d38df8e48f9df9b1979f4b381
[ "CC0-1.0" ]
1
2020-11-24T08:47:16.000Z
2020-11-24T08:47:16.000Z
notebooks-spanish/03-representacion_datos_aa.ipynb
jfcaballero/Tutorial-sobre-scikit-learn-completo
e0a4eee6b074b35d38df8e48f9df9b1979f4b381
[ "CC0-1.0" ]
null
null
null
notebooks-spanish/03-representacion_datos_aa.ipynb
jfcaballero/Tutorial-sobre-scikit-learn-completo
e0a4eee6b074b35d38df8e48f9df9b1979f4b381
[ "CC0-1.0" ]
null
null
null
29.545305
573
0.599086
[ [ [ "# Representación y visualización de datos", "_____no_output_____" ], [ "El aprendizaje automático trata de ajustar modelos a los datos; por esta razón, empezaremos discutiendo como los datos pueden ser representados para ser accesibles por el ordenador. Además de esto, nos basaremos en los ejemplos de matplotlib de la sección anterior para usarlos para representar datos.", "_____no_output_____" ], [ "## Datos en scikit-learn", "_____no_output_____" ], [ "Los datos en scikit-learn, salvo algunas excepciones, suelen estar almacenados en \n**arrays de 2 dimensiones**, con forma `[n_samples, n_features]`. Muchos algoritmos aceptan también matrices ``scipy.sparse`` con la misma forma.", "_____no_output_____" ], [ "- **n_samples:** este es el número de ejemplos. Cada ejemplo es un item a procesar (por ejemplo, clasificar). Un ejemplo puede ser un documento, una imagen, un sonido, un vídeo, un objeto astronómico, una fila de una base de datos o de un fichero CSV, o cualquier cosa que se pueda describir usando un conjunto prefijado de trazas cuantitativas.\n- **n_features:** este es el número de características descriptoras que se utilizan para describir cada item de forma cuantitativa. Las características son, generalmente, valores reales, aunque pueden ser categóricas o valores discretos.\n\nEl número de características debe ser fijado de antemano. Sin embargo, puede ser extremadamente alto (por ejemplo, millones de características), siendo cero en la mayoría de casos. En este tipo de datos, es buena idea usar matrices `scipy.sparse` que manejan mucho mejor la memoria.\n\nComo ya comentamos en la sección anterior, representamos los ejemplos (puntos o instancias) como filas en el array de datos y almacenamos las características correspondientes, las \"dimensiones\", como columnas.", "_____no_output_____" ], [ "### Un ejemplo simple: el dataset Iris", "_____no_output_____" ], [ "Como ejemplo de un dataset simple, vamos a echar un vistazo al conjunto iris almacenado en scikit-learn.\nLos datos consisten en medidas de tres especies de flores iris distintas:", "_____no_output_____" ], [ "Iris Setosa\n<img src=\"figures/iris_setosa.jpg\" width=\"50%\">\n\nIris Versicolor\n<img src=\"figures/iris_versicolor.jpg\" width=\"50%\">\n\nIris Virginica\n<img src=\"figures/iris_virginica.jpg\" width=\"50%\">\n\n", "_____no_output_____" ], [ "### Pregunta rápida:", "_____no_output_____" ], [ "**Asumamos que estamos interesados en categorizar nuevos ejemplos; queremos predecir si una flor nueva va a ser Iris-Setosa, Iris-Versicolor, o Iris-Virginica. Basándonos en lo discutido en secciones anteriores, ¿cómo construiríamos este dataset?**\n\nRecuerda: necesitamos un array 2D con forma (*shape*) `[n_samples x n_features]`.\n- ¿Qué sería `n_samples`?\n- ¿Qué podría ser `n_features`?\nRecuerda que debe haber un número **fijo** de características por cada ejemplo, y cada característica *j* debe ser el mismo tipo de cantidad para cada ejemplo.", "_____no_output_____" ], [ "### Cargando el dataset Iris desde scikit-learn", "_____no_output_____" ], [ "Para futuros experimentos con algoritmos de aprendizaje automático, te recomendamos que añadas a favoritos el [Repositorio UCI](http://archive.ics.uci.edu/ml/), que aloja muchos de los datasets que se utilizan para probar los algoritmos de aprendizaje automático. Además, algunos de estos datasets ya están incluidos en scikit-learn, pudiendo así evitar tener que descargar, leer, convertir y limpiar los ficheros de texto o CSV. 
The list of datasets already available in scikit-learn can be found [here](http://scikit-learn.org/stable/datasets/#toy-datasets).\n\nFor example, scikit-learn contains the iris dataset. The data consists of:\n- Features:\n 1. Sepal length in cm\n 2. Sepal width in cm\n 3. Petal length in cm\n 4. Petal width in cm\n- Labels to predict:\n 1. Iris Setosa\n 2. Iris Versicolour\n 3. Iris Virginica", "_____no_output_____" ], [ "<img src=\"figures/petal_sepal.jpg\" alt=\"Sepal\" style=\"width: 50%;\"/>\n\n(Image: \"Petal-sepal\". Licensed under CC BY-SA 3.0 via Wikimedia Commons - https://commons.wikimedia.org/wiki/File:Petal-sepal.jpg#/media/File:Petal-sepal.jpg)", "_____no_output_____" ], [ "``scikit-learn`` includes a copy of the iris CSV file along with a function that reads it into numpy arrays:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\niris = load_iris()", "_____no_output_____" ] ], [ [ "The dataset is a ``Bunch`` object. You can see what it contains using the ``keys()`` method:", "_____no_output_____" ] ], [ [ "iris.keys()", "_____no_output_____" ] ], [ [ "The features of each flower are found in the ``data`` attribute of the dataset:", "_____no_output_____" ] ], [ [ "n_samples, n_features = iris.data.shape\nprint('Number of samples:', n_samples)\nprint('Number of features:', n_features)\n# sepal length, sepal width, petal length and petal width of the first sample (first flower)\nprint(iris.data[0])", "_____no_output_____" ] ], [ [ "The class information for each sample is found in the ``target`` attribute of the dataset:", "_____no_output_____" ] ], [ [ "print(iris.data.shape)\nprint(iris.target.shape)", "_____no_output_____" ], [ "print(iris.target)", "_____no_output_____" ], [ "import numpy as np\n\nnp.bincount(iris.target)", "_____no_output_____" ] ], [ [ "The numpy function `bincount` (above) lets us see that the classes are distributed uniformly in this dataset (50 flowers of each species), where:\n- class 0: Iris-Setosa\n- class 1: Iris-Versicolor\n- class 2: Iris-Virginica", "_____no_output_____" ], [ "The class names are stored in ``target_names``:", "_____no_output_____" ] ], [ [ "print(iris.target_names)", "_____no_output_____" ] ], [ [ "This data is four-dimensional, but we can visualize one or two of the dimensions using a histogram or a scatter plot. 
First, we enable the *matplotlib inline mode*:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "x_index = 3\ncolors = ['blue', 'red', 'green']\n\nfor label, color in zip(range(len(iris.target_names)), colors):\n plt.hist(iris.data[iris.target==label, x_index], \n label=iris.target_names[label],\n color=color)\n\nplt.xlabel(iris.feature_names[x_index])\nplt.legend(loc='upper right')\nplt.show()", "_____no_output_____" ], [ "x_index = 3\ny_index = 0\n\ncolors = ['blue', 'red', 'green']\n\nfor label, color in zip(range(len(iris.target_names)), colors):\n plt.scatter(iris.data[iris.target==label, x_index], \n iris.data[iris.target==label, y_index],\n label=iris.target_names[label],\n c=color)\n\nplt.xlabel(iris.feature_names[x_index])\nplt.ylabel(iris.feature_names[y_index])\nplt.legend(loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n <b>Exercise</b>:\n <ul>\n <li>\n **Change** `x_index` **and** `y_index` ** in the script above and find a combination of the two parameters that best separates the three classes.**\n </li>\n <li>\n This exercise is a preview of what is called **dimensionality reduction**, which we will see later.\n </li>\n </ul>\n</div>", "_____no_output_____" ], [ "### Scatterplot matrices\n\nInstead of making each plot separately, a common tool that analysts use is the **scatterplot matrix**.\n\nThese matrices show the scatter plots between all the features of the dataset, as well as histograms to see the distribution of each feature.", "_____no_output_____" ] ], [ [ "import pandas as pd\n \niris_df = pd.DataFrame(iris.data, columns=iris.feature_names)\npd.plotting.scatter_matrix(iris_df, c=iris.target, figsize=(8, 8));", "_____no_output_____" ] ], [ [ "## Other available datasets", "_____no_output_____" ], [ "[Scikit-learn makes a large number of datasets available to the community](http://scikit-learn.org/stable/datasets/#dataset-loading-utilities). They come in three flavours:\n- **Packaged Data:** small datasets already shipped with the scikit-learn distribution, which can be accessed via ``sklearn.datasets.load_*``\n- **Downloadable Data:** these datasets are larger and can be downloaded with tools that scikit-learn\n already includes. These tools live in ``sklearn.datasets.fetch_*``\n- **Generated Data:** these datasets are generated by models based on random seeds (synthetic datasets). They are available in ``sklearn.datasets.make_*``\n\nYou can explore the scikit-learn dataset tools using IPython's autocompletion feature. 
After importing the ``datasets`` package from ``sklearn``, type\n\n datasets.load_<TAB>\n\nor\n\n datasets.fetch_<TAB>\n\nor\n\n datasets.make_<TAB>\n\nto see a list of the available functions", "_____no_output_____" ] ], [ [ "from sklearn import datasets", "_____no_output_____" ] ], [ [ "Warning: many of these datasets are quite large, and it can take a long time to download them.\n\nIf you start a download from an IPython notebook and then want to stop it, you can use the \"kernel interrupt\" option, accessible from the menu or with ``Ctrl-m i``.\n\nYou can press ``Ctrl-m h`` for a list of all the ``ipython`` shortcuts.", "_____no_output_____" ], [ "## Loading the digits data", "_____no_output_____" ], [ "Now let's look at another dataset, where we can study data representation in more detail. We can explore the data as follows:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_digits\ndigits = load_digits()", "_____no_output_____" ], [ "digits.keys()", "_____no_output_____" ], [ "n_samples, n_features = digits.data.shape\nprint((n_samples, n_features))", "_____no_output_____" ], [ "print(digits.data[0])\nprint(digits.data[-1])\nprint(digits.target)", "_____no_output_____" ] ], [ [ "Here the label is simply the digit that each sample represents. The data consists of an array of length 64... but what does this data mean?", "_____no_output_____" ], [ "A hint comes from the fact that we have two versions of the data:\n``data`` and ``images``. Let's take a look at both:", "_____no_output_____" ] ], [ [ "print(digits.data.shape)\nprint(digits.images.shape)", "_____no_output_____" ] ], [ [ "We can see that they are the same, via a simple *reshaping*:", "_____no_output_____" ] ], [ [ "import numpy as np\nprint(np.all(digits.images.reshape((1797, 64)) == digits.data))", "_____no_output_____" ] ], [ [ "Let's visualize the data. It is a bit more involved than the scatter plot we made earlier.", "_____no_output_____" ] ], [ [ "# Set up the figure\nfig = plt.figure(figsize=(6, 6)) # size in inches\nfig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n# show some digits: each image is 8x8\nfor i in range(64):\n ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])\n ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')\n \n # Label the image with the target value\n ax.text(0, 7, str(digits.target[i]))", "_____no_output_____" ] ], [ [ "Now we can tell what the features mean. 
Each feature is a real-valued quantity representing the darkness of a pixel in an 8x8 image of a handwritten digit.\n\nEven though each sample's data is inherently two-dimensional, the data matrix packs this 2D data into a **single vector**, contained in each **row** of the matrix.", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\n <b>Exercise: working with a face recognition dataset</b>:\n <ul>\n <li>\n Let's pause to explore the Olivetti face recognition dataset.\nDownload the data (about 1.4MB), and visualize the faces.\nYou can copy the code used to visualize the digits, modifying it as needed.\n </li>\n </ul>\n</div>", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_olivetti_faces", "_____no_output_____" ], [ "# download the faces dataset\n", "_____no_output_____" ], [ "# Use the script above to plot the faces\n# Hint: plt.cm.bone is a good colormap for this dataset\n", "_____no_output_____" ] ] ]
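One possible solution sketch for the exercise above, adapting the digits plotting loop; it assumes `fetch_olivetti_faces` can download the data or find a cached copy:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces

# Download (or load the cached copy of) the Olivetti faces dataset.
faces = fetch_olivetti_faces()

fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(faces.images[i], cmap=plt.cm.bone, interpolation='nearest')
    # Label each 64x64 image with the person id it belongs to.
    ax.text(0, 60, str(faces.target[i]), color='white')
plt.show()
```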
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
4a6ce592a5ee4722c42ddb0ad270cf0047b0f07c
307,228
ipynb
Jupyter Notebook
Learn/Week 4 Machine Learning/Week_4_Summary.ipynb
mazharrasyad/Data-Science-SanberCode
3a6a770d5d0f4453b76cae0c4c9b642f7abed24c
[ "MIT" ]
3
2021-05-26T19:07:32.000Z
2021-06-25T03:42:18.000Z
Learn/Week 4 Machine Learning/Week_4_Summary.ipynb
mazharrasyad/Data-Science-SanberCode
3a6a770d5d0f4453b76cae0c4c9b642f7abed24c
[ "MIT" ]
null
null
null
Learn/Week 4 Machine Learning/Week_4_Summary.ipynb
mazharrasyad/Data-Science-SanberCode
3a6a770d5d0f4453b76cae0c4c9b642f7abed24c
[ "MIT" ]
null
null
null
50.340488
53,154
0.531752
[ [ [ "# Day 1", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nimport pandas as pd\nimport numpy as np\n\niris = load_iris()\ndf = pd.DataFrame(np.c_[iris['data'], iris['target']], columns = iris['feature_names'] + ['species'])\ndf['species'] = df['species'].replace([0,1,2], iris.target_names)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "rng = np.random.RandomState(42)\nx = 10 * rng.rand(50)\ny = 2 * x - 1 + rng.randn(50)", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "plt.scatter(x, y)\nplt.show()", "_____no_output_____" ], [ "# 1\nfrom sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "# 2\nLinearRegression?\nmodel_lr = LinearRegression(fit_intercept=True)", "_____no_output_____" ], [ "# 3\n# x = data feature\n# y = data target\nx.shape\nx_matriks = x[:, np.newaxis]\nx_matriks.shape", "_____no_output_____" ], [ "# 4\n# model_lr.fit(input_data, output_data)\nmodel_lr.fit(x_matriks, y)", "_____no_output_____" ], [ "# Testing\nx_test = np.linspace(10, 12, 15)\nx_test = x_test[:, np.newaxis]\nx_test", "_____no_output_____" ], [ "# 5\ny_test = model_lr.predict(x_test)\ny_test", "_____no_output_____" ], [ "y_train = model_lr.predict(x_matriks)", "_____no_output_____" ], [ "plt.scatter(x, y, color='r')\nplt.plot(x, y_train, label=\"Model Training\")\nplt.plot(x_test, y_test, label=\"Test Result/hasil Prediksi\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# Day 2", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nimport pandas as pd\nimport numpy as np\n\niris = load_iris()\ndf = pd.DataFrame(np.c_[iris['data'], iris['target']], columns = iris['feature_names'] + ['species'])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "iris", "_____no_output_____" ], [ "from scipy import stats\n\nz = stats.zscore(df)", "_____no_output_____" ], [ "z", "_____no_output_____" ], [ "print(np.where(z>3))", "(array([15]), array([1]))\n" ], [ "# import class model\nfrom sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ], [ "z[15][1]", "_____no_output_____" ], [ "# Membuat objek model dan memilih hyperparameter\n# KNeighborsClassifier?\nmodel_knn = KNeighborsClassifier(n_neighbors=6, weights='distance')", "_____no_output_____" ], [ "# Memisahkan data feature dan target\nX = df.drop('species', axis=1)\ny = df['species']", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "# Perintahkan model untuk mempelajari data dengan menggunakan method .fit()\nmodel_knn.fit(X, y)", "_____no_output_____" ], [ "# predict\nx_new = np.array([\n [2.5, 4, 3, 0.1],\n [1, 3.5, 1.7, 0.4],\n [4, 1, 3, 0.3]\n])", "_____no_output_____" ], [ "y_new = model_knn.predict(x_new)", "_____no_output_____" ], [ "y_new", "_____no_output_____" ], [ "# 0 = sentosa\n# 1 = versicolor\n# 2 = virginica", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "rng = np.random.RandomState(1)\nx = 10*rng.rand(50)\ny = 5*x + 10 + rng.rand(50)", "_____no_output_____" ], [ "plt.scatter(x, y)\nplt.show()", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "model_lr = LinearRegression(fit_intercept=True)", "_____no_output_____" ], [ "model_lr.fit(x[:, np.newaxis], y)", "_____no_output_____" ], [ "y_predict = model_lr.predict(x[:, np.newaxis])", "_____no_output_____" ], 
[ "plt.plot(x, y_predict, color='r', label='Model Predicted Data')\nplt.scatter(x, y, label='Actual Data')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "model_lr.coef_", "_____no_output_____" ], [ "model_lr.intercept_", "_____no_output_____" ], [ "# y = 5*x + 10 + rng.rand(50)", "_____no_output_____" ], [ "x = rng.rand(50, 3)\ny = np.dot(x, [4, 2, 7]) + 20 # sama dengan x*4 + x*2 + x*7 + 20", "_____no_output_____" ], [ "x.shape", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "model_lr2 = LinearRegression(fit_intercept=True)", "_____no_output_____" ], [ "model_lr2.fit(x, y)", "_____no_output_____" ], [ "y_predict = model_lr2.predict(x)", "_____no_output_____" ], [ "model_lr2.coef_", "_____no_output_____" ], [ "model_lr2.intercept_", "_____no_output_____" ] ], [ [ "# Day 3", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\n\nmodel_knn = KNeighborsClassifier(n_neighbors=2)\nx_train = df.drop('species', axis=1)\ny_train = df['species']\nmodel_knn.fit(x_train, y_train)", "_____no_output_____" ], [ "# cara salah dalam mengevaluasi model\ny_prediksi = model_knn.predict(x_train)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\n\nscore = accuracy_score(y_train, y_prediksi)\nscore", "_____no_output_____" ], [ "# cara yang benar\nx = df.drop('species', axis=1)\ny = df['species']", "_____no_output_____" ], [ "y.value_counts()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21, stratify=y)\n# x -> x_train, x_test -0.3-0.2\n# y -> y_train, y_test -0.3-0.2", "_____no_output_____" ], [ "# valuenya sama karena stratify\ny_train.value_counts()", "_____no_output_____" ], [ "print(x_train.shape)\nprint(x_test.shape)", "(120, 4)\n(30, 4)\n" ], [ "model_knn = KNeighborsClassifier(n_neighbors=2)\nmodel_knn.fit(x_train, y_train)", "_____no_output_____" ], [ "y_predik = model_knn.predict(x_test)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\n\nscore = accuracy_score(y_test, y_predik)", "_____no_output_____" ], [ "score", "_____no_output_____" ], [ "from sklearn.model_selection import cross_val_score\n\nmodel_knn = KNeighborsClassifier(n_neighbors=2)\ncv_result = cross_val_score(model_knn, x, y, cv=10)\ncv_result.mean()", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\n\ncolnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndf = pd.read_csv('pima-indians-diabetes.csv', names=colnames)\ndf.head()", "_____no_output_____" ], [ "df['class'].value_counts()", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import scale\n\nX = df.drop('class', axis=1)\nXs = scale(X)\ny = df['class']\n\nX_train, X_test, y_train, y_test = train_test_split(Xs, y, random_state=21, stratify=y, test_size=0.2)\n\nmodel_lr = LogisticRegression(random_state=21)\nparams_grid = {\n 'C':np.arange(0.1, 1, 0.1), 'class_weight':[{0:x, 1:1-x} for x in np.arange(0.1, 0.9, 0.1)] \n}\ngscv = GridSearchCV(model_lr, params_grid, cv=10, scoring='f1')\ngscv.fit(X_train, y_train)", "_____no_output_____" ], [ "X_test", "_____no_output_____" ], [ "y_pred = gscv.predict(X_test)\ny_pred", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix, classification_report\n\nconfusion_matrix(y_test, y_pred, 
labels=[1, 0])", "_____no_output_____" ], [ "TP = 39\nFN = 15\nFP = 25\nTN = 75", "_____no_output_____" ], [ "print(classification_report(y_test, y_pred))", " precision recall f1-score support\n\n 0 0.83 0.75 0.79 100\n 1 0.61 0.72 0.66 54\n\n accuracy 0.74 154\n macro avg 0.72 0.74 0.73 154\nweighted avg 0.75 0.74 0.74 154\n\n" ], [ "# menghitung nilai precisi, recall, f-1 score dari model kita dalam memprediksi data yang positif\nprecision = TP/(TP+FP)\nrecall = TP/(TP+FN)\nf1score = 2 * precision * recall / (precision + recall)\nprint(precision)\nprint(recall)\nprint(f1score)", "0.609375\n0.7222222222222222\n0.6610169491525424\n" ], [ "# menghitung nilai precisi, recall, f-1 score dari model kita dalam memprediksi data yang negatif\nprecision = TN/(TN+FN)\nrecall = TN/(TN+FP)\nf1score = (precision * recall * 2) / (precision + recall)\nprint(precision)\nprint(recall)\nprint(f1score)", "0.8333333333333334\n0.75\n0.7894736842105262\n" ] ], [ [ "# Day 4", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nimport pandas as pd\nimport numpy as np\n\ncolnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndf = pd.read_csv('pima-indians-diabetes.csv', names=colnames)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_validate, cross_val_score\n\nX = df.drop('class', axis=1)\ny = df['class']\n\nmodel = KNeighborsClassifier(n_neighbors=5)\ncv_score1 = cross_validate(model, X, y, cv=10, return_train_score=True)\ncv_score2 = cross_val_score(model, X, y, cv=10)", "_____no_output_____" ], [ "cv_score1", "_____no_output_____" ], [ "cv_score2", "_____no_output_____" ], [ "cv_score1['test_score'].mean()", "_____no_output_____" ], [ "cv_score2.mean()", "_____no_output_____" ], [ "def knn_predict(k):\n model = KNeighborsClassifier(n_neighbors=k)\n score = cross_validate(model, X, y, cv=10, return_train_score=True)\n train_score = score['train_score'].mean()\n test_score = score['test_score'].mean()\n return train_score, test_score", "_____no_output_____" ], [ "train_scores = []\ntest_scores = []\n\nfor k in range(2, 100):\n # lakukan fitting\n # kemudian scoring\n train_score, test_score = knn_predict(k)\n train_scores.append(train_score)\n test_scores.append(test_score)", "_____no_output_____" ], [ "train_scores", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(figsize=(14, 8))\n\nax.plot(range(2, 100), train_scores, marker='x', color='b', label='Train Scores')\nax.plot(range(2, 100), test_scores, marker='o', color='g', label='Test Scores')\nax.set_xlabel('Nilai K')\nax.set_ylabel('Score')\n\nfig.legend()\nplt.show()", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n\nmodel = KNeighborsClassifier()\nparam_grid = {'n_neighbors':np.arange(5, 50), 'weights':['distance', 'uniform']}\ngscv = GridSearchCV(model, param_grid=param_grid, scoring='accuracy', cv=5)\ngscv.fit(X, y)", "_____no_output_____" ], [ "gscv.best_params_", "_____no_output_____" ], [ "gscv.best_score_", "_____no_output_____" ], [ "rscv = RandomizedSearchCV(model, param_grid, n_iter=15, scoring='accuracy', cv=5)\nrscv.fit(X, y)", "_____no_output_____" ], [ "rscv.best_params_", "_____no_output_____" ], [ "rscv.best_score_", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# Day 5", "_____no_output_____" ] ], [ [ "data = {\n 'pendidikan_terakhir' : ['SD', 'SMP', 'SMA', 'SMP', 'SMP'],\n 
'tempat_tinggal' : ['Bandung', 'Garut', 'Bandung', 'Cirebon', 'Jakarta'],\n 'status' : ['Menikah', 'Jomblo', 'Janda', 'Jomblo', 'Duda'],\n 'tingkat_ekonomi' : ['Kurang Mampu', 'Berkecukupan', 'Mampu', 'Sangat Mampu', 'Mampu'],\n 'jumlah_anak' : [1, 4, 2, 0, 3]\n}", "_____no_output_____" ], [ "import pandas as pd\n\ndf = pd.DataFrame(data)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df = pd.get_dummies(df, columns=['tempat_tinggal', 'status'])\ndf", "_____no_output_____" ], [ "obj_dict = {\n 'Kurang Mampu' : 0,\n 'Berkecukupan' : 1,\n 'Mampu' : 2,\n 'Sangat Mampu' : 3\n}", "_____no_output_____" ], [ "df['tingkat_ekonomi'] = df['tingkat_ekonomi'].replace(obj_dict)", "_____no_output_____" ], [ "df['tingkat_ekonomi']", "_____no_output_____" ], [ "import numpy as np\n\ndata = {\n 'pendidikan_terakhir' : [np.nan, 'SMP', 'SD', 'SMP', 'SMP', 'SD', 'SMP', 'SMA', 'SD'],\n 'tingkat_ekonomi' : [0, 1, 2, 3, 2, 2, 1, 1, 3],\n # 'jumlah_anak' : [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1, 2]\n 'jumlah_anak' : [1, np.nan, np.nan, 1, 1, 1, 3, 1, 2]\n}\n\ndata_ts = {\n 'Hari' : [1, 2, 3, 4, 5],\n 'Jumlah' : [12, 23, np.nan, 12, 20]\n}", "_____no_output_____" ], [ "df = pd.DataFrame(data)\ndf_ts = pd.DataFrame(data_ts)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "5 ways to handle missing values:\n1. Drop the missing values: when the amount of missing data is large\n2. Filling with the mean/median: applies to numeric data\n3. Filling with the mode: applies to categorical data\n4. Filling with bfill or ffill\n5. KNN", "_____no_output_____" ] ], [ [ "# 1. drop by row\ndf.dropna(axis=0)", "_____no_output_____" ], [ "# 1. drop by column\ndf.drop(['jumlah_anak'], axis=1)", "_____no_output_____" ], [ "# 2. drawback: less accurate\ndf['jumlah_anak'] = df['jumlah_anak'].fillna(df['jumlah_anak'].mean())\ndf['jumlah_anak']", "_____no_output_____" ], [ "df['jumlah_anak'] = df['jumlah_anak'].astype(int)\ndf['jumlah_anak']", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "# 3\ndf['pendidikan_terakhir'].value_counts()", "_____no_output_____" ], [ "df['pendidikan_terakhir'] = df['pendidikan_terakhir'].fillna('SMP')\ndf", "_____no_output_____" ], [ "# 4. bfill: NaN is filled with the next value\ndf_ts.fillna(method='bfill')", "_____no_output_____" ], [ "# 4. ffill: NaN is filled with the previous value\ndf_ts.fillna(method='ffill')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "from sklearn.impute import KNNImputer\n\nimp = KNNImputer(n_neighbors=5)\n# imp.fit_transform(df['jumlah_anak'][:, np.newaxis])\nimp.fit_transform(df[['jumlah_anak', 'tingkat_ekonomi']])", "_____no_output_____" ], [ "import pandas as pd\n\ncolnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndf = pd.read_csv('pima-indians-diabetes.csv', names=colnames)\ndf.head()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "X = df.drop('class', axis=1)\nX.head()", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\n\nstdscalar = StandardScaler()\ndatascale = stdscalar.fit_transform(X)\ncolnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age']\ndfscale = pd.DataFrame(datascale, columns=colnames)\ndfscale", "_____no_output_____" ], [ "dfscale.describe()", "_____no_output_____" ], [ "from sklearn.preprocessing import Normalizer\n\nnormscaler = Normalizer()\ndatanorm = normscaler.fit_transform(X)\ncolnames = ['preg', 'plas', 'pres', 'skin', 'test',
'mass', 'pedi', 'age']\ndfnorm = pd.DataFrame(datanorm, columns=colnames)\ndfnorm", "_____no_output_____" ], [ "dfnorm.describe()", "_____no_output_____" ] ], [ [ "1. Normalization is used when we cannot assume that our data follows a normal distribution and we are using ML algorithms that make no assumption about the shape of the data's distribution, e.g. KNN, neural networks, etc.\n\n2. Standardization is used when we assume our data follows a normal distribution", "_____no_output_____" ] ] ]
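The notebook scales the Pima data and tunes KNN in separate steps. A sketch combining both inside a `Pipeline`, so the scaler is re-fit on each cross-validation training fold instead of seeing the whole dataset up front; it assumes the same `pima-indians-diabetes.csv` file is available:

```python
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
df = pd.read_csv('pima-indians-diabetes.csv', names=colnames)
X, y = df.drop('class', axis=1), df['class']

# Chaining scaler and classifier keeps test-fold statistics out of the scaler.
pipe = Pipeline([('scale', StandardScaler()),
                 ('knn', KNeighborsClassifier())])
grid = {'knn__n_neighbors': np.arange(5, 50),
        'knn__weights': ['distance', 'uniform']}
gscv = GridSearchCV(pipe, grid, scoring='accuracy', cv=5)
gscv.fit(X, y)
print(gscv.best_params_, gscv.best_score_)
```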
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a6ceb66fbc58378d8d837348c821be9f02f039f
802,764
ipynb
Jupyter Notebook
Default Data EDA.ipynb
withusanty/Bank-Defaulter-Data-Analysis
3dd2a5763f2e013c40a053ad18560a9d5ad61321
[ "Unlicense" ]
null
null
null
Default Data EDA.ipynb
withusanty/Bank-Defaulter-Data-Analysis
3dd2a5763f2e013c40a053ad18560a9d5ad61321
[ "Unlicense" ]
null
null
null
Default Data EDA.ipynb
withusanty/Bank-Defaulter-Data-Analysis
3dd2a5763f2e013c40a053ad18560a9d5ad61321
[ "Unlicense" ]
null
null
null
818.311927
559,096
0.953473
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)", "_____no_output_____" ], [ "df = pd.read_csv('credit.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10000 entries, 0 to 9999\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 default 10000 non-null object \n 1 student 10000 non-null object \n 2 balance 10000 non-null float64\n 3 income 10000 non-null float64\ndtypes: float64(2), object(2)\nmemory usage: 312.6+ KB\n" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df.duplicated().sum()", "_____no_output_____" ], [ "df.corr()", "_____no_output_____" ], [ "df['default'].value_counts()", "_____no_output_____" ] ], [ [ "# How many defaulter in the data?", "_____no_output_____" ] ], [ [ "df['default'].value_counts().plot(kind='pie',autopct=lambda p:'{:.2f}%\\n({:.0f})'.format(p,(p/100)*(df['default'].value_counts().sum())))", "_____no_output_____" ] ], [ [ "# How many students in this data?", "_____no_output_____" ] ], [ [ "df['student'].value_counts().plot(kind='pie',autopct=lambda p:'{:.2f}%\\n({:.0f})'.format(p,(p/100)*(df['student'].value_counts().sum())))", "_____no_output_____" ] ], [ [ "# How balance column looks like?", "_____no_output_____" ] ], [ [ "sns.distplot(df['balance'])", "_____no_output_____" ] ], [ [ "We can say this is right skewed data.\nmajority of the people have balance between 500-1000", "_____no_output_____" ], [ "# How income column looks like?", "_____no_output_____" ] ], [ [ "sns.distplot(df['income'])", "_____no_output_____" ], [ "df.sort_values(by='student')", "_____no_output_____" ], [ "def Value_Countplot(data,hue=None,x_pos=0.25,rotation=None):\n '''\n data: data\n hue: hue data\n x_pos: int/float - to position the value\n \n '''\n ax = sns.countplot(data,hue=hue)\n for i in ax.patches:\n height = i.get_height() # get height of bar (value of y axis)\n x = i.get_x() # get x_axis value\n ax.text(x=x+x_pos,y=height-(height/10),s=height,fontsize=10,fontweight='bold',rotation=rotation)", "_____no_output_____" ] ], [ [ "# Students vs Defaulter on Bar Plot", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15,5))\n# plt.subplot(311)\nValue_Countplot(df['default'],hue=df['student'],x_pos=0.15)\nplt.title('Bar Plot: Students vs Defaulter',fontdict={'fontsize':20,'color':'red'})\n", "_____no_output_____" ], [ "plt.figure(figsize=(15,8))\nsns.scatterplot(df['balance'],df['income'],hue=df['default'],style=df['student'])\nplt.title('Scatter Plot: Income vs Balance vs Student vs Default',fontdict={'fontsize':20,'color':'red'})\n", "_____no_output_____" ], [ "sns.clustermap(pd.crosstab(df['student'],df['default']))\nplt.title('Cluster Map: Students vs Defaulter',fontdict={'fontsize':20,'color':'red'})\n", "_____no_output_____" ], [ "sns.pairplot(df,hue='default',kind='kde')\n# plt.title('Pair Plot: Income vs Balance vs Default',fontdict={'fontsize':20,'color':'red'})", "_____no_output_____" ], [ "plt.figure(figsize=(15,5))\nsns.boxplot(df['balance'],df['default'])\nplt.title('Box Plot: Balance vs Default',fontdict={'fontsize':20,'color':'red'})\n", "_____no_output_____" ], [ "plt.figure(figsize=(15,5))\nsns.boxplot(df['balance'],df['student'])\nplt.title('Box Plot: Balance vs Student',fontdict={'fontsize':20,'color':'red'})", 
"_____no_output_____" ], [ "plt.figure(figsize=(15,5))\nsns.boxplot(df['income'],df['student'])\nplt.title('Box Plot: Income vs Student',fontdict={'fontsize':20,'color':'red'})", "_____no_output_____" ], [ "plt.figure(figsize=(15,5))\nsns.boxplot(df['income'],df['default'])\nplt.title('Box Plot: Income vs Default',fontdict={'fontsize':20,'color':'red'})", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6cefb6728cf5bd6bdb60223c3a52637f7bb96c
576,223
ipynb
Jupyter Notebook
Simulated_annealing_in_Python.ipynb
doc22940/notebooks-2
6a7bdec5ed2195005d64ca1f9eaf6613d68fb8ca
[ "MIT" ]
102
2016-06-25T09:30:00.000Z
2022-03-24T21:02:49.000Z
Simulated_annealing_in_Python.ipynb
Jimmy-INL/notebooks
ccf5ebc11131f56305c484cfd4556f4bcf63c19b
[ "MIT" ]
34
2016-06-26T12:21:30.000Z
2021-04-06T09:19:49.000Z
Simulated_annealing_in_Python.ipynb
Jimmy-INL/notebooks
ccf5ebc11131f56305c484cfd4556f4bcf63c19b
[ "MIT" ]
44
2017-05-13T23:54:56.000Z
2021-07-17T15:34:24.000Z
903.170846
132,294
0.940433
[ [ [ "# Table of Contents\n <p><div class=\"lev1 toc-item\"><a href=\"#Simulated-annealing-in-Python\" data-toc-modified-id=\"Simulated-annealing-in-Python-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Simulated annealing in Python</a></div><div class=\"lev2 toc-item\"><a href=\"#References\" data-toc-modified-id=\"References-11\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>References</a></div><div class=\"lev2 toc-item\"><a href=\"#See-also\" data-toc-modified-id=\"See-also-12\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>See also</a></div><div class=\"lev2 toc-item\"><a href=\"#About\" data-toc-modified-id=\"About-13\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>About</a></div><div class=\"lev2 toc-item\"><a href=\"#Algorithm\" data-toc-modified-id=\"Algorithm-14\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>Algorithm</a></div><div class=\"lev2 toc-item\"><a href=\"#Basic-but-generic-Python-code\" data-toc-modified-id=\"Basic-but-generic-Python-code-15\"><span class=\"toc-item-num\">1.5&nbsp;&nbsp;</span>Basic but generic Python code</a></div><div class=\"lev2 toc-item\"><a href=\"#Basic-example\" data-toc-modified-id=\"Basic-example-16\"><span class=\"toc-item-num\">1.6&nbsp;&nbsp;</span>Basic example</a></div><div class=\"lev2 toc-item\"><a href=\"#Visualizing-the-steps\" data-toc-modified-id=\"Visualizing-the-steps-17\"><span class=\"toc-item-num\">1.7&nbsp;&nbsp;</span>Visualizing the steps</a></div><div class=\"lev2 toc-item\"><a href=\"#More-visualizations\" data-toc-modified-id=\"More-visualizations-18\"><span class=\"toc-item-num\">1.8&nbsp;&nbsp;</span>More visualizations</a></div>", "_____no_output_____" ], [ "# Simulated annealing in Python\n\nThis small notebook implements, in [Python 3](https://docs.python.org/3/), the [simulated annealing](https://en.wikipedia.org/wiki/Simulated_annealing) algorithm for numerical optimization.\n\n## References\n- The Wikipedia page: [simulated annealing](https://en.wikipedia.org/wiki/Simulated_annealing).\n- It was implemented in `scipy.optimize` before version 0.14: [`scipy.optimize.anneal`](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.anneal.html).\n- [This blog post](http://apmonitor.com/me575/index.php/Main/SimulatedAnnealing).\n- These Stack Overflow questions: [15853513](https://stackoverflow.com/questions/15853513/) and [19757551](https://stackoverflow.com/questions/19757551/).\n\n## See also\n- For a real-world use of simulated annealing, this Python module seems useful: [perrygeo/simanneal on GitHub](https://github.com/perrygeo/simanneal).\n\n## About\n- *Date:* 20/07/2017.\n- *Author:* [Lilian Besson](https://GitHub.com/Naereen), (C) 2017.\n- *Licence:* [MIT Licence](http://lbesson.mit-license.org).\n\n----", "_____no_output_____" ], [ "> This notebook should be compatible with both Python versions, [2](https://docs.python.org/2/) and [3](https://docs.python.org/3/).", "_____no_output_____" ] ], [ [ "from __future__ import print_function, division # Python 2 compatibility if needed", "_____no_output_____" ], [ "import numpy as np\nimport numpy.random as rn\nimport matplotlib.pyplot as plt # to plot\nimport matplotlib as mpl\n\nfrom scipy import optimize # to compare\n\nimport seaborn as sns\nsns.set(context=\"talk\", style=\"darkgrid\", palette=\"hls\", font=\"sans-serif\", font_scale=1.05)\n\nFIGSIZE = (19, 8) #: Figure size, in inches!\nmpl.rcParams['figure.figsize'] = FIGSIZE", "_____no_output_____" ] ], [ [ "----\n\n## Algorithm\n\nThe following pseudocode 
presents the simulated annealing heuristic.\n\n- It starts from a state $s_0$ and continues to either a maximum of $k_{\max}$ steps or until a state with an energy of $e_{\min}$ or less is found.\n- In the process, the call $\mathrm{neighbour}(s)$ should generate a randomly chosen neighbour of a given state $s$.\n- The annealing schedule is defined by the call $\mathrm{temperature}(r)$, which should yield the temperature to use, given the fraction $r$ of the time budget that has been expended so far.", "_____no_output_____" ], [ "> **Simulated Annealing**:\n> \n> - Let $s$ = $s_0$\n> - For $k = 0$ through $k_{\max}$ (exclusive):\n> + $T := \mathrm{temperature}(k ∕ k_{\max})$\n> + Pick a random neighbour, $s_{\mathrm{new}} := \mathrm{neighbour}(s)$\n> + If $P(E(s), E(s_{\mathrm{new}}), T) \geq \mathrm{random}(0, 1)$:\n> * $s := s_{\mathrm{new}}$\n> - Output: the final state $s$", "_____no_output_____" ], [ "----\n\n## Basic but generic Python code", "_____no_output_____" ], [ "Let us start with a very generic implementation:", "_____no_output_____" ] ], [ [ "def annealing(random_start,\n cost_function,\n random_neighbour,\n acceptance,\n temperature,\n maxsteps=1000,\n debug=True):\n \"\"\" Optimize the black-box function 'cost_function' with the simulated annealing algorithm.\"\"\"\n state = random_start()\n cost = cost_function(state)\n states, costs = [state], [cost]\n for step in range(maxsteps):\n fraction = step / float(maxsteps)\n T = temperature(fraction)\n new_state = random_neighbour(state, fraction)\n new_cost = cost_function(new_state)\n if debug: print(\"Step #{:>2}/{:>2} : T = {:>4.3g}, state = {:>4.3g}, cost = {:>4.3g}, new_state = {:>4.3g}, new_cost = {:>4.3g} ...\".format(step, maxsteps, T, state, cost, new_state, new_cost))\n if acceptance(cost, new_cost, T) > rn.random():\n state, cost = new_state, new_cost\n states.append(state)\n costs.append(cost)\n # print(\" ==> Accept it!\")\n # else:\n # print(\" ==> Reject it...\")\n return state, cost_function(state), states, costs", "_____no_output_____" ] ], [ [ "----\n\n## Basic example\n\nWe will use this to find the global minimum of the function $x \mapsto x^2$ on $[-10, 10]$.", "_____no_output_____" ] ], [ [ "interval = (-10, 10)\n\ndef f(x):\n \"\"\" Function to minimize.\"\"\"\n return x ** 2\n\ndef clip(x):\n \"\"\" Force x to be in the interval.\"\"\"\n a, b = interval\n return max(min(x, b), a)", "_____no_output_____" ], [ "def random_start():\n \"\"\" Random point in the interval.\"\"\"\n a, b = interval\n return a + (b - a) * rn.random_sample()", "_____no_output_____" ], [ "def cost_function(x):\n \"\"\" Cost of x = f(x).\"\"\"\n return f(x)", "_____no_output_____" ], [ "def random_neighbour(x, fraction=1):\n \"\"\"Move x a little bit, to the left or to the right.\"\"\"\n amplitude = (max(interval) - min(interval)) * fraction / 10\n delta = (-amplitude/2.)
+ amplitude * rn.random_sample()\n return clip(x + delta)", "_____no_output_____" ], [ "def acceptance_probability(cost, new_cost, temperature):\n if new_cost < cost:\n # print(\" - Acceptance probability = 1 as new_cost = {} < cost = {}...\".format(new_cost, cost))\n return 1\n else:\n p = np.exp(- (new_cost - cost) / temperature)\n # print(\" - Acceptance probability = {:.3g}...\".format(p))\n return p", "_____no_output_____" ], [ "def temperature(fraction):\n \"\"\" Example of temperature decreasing as the process goes on.\"\"\"\n return max(0.01, min(1, 1 - fraction))", "_____no_output_____" ] ], [ [ "Let's try!", "_____no_output_____" ] ], [ [ "annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=30, debug=True);", "Step # 0/30 : T = 1, state = -7.45, cost = 55.5, new_state = -7.45, new_cost = 55.5 ...\nStep # 1/30 : T = 0.967, state = -7.45, cost = 55.5, new_state = -7.44, new_cost = 55.4 ...\nStep # 2/30 : T = 0.933, state = -7.44, cost = 55.4, new_state = -7.5, new_cost = 56.2 ...\nStep # 3/30 : T = 0.9, state = -7.5, cost = 56.2, new_state = -7.59, new_cost = 57.6 ...\nStep # 4/30 : T = 0.867, state = -7.59, cost = 57.6, new_state = -7.64, new_cost = 58.3 ...\nStep # 5/30 : T = 0.833, state = -7.59, cost = 57.6, new_state = -7.51, new_cost = 56.4 ...\nStep # 6/30 : T = 0.8, state = -7.51, cost = 56.4, new_state = -7.53, new_cost = 56.6 ...\nStep # 7/30 : T = 0.767, state = -7.53, cost = 56.6, new_state = -7.58, new_cost = 57.5 ...\nStep # 8/30 : T = 0.733, state = -7.53, cost = 56.6, new_state = -7.6, new_cost = 57.8 ...\nStep # 9/30 : T = 0.7, state = -7.53, cost = 56.6, new_state = -7.51, new_cost = 56.4 ...\nStep #10/30 : T = 0.667, state = -7.51, cost = 56.4, new_state = -7.24, new_cost = 52.4 ...\nStep #11/30 : T = 0.633, state = -7.24, cost = 52.4, new_state = -6.98, new_cost = 48.7 ...\nStep #12/30 : T = 0.6, state = -6.98, cost = 48.7, new_state = -6.6, new_cost = 43.5 ...\nStep #13/30 : T = 0.567, state = -6.6, cost = 43.5, new_state = -6.69, new_cost = 44.8 ...\nStep #14/30 : T = 0.533, state = -6.6, cost = 43.5, new_state = -6.84, new_cost = 46.8 ...\nStep #15/30 : T = 0.5, state = -6.6, cost = 43.5, new_state = -6.45, new_cost = 41.6 ...\nStep #16/30 : T = 0.467, state = -6.45, cost = 41.6, new_state = -6.24, new_cost = 38.9 ...\nStep #17/30 : T = 0.433, state = -6.24, cost = 38.9, new_state = -6.52, new_cost = 42.5 ...\nStep #18/30 : T = 0.4, state = -6.24, cost = 38.9, new_state = -5.92, new_cost = 35.1 ...\nStep #19/30 : T = 0.367, state = -5.92, cost = 35.1, new_state = -6.35, new_cost = 40.4 ...\nStep #20/30 : T = 0.333, state = -5.92, cost = 35.1, new_state = -5.98, new_cost = 35.8 ...\nStep #21/30 : T = 0.3, state = -5.92, cost = 35.1, new_state = -5.35, new_cost = 28.6 ...\nStep #22/30 : T = 0.267, state = -5.35, cost = 28.6, new_state = -4.67, new_cost = 21.8 ...\nStep #23/30 : T = 0.233, state = -4.67, cost = 21.8, new_state = -4.44, new_cost = 19.7 ...\nStep #24/30 : T = 0.2, state = -4.44, cost = 19.7, new_state = -4.59, new_cost = 21.1 ...\nStep #25/30 : T = 0.167, state = -4.44, cost = 19.7, new_state = -4.04, new_cost = 16.3 ...\nStep #26/30 : T = 0.133, state = -4.04, cost = 16.3, new_state = -4.77, new_cost = 22.8 ...\nStep #27/30 : T = 0.1, state = -4.04, cost = 16.3, new_state = -4.7, new_cost = 22.1 ...\nStep #28/30 : T = 0.0667, state = -4.04, cost = 16.3, new_state = -3.44, new_cost = 11.8 ...\nStep #29/30 : T = 0.0333, state = -3.44, cost = 11.8, new_state = -2.6, new_cost = 6.78 ...\n" ] ], [ [
"Now with more steps:", "_____no_output_____" ] ], [ [ "state, c, states, costs = annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=1000, debug=False)\n\nstate\nc", "_____no_output_____" ] ], [ [ "----\n\n## Visualizing the steps", "_____no_output_____" ] ], [ [ "def see_annealing(states, costs):\n plt.figure()\n plt.suptitle(\"Evolution of states and costs of the simulated annealing\")\n plt.subplot(121)\n plt.plot(states, 'r')\n plt.title(\"States\")\n plt.subplot(122)\n plt.plot(costs, 'b')\n plt.title(\"Costs\")\n plt.show()", "_____no_output_____" ], [ "see_annealing(states, costs)", "_____no_output_____" ] ], [ [ "----\n\n## More visualizations", "_____no_output_____" ] ], [ [ "def visualize_annealing(cost_function):\n state, c, states, costs = annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=1000, debug=False)\n see_annealing(states, costs)\n return state, c", "_____no_output_____" ], [ "visualize_annealing(lambda x: x**3)", "_____no_output_____" ], [ "visualize_annealing(lambda x: x**2)", "_____no_output_____" ], [ "visualize_annealing(np.abs)", "_____no_output_____" ], [ "visualize_annealing(np.cos)", "_____no_output_____" ], [ "visualize_annealing(lambda x: np.sin(x) + np.cos(x))", "_____no_output_____" ] ], [ [ "In all these examples, the simulated annealing converges to a global minimum.\nIt can be non-unique, but it is found.", "_____no_output_____" ], [ "----\n> That's it for today, folks!\n\nMore notebooks can be found on [my GitHub page](https://GitHub.com/Naereen/notebooks).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a6cfcf1cc055bfb7921a4681b60713a99ec7291
378,551
ipynb
Jupyter Notebook
Data_visualization pt2.ipynb
twinspica14/jupyter_files
e2d530e14e0241045467d1f757f13a964903a410
[ "MIT" ]
null
null
null
Data_visualization pt2.ipynb
twinspica14/jupyter_files
e2d530e14e0241045467d1f757f13a964903a410
[ "MIT" ]
null
null
null
Data_visualization pt2.ipynb
twinspica14/jupyter_files
e2d530e14e0241045467d1f757f13a964903a410
[ "MIT" ]
null
null
null
161.154108
67,180
0.819821
[ [ [ "import numpy as np\nimport pandas as pd\n\n# stats\nfrom scipy import stats\n# Plotting\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport math\n\n\n%matplotlib inline", "_____no_output_____" ], [ "_df4 = pd.read_csv('winequality-red.csv',sep=\";\")\n_df4\n\n# _df4.head()", "_____no_output_____" ] ], [ [ "# Basics of MatPlotLib\n", "_____no_output_____" ], [ "# Pylab interface, where we use plt.\n# Stateful interface where we use ax = plt.axes()\n# Object Oriented one where we use ax = plt.subplots() to create an array of axes\n\n## 1st is pylab", "_____no_output_____" ] ], [ [ "# linspace(start,end,step) gives value equally spaced, not a + step, a1 + step, but a divided in equal step\nx = np.linspace(-np.pi,2*np.pi,256,endpoint=True)\nc,s = np.cos(x), np.sin(x)\n\n\nplt.figure(figsize=(12,6), dpi=80,facecolor =\"cyan\") #figure() gives control over frame,dpi,edgecolor,facecolor,linewidth\n\n\n\n\nplt.subplot(2,1,1)#subplot(number_rows,number_columns,#plot_number)\n\n\nplt.xlim(-4.0,4.0)\n\n\n\nplt.xticks(np.linspace(-4,4,9,endpoint=True))\n\nplt.yticks(np.linspace(-1,1,5,endpoint=True))\n\n\nplt.plot(x,c,color='green', linestyle=\"-.\",label=\"cos(x)\")##### width x height in figsize\n\n# ':' gives .....\n# '-.' gives -.-.-.\n# '--' gives - - - - \n# '-' gives -\n\n# Setting x & y limits\n# plt.xlim(start,end) same for ylim()\n\n\n\n\n# set y ticks\n\n\n\nplt.legend()\n\n\nplt.subplot(2,1,2)\nplt.plot(x,s,':c',label=\"sin(x)\") #plot(x,y)\n\n\n\n\n\n# we can save figureby savefig(\"../path/file_name.png\", dpi=72)\n'''\n many file formats are available so plase check\n'''\n\n\n\n\n\n\n\n# plt.show() # Should be used when running from script, or else from ipython it's not important, should be used only once\n# to update grapgh we use plt.draw()\nplt.legend()\n", "_____no_output_____" ] ], [ [ "# Above Interface was stateful based, But we will go for object oriented interface", "_____no_output_____" ] ], [ [ "# Creating Above figure in oop\nx = np.linspace(-2*np.pi,2*np.pi,256)\n\n\nplt.style.use('seaborn-whitegrid') # could also be classical\n\n\nfig, ax = plt.subplots(2)\n\n### Creating ax[] array of axes\n\n\n\n\nax[0].plot(x,np.sin(x),':c',label=\"sin(x)\")\nax[0].set(xlabel=\"x\",ylabel=\"sin(x)\",title=\"sin(x)\") # set(xlim(),ylim(),xlabel=\"\",ylabel=\"\",title=\"\")\nax[0].legend()\nax[1].plot(x,np.cos(x))", "_____no_output_____" ], [ "fig = plt.figure()\nax = plt.axes()\nx = np.linspace(0,10,2000)\nax.plot(x,np.sin(x),'--c')# c first letter of color only for\n\nax.plot(x,np.cos(x),':r') # rgbcmyk Cyan, Magneta,Yello,blacK ", "_____no_output_____" ] ], [ [ "# plt.axis([xmin, xmax, ymin, ymax],'tight') to set limit in a single call, It also allows to tighten bounds.\n## Above dig is not bound tight\n\n# Labeling Plots\n## plt.title(\"xxxxxx\") plt.xlabel(\"xxx\") plt.ylabel(\"xxxx\") ", "_____no_output_____" ] ], [ [ "x = np.linspace(0,10,30)\nax = plt.axes()\nax.plot(x,np.sin(x),'o',color=\"black\")", "_____no_output_____" ], [ "plt.figure(figsize=(12,12),dpi=80)\nrng = np.random.RandomState(0)\nfor marker in ['o','.',',','x','+','v','^','<','>','s','d']:\n plt.plot(rng.rand(5),rng.rand(5),marker,label=\"marker = {}\".format(marker),color=\"red\")\n \nplt.legend(numpoints=1)", "_____no_output_____" ] ], [ [ "## This markers can also be combined with '-' line like '-o'\n## full coustomization of markers be like\n### plt.plot(x,np.sin(x),'>c',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor=\"red\",markeredgewidth=2)\n", 
"_____no_output_____" ] ], [ [ "plt.plot(x,np.sin(x),'-pc',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor=\"red\",markeredgewidth=2,label=\"line\")\nplt.legend()\n", "_____no_output_____" ], [ "y = np.random.randint(0,100,50)\nx = np.random.randint(0,50,50)\nplt.scatter(x,y,c=y,s=y,alpha=0.3,cmap='viridis')\nplt.colorbar()", "_____no_output_____" ], [ "_d2 = _df4.pivot_table(values='pH',index=\"alcohol\",columns=\"quality\")\n_d2", "_____no_output_____" ], [ "# scatter graph can also be used for plotting 4 max function, two extra in c and size\nplt.style.use('seaborn-whitegrid')\na = _df4['pH'].value_counts()\nplt.figure(figsize=(12,12),dpi=80)\n\nplt.scatter(_df4['quality'],_df4['alcohol'],s=100,c=_df4['pH'],cmap=plt.cm.PuOr) #alpha for opaquness\nplt.colorbar()\n", "_____no_output_____" ] ], [ [ "## plt.scatter(f(x),f(y),s=f(z),c=f(w),cmap=plt.cm.PuOr,alpha=n)\n### s and c can take numbers as well as function, alpha is used for transparency n-(0,1)\n\n# color-map i.e cmap is too important to choose which colormap we would follow\n\n## we can refer for different color-map on below given link\n\n# https://chrisalbon.com/python/set_the_color_of_a_matplotlib.html", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,12),dpi=80)\nplt.plot(_df4['quality'],_df4['alcohol'],'o',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor=\"red\",markeredgewidth=2)", "_____no_output_____" ] ], [ [ "# Plot Error Bars\nplt.errorbar(x,f(x), yerr=dy,fmt='o',color=\" \",ecolor=\" \",elinewidth=3,capsize=0) x axis error (xerr)", "_____no_output_____" ], [ "# I have skipped continuous error, please go through pdf\n\n\n# We will start object-oriented approach", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a6d06ea5d9bb0c61e9b7aa401978659470dc1f6
7,904
ipynb
Jupyter Notebook
tutorials/Tutorial4_FAQ_style_QA.ipynb
vchulski/haystack
bbfccf5cf639190e3b0b34feea444a1bf22f6027
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial4_FAQ_style_QA.ipynb
vchulski/haystack
bbfccf5cf639190e3b0b34feea444a1bf22f6027
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial4_FAQ_style_QA.ipynb
vchulski/haystack
bbfccf5cf639190e3b0b34feea444a1bf22f6027
[ "Apache-2.0" ]
null
null
null
31.742972
230
0.59375
[ [ [ "## \"FAQ-Style QA\": Utilizing existing FAQs for Question Answering\n\nWhile *extractive Question Answering* works on pure texts and is therefore more generalizable, there's also a common alternative that utilizes existing FAQ data.\n\nPros:\n- Very fast at inference time\n- Utilize existing FAQ data\n- Quite good control over answers\n\nCons:\n- Generalizability: We can only answer questions that are similar to existing ones in FAQ\n\nIn some use cases, a combination of extractive QA and FAQ-style can also be an interesting option.\n\n*Use this [link](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial4_Tutorial4_FAQ_style_QA.ipynb) to open the notebook in Google Colab.*\n", "_____no_output_____" ] ], [ [ "#TODO\n! pip install git+git://github.com/deepset-ai/haystack.git@319e238f4652a05a95f02fa4cd19ef406440a789\n#! pip install farm-haystack", "_____no_output_____" ], [ "from haystack import Finder\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\n\nfrom haystack.retriever.elasticsearch import EmbeddingRetriever\nfrom haystack.utils import print_answers\nimport pandas as pd\nimport requests\n", "_____no_output_____" ] ], [ [ "### Start an Elasticsearch server\nYou can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source.", "_____no_output_____" ] ], [ [ "# Recommended: Start Elasticsearch using Docker\n# ! docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.6.2", "_____no_output_____" ], [ "# In Colab / No Docker environments: Start Elasticsearch from source\n! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz -q\n! tar -xzf elasticsearch-7.6.2-linux-x86_64.tar.gz\n! chown -R daemon:daemon elasticsearch-7.6.2\n\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\nes_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'],\n stdout=PIPE, stderr=STDOUT,\n preexec_fn=lambda: os.setuid(1) # as daemon\n )\n# wait until ES has started\n! sleep 30\n", "_____no_output_____" ] ], [ [ "### Init the DocumentStore\nIn contrast to Tutorial 1 (extractive QA), we:\n\n* specify the name of our `text_field` in Elasticsearch that we want to return as an answer\n* specify the name of our `embedding_field` in Elasticsearch where we'll store the embedding of our question and that is used later for calculating our similarity to the incoming user question\n* set `excluded_meta_data=[\"question_emb\"]` so that we don't return the huge embedding vectors in our search results", "_____no_output_____" ] ], [ [ "from haystack.database.elasticsearch import ElasticsearchDocumentStore\ndocument_store = ElasticsearchDocumentStore(host=\"localhost\", username=\"\", password=\"\",\n index=\"document\",\n text_field=\"answer\",\n embedding_field=\"question_emb\",\n embedding_dim=768,\n excluded_meta_data=[\"question_emb\"])", "04/28/2020 12:27:32 - INFO - elasticsearch - PUT http://localhost:9200/document [status:400 request:0.010s]\n" ] ], [ [ "### Create a Retriever using embeddings\nInstead of retrieving via Elasticsearch's plain BM25, we want to use vector similarity of the questions (user question vs. 
FAQ ones).\nWe can use the `EmbeddingRetriever` for this purpose and specify a model that we use for the embeddings.", "_____no_output_____" ] ], [ [ "retriever = EmbeddingRetriever(document_store=document_store, embedding_model=\"deepset/sentence_bert\", gpu=False)", "_____no_output_____" ] ], [ [ "### Prepare & Index FAQ data\nWe create a pandas dataframe containing some FAQ data (i.e curated pairs of question + answer) and index those in elasticsearch.\nHere: We download some question-answer pairs related to COVID-19", "_____no_output_____" ] ], [ [ "# Download\ntemp = requests.get(\"https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/faqs/faq_covidbert.csv\")\nopen('small_faq_covid.csv', 'wb').write(temp.content)\n\n# Get dataframe with columns \"question\", \"answer\" and some custom metadata\ndf = pd.read_csv(\"small_faq_covid.csv\")\n# Minimal cleaning\ndf.fillna(value=\"\", inplace=True)\ndf[\"question\"] = df[\"question\"].apply(lambda x: x.strip())\nprint(df.head())\n\n# Get embeddings for our questions from the FAQs\nquestions = list(df[\"question\"].values)\ndf[\"question_emb\"] = retriever.create_embedding(texts=questions)\n\n# Convert Dataframe to list of dicts and index them in our DocumentStore\ndocs_to_index = df.to_dict(orient=\"records\")\ndocument_store.write_documents(docs_to_index)", "_____no_output_____" ] ], [ [ "### Ask questions\nInitialize a Finder (this time without a reader) and ask questions", "_____no_output_____" ] ], [ [ "finder = Finder(reader=None, retriever=retriever)\nprediction = finder.get_answers_via_similar_questions(question=\"How is the virus spreading?\", top_k_retriever=10)\nprint_answers(prediction, details=\"all\")", "_____no_output_____" ] ] ]
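Under the hood, an embedding retriever ranks stored FAQ entries by vector similarity between the user question and the stored question embeddings. As a rough, hedged illustration (haystack's actual scoring may differ), here is a cosine-similarity sketch; the toy 4-dimensional vectors stand in for the real 768-dimensional sentence-BERT embeddings:

```python
import numpy as np

def cosine_similarity(a, b):
    """Cosine similarity between two embedding vectors."""
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

# Toy 4-dimensional stand-ins for real question embeddings.
faq_question_emb = [0.2, 0.9, 0.1, 0.4]
user_question_emb = [0.25, 0.85, 0.05, 0.5]
print(cosine_similarity(faq_question_emb, user_question_emb))
```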
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6d2ab8c7d37e61983c01f836ed1f8918b4f7e6
132,236
ipynb
Jupyter Notebook
primeiras_medidas_no_VNA.ipynb
lsmanoel/coax_RF-50_RG-58
78a9f4a33f74ae7c9f0d4f03265a14c88d11bc82
[ "MIT" ]
null
null
null
primeiras_medidas_no_VNA.ipynb
lsmanoel/coax_RF-50_RG-58
78a9f4a33f74ae7c9f0d4f03265a14c88d11bc82
[ "MIT" ]
null
null
null
primeiras_medidas_no_VNA.ipynb
lsmanoel/coax_RF-50_RG-58
78a9f4a33f74ae7c9f0d4f03265a14c88d11bc82
[ "MIT" ]
null
null
null
1,555.717647
129,924
0.947246
[ [ [ "<a href=\"https://colab.research.google.com/github/lsmanoel/coax_RF-50_RG-58/blob/master/primeiras_medidas_no_VNA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndata_url = 'https://raw.githubusercontent.com/lsmanoel/coax_RF-50_RG-58/master/dataset/Teste_1.csv'\n\ndata_pd = pd.read_csv(url_elc_plane_bottom_24_MHz)\n\ns12_magdb_np = data_pd['s12-magnitude (db)'].values\ns12_ph_np = data_pd['s12-phase (°)'].values\nf = data_pd['Frequency'].values\n\nfig = plt.figure(figsize = (10, 10))\nplt.plot(f, s12_ph_np/10)\nplt.plot(f, s12_magdb_np)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
4a6d306dbae7a4855f9de4c40f8bab84374c6869
45,217
ipynb
Jupyter Notebook
Basics/Variables&DataTypes.ipynb
gagana-hg/Python
9bf6ac8ab1278c27f0c6c9e08e98b8e13aa92458
[ "MIT" ]
null
null
null
Basics/Variables&DataTypes.ipynb
gagana-hg/Python
9bf6ac8ab1278c27f0c6c9e08e98b8e13aa92458
[ "MIT" ]
null
null
null
Basics/Variables&DataTypes.ipynb
gagana-hg/Python
9bf6ac8ab1278c27f0c6c9e08e98b8e13aa92458
[ "MIT" ]
null
null
null
64.320057
31,652
0.810779
[ [ [ "## **Variables and Data Types**", "_____no_output_____" ], [ "**Topics Covered** \n> Creating Variable \n> DataTypes \n> None Keyword \n> Multi Line statement and Multi Comment", "_____no_output_____" ], [ "-----\n\n### Creating a Variable\n\n* Variables are used to store values. In Python you don't have to declare a varaible. \n* Variable is created the moment you assign a value to it.\n\n* *3 Rules*\n> 1. It can be only one word.\n> 2. It can use only letters, numbers, and the underscore ( _ ) character.\n> 3. It can’t begin with a number.\n\n---- ", "_____no_output_____" ] ], [ [ "# One word. No space allowed\nmy Wallet = 10", "_____no_output_____" ], [ "# No special character\n@wallet = 10\n@wallet", "_____no_output_____" ], [ "# Should begin either with alphabet or _\n10wallet = 10", "_____no_output_____" ], [ "# Type string\ni = \"hello\"\ni", "_____no_output_____" ], [ "# Type integer\nvariable_1 = 10\nvariable_1", "_____no_output_____" ], [ "x = 10 # An integer variable", "_____no_output_____" ], [ "# assigning value 10\nwallet = 10\nwallet", "_____no_output_____" ], [ "# Updating the value\nwallet = 20\nwallet", "_____no_output_____" ] ], [ [ "1. Variables do not need to be declared with any particular type and can even change type after they have been set.", "_____no_output_____" ] ], [ [ "# Previously x stored an integer value\nx = \"Hello\" # A string", "_____no_output_____" ] ], [ [ "2. Assigning values to multiple variables", "_____no_output_____" ] ], [ [ "x, y, z = \"Red\", \"Black\", \"White\"\nprint(x)\nprint(y)\nprint(z)", "Red\nBlack\nWhite\n" ] ], [ [ "3. Assign the same value to multiple variables in one line", "_____no_output_____" ] ], [ [ "x = y = z = \"Red\"\nprint(x)\nprint(y)\nprint(z)", "Red\nRed\nRed\n" ] ], [ [ "-------------\n### Data Types \n* Data types are the classification of objects. \n* The basic types build into Python include float, int, str and bool. \n\n![image.png](attachment:b3eb87ca-5e37-4b56-ad80-7f2f7fdd7d0f.png)", "_____no_output_____" ] ], [ [ "x = \"Hello\"\ntype(x)", "_____no_output_____" ], [ "x = 'Hello'\ntype(x)", "_____no_output_____" ], [ "x = \"100\"\ntype(x)", "_____no_output_____" ], [ "x = 10\ntype(x)", "_____no_output_____" ], [ "x = 10.5\ntype(x)", "_____no_output_____" ], [ "x = \"True\"\ntype(x)", "_____no_output_____" ], [ "x = True\ny = False\n\ntype(x)", "_____no_output_____" ], [ "false = 10\ny = false\nprint(y) # print 10\ny = False\nprint(y) # Print False", "10\nFalse\n" ], [ "y = false", "_____no_output_____" ] ], [ [ "> The above threw an error because python is case sensitive. We need to use `False` as built in value. \nElse it treats it as a variable.", "_____no_output_____" ], [ "> `type()` is used to check data type of a given object.", "_____no_output_____" ], [ "**Q**. An example for Case sensitive variable", "_____no_output_____" ] ], [ [ "x = False\nfalse = 10\nprint(x, false)", "False 10\n" ] ], [ [ "****\n\n### Multiline statement\n\nWe can make a statement extend over multiple lines with the line continuation character(`\\`). \n* Explicit Continuation : When you right away use the line continuation character (`\\`) to split a statement into multiple lines. 
\n* Implicit line continuation is when you split a statement using either of parentheses ( ), brackets [ ] and braces { }.", "_____no_output_____" ] ], [ [ "# Explicit line continuation\na = 1 + 2 + 3 + \\\n 4 + 5 + 6 + \\\n 7 + 8 + 9\na", "_____no_output_____" ], [ "# Implicit\na = (1 + 2 + 3 +\n 4 + 5 + 6 +\n 7 + 8 + 9)\na", "_____no_output_____" ] ], [ [ "****\n\n### Multiline Comment", "_____no_output_____" ], [ "\nTriple quotes (`'''` or `\"\"\"`) are generally used for multi-line strings. But they can be used as a multi-line comment as well.", "_____no_output_____" ] ], [ [ "\"\"\"This is also a\nperfect example of\nmulti-line comments\"\"\"", "_____no_output_____" ], [ "\"\"\"This is also a\nperfect example of\nmulti-line comments\"\"\"\na = b = c = \"Hello\"", "_____no_output_____" ] ], [ [ "***\n### None Keyword \n> The **`None`** keyword is used to define a `null` value, or no value at all. \n> `None` is not the same as 0, False, or an empty string. \n> Comparing `None` to anything will always return False except None itself", "_____no_output_____" ] ], [ [ "X = None\n\"Value of X : {0} , Type of X : {1}\".format(X,type(X))", "_____no_output_____" ], [ "# None is always false\nbool(None)", "_____no_output_____" ] ], [ [ "--------", "_____no_output_____" ] ] ]
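To make the `None` comparison rule concrete, here is a short runnable sketch (added for illustration) showing why the identity check `is None` is the idiomatic test:

```python
x = None

# Identity checks are the idiomatic way to test for None.
print(x is None)       # True
print(x is not None)   # False

# Equality with anything other than None itself is False.
print(x == 0)          # False
print(x == "")         # False
print(x == None)       # True, but `is None` is the preferred style
```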
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a6d3cda77132c8dac1c7a7721fce37a5210cafb
21,483
ipynb
Jupyter Notebook
examples/Driver class development.ipynb
usnistgov/gpsdata
23b2b9ecb13e64da24d6fb15bb2dd3b44c932308
[ "Apache-2.0" ]
6
2019-10-16T16:13:16.000Z
2020-07-29T10:46:51.000Z
examples/Driver class development.ipynb
usnistgov/gpsdata
23b2b9ecb13e64da24d6fb15bb2dd3b44c932308
[ "Apache-2.0" ]
2
2020-01-17T15:26:00.000Z
2020-01-17T17:58:26.000Z
examples/Driver class development.ipynb
usnistgov/gpsdata
23b2b9ecb13e64da24d6fb15bb2dd3b44c932308
[ "Apache-2.0" ]
1
2020-02-20T06:00:53.000Z
2020-02-20T06:00:53.000Z
58.857534
604
0.636224
[ [ [ "# Writing a Device driver\n### Basic structure\nHere is a simple (but complete and functional) code block that implements a VISA driver for a power sensor:", "_____no_output_____" ] ], [ [ "import labbench as lb\nimport pandas as pd\n\n# Specific driver definitions are implemented by subclassing classes like lb.VISADevice\nclass PowerSensor(lb.VISADevice):\n initiate_continuous = lb.property.bool(key='INIT:CONT')\n output_trigger = lb.property.bool(key='OUTP:TRIG')\n trigger_source = lb.property.str(key='TRIG:SOUR', only=['IMM','INT','EXT','BUS','INT1'])\n trigger_count = lb.property.int(key='TRIG:COUN', min=1,max=200,step=1)\n measurement_rate = lb.property.str(key='SENS:MRAT', only=['NORM','DOUB','FAST'])\n sweep_aperture = lb.property.float(key='SWE:APER', min=20e-6, max=200e-3,help='time (in s)')\n frequency = lb.property.float(key='SENS:FREQ', min=10e6, max=18e9,help='center frequency (Hz)')\n\n def preset (self):\n \"\"\" Apply the instrument's preset state.\n \"\"\"\n self.write('SYST:PRES')\n\n def fetch (self):\n \"\"\" Get already-acquired data from the instrument.\n\n Returns:\n The data trace packaged as a pd.DataFrame\n \"\"\"\n response = self.query('FETC?').split(',')\n if len(response)==1:\n return float(response[0])\n else:\n return pd.to_numeric(pd.Series(response))", "_____no_output_____" ] ], [ [ "Let's work through what this does.\n\n### 1. Every `labbench` driver is a subclass of a labbench Device class, such as lb.VISADevice:\n\nThis is the definition of the PowerSensor:\n```python\nclass PowerSensor(lb.VISADevice):\n # ...\n```\nThis single line gives our power sensor driver all of the general capabilities of a VISA driver this driver class (known as \"subclassing\" \"inheriting\" in software engineering). This means that in this one line, the PowerSensor driver has adopted _all of the same member and attribute features as a \"plain\" VISADevice_. The `VISADevice` class helps streamline use of the `pyvisa` with features like\n* managing connection and disconnection, given a VISA resource string;\n* shortcuts for accessing simple instrument states, implemented entirely based on definitions (discussed below); and\n* wrapper methods (i.e., member functions) for pyvisa resource `write` and `query` methods.\n\nA more complete listing of everything that comes with `lb.VISADevice` is in the [programming reference](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.backends.VISADevice).\n\nThis power sensor driver definition is just that - a definition. To _use_ the driver and connect to the instrument in the lab, instantiate it and connect to the device. This is the simplest recommended way to instantiate, connect, and then disconnect in a script:\n```python\n# Here is the `with` block\nwith PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor:\n pass\n # The sensor is connected in this \"with\" block. Afterward, it disconnects, even\n # if there is an exception. Automation code that uses the sensor would go here.\n\n# Now the `with` block is done and we're disconnected\nprint('Disconnected, all done')\n```\nIt's nice to leave the sensor connected sometimes, like for interactive play on a python prompt. In that case, you can manually connect and disconnect:\n```python\nsensor = PowerSensor('TCPIP::10.0.0.1::::INSTR')\nsensor.connect()\n# The sensor is connected now. 
Automation code that uses the sensor would go here.\nsensor.disconnect() # We have to manually disconnect when we don't use a with block.\nprint('Disconnected, all done')\n```\n\nThere are two key pieces here:\n* The instantiation, `PowerSensor('TCPIP::10.0.0.1::::INSTR')`, is where we create a power sensor object that we can interact with. All `VISADevice` drivers use this standard resource string formatting; other types of drivers have different formats.\n* The `with` block (talked about under the name _context management_ in python language documents) serves two functions for any labbench driver (not just VISADevice):\n 1. The instrument is connected at the start of the with block\n 2. guarantees that the instrument will be disconnected after the with end of the with block, _even if there is an error inside the block!_\n \n\n \n### 2. Getting and setting simple parameters in the device the `state` object\n##### Reading the definition\nEach driver has an attribute called `state`. It is an optional way to give your users shortcuts to get and set simple instrument settings. This is the definition from the example above:\n\n```python\n initiate_continuous = lb.Bool (key='INIT:CONT')\n output_trigger = lb.Bool (key='OUTP:TRIG')\n trigger_source = lb.EnumBytes (key='TRIG:SOUR', values=['IMM','INT','EXT','BUS','INT1'])\n trigger_count = lb.Int (key='TRIG:COUN', min=1,max=200,step=1)\n measurement_rate = lb.EnumBytes (key='SENS:MRAT', values=['NORM','DOUB','FAST'])\n sweep_aperture = lb.Float (key='SWE:APER', min=20e-6, max=200e-3,help='time (in s)')\n frequency = lb.Float (key='SENS:FREQ', min=10e6, max=18e9,help='input center frequency (in Hz)')\n```\n\nThe `VISADevice` driver uses the metadata given for each descriptor above to determine how to communicate with the remote instrument on assignment. Behind the scenes, the `state` object has extra features that can monitor changes to these states to automatically record the changes we make to these states to a database, or (in the future) automatically generate a GUI front-panel.\n\n*Every* labbench driver has a state object, including at least the boolean state called `connected` (indicating whether the host computer is connected with the remote device is connected or not).\n\n---\n##### Using state attributes\n\nMaking an instance of PowerSensor - in the example, this was `PowerSensor('TCPIP::10.0.0.1::::INSTR')` - causes the `state` object to become interactive.\n\nAssignment causes causes the setting to be applied to the instrument. For example, \n`sensor.state.initiate_continuous = True` makes machinery inside `lb.VISADevice` do the following:\n1. validate that `True` is a valid python boolean value (because we defined it as `lb.Bool`)\n2. convert the python boolean `True` to a string (because `lb.VISADevice` knows SCPI uses string commands)\n3. send the SCPI string `'INIT:CONT TRUE'` (because we told it the command string is `'INIT:CONT'`, and by default it assumes that settings should be applied as `'<command> <value>'`)\n\nLikewise, a parameter \"get\" operation is triggered by simply using the attribute. The statement `print(sensor.state.initiate_continuous)` triggers `lb.VISADevice` to do the following:\n1. an SCPI query with the string `'INIT:CONT?'` (because we told it the command string is `'INIT:CONT'`, and by default it assumes that settings should be applied as `'<command>?'` with return values reported in a response string),\n2. the response string is converted to a python boolean type (because we defined it as `lb.Bool`),\n3. 
the converted boolean value is passed to the `print` function for display.\n\n##### Example of assigning to and from states\nHere is working example that gets and sets parameter values by communicating with the device.\n\n```python\nwith PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor:\n\n # This prints True if we're still in the with block\n print(sensor.state.isopen) \n \n # Use SCPI to request the identity of the sensor,\n # then return and print it. This was inherited from\n # VISADevice, so it is available on any VISADevice driver.\n print(sensor.state.identity)\n \n # PowerSensor.state.frequency is defined as a float. Assigning\n # to it causes logic inherited from lb.VISADevice\n # to convert this to a string, and then write the SCPI string\n # 'SENS:FREQ 2.45e9' to the instrument.\n sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz\n \n # We can also access the remote value of sensor.state.frequency.\n # Behind the scenes, each time we fetch the value, magic in\n # lb.VISADevice retrieves the current value from the instrument\n # with the SCPI query 'SENS:FREQ?', and then converts it to a floating point\n # number.\n print('The sensor frequency is {} GHz'.format(sensor.state.frequency/1e9))\n \nprint(sensor.state.isopen) # Prints False - we're disconnected\n```\nSimply put: assigning to or from with the attribute in the driver state instance causes remote set or get operations. The python data type matches the definition in the `state` class.\n\n##### Discovering and navigating states\nInheriting from `VISADevice` means that `PowerSensor.state` includes the seven states defined here, plus all others listed provided by VISADevice.state. Since these aren't listed here, it can get confusing tracking what has been inherited (like in other object-oriented libraries). Fortunately, there are many ways to explore the entire list of states that have been inherited from the parent state class:\n1. Look it up [in the API reference manual](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.visa.VISADevice.state)\n2. When working with an instantiated driver object in an ipython or jupyter notebook command prompt, type `lb.VISADevice.state.` and press tab to autocomplete a list of valid options. You'll also see some functions to do esoteric things with these states.\n3. When working in an editor like pycharm or spyder, you can ctrl+click on the right side of `VISADevice.state` to skip directly to looking at the definition of `VISADevice.state` in the `labbench` library\n4. When working in any kind of python prompt, you can use the `help` function\n ```python\n help(PowerSensor.state)\n ```\n5. When working in an ipython or jupyter prompt, a nicer option than 4. is the ipython help magick:\n ```python\n PowerSensor.state?\n ```\n\n##### Writing state attributes\nThe way we code this is a little unusual outside of python packages for web development. When we write a driver class, we add attributes defined with helper information such as\n- the python type that should represent the parameter\n- bounds for acceptable values of the parameter\n- descriptive \"help\" information for the user\n\nThese attributes are a kind of python type state class is a _descriptor_. We call them \"traits\" because following an underlying library that we extend, [traitlets](https://github.com/ipython/traitlets) under the hood. The example includes seven state traits.\n\nAfter instantiating with `PowerSensor()`, we can start interacting with `sensor.state`. 
Each one is now a live object we can assign to and use like any other python object. The difference is, each time we get the value, it is queried from the instrument, and each time we assign to it (the normal `=` operator), a set command goes to the instrument to set it.\n\nThe definition above includes metadata that dictates the python data type handled for this assignment operation, and how it should be converted:\n\n| **Descriptor metadata type** \t| **Uses in `PowerSensor` example** | **Behavior depends on the Device implementation** \t|\n|---------------------------------\t|------------------------------------|-----------------------------------\t|\n| Python data type for assignment \t| `lb.Float`, `lb.EnumBytes`, etc.\t | No \t |\n| Data validation settings \t| `min`,`max`,`step` (for numbers) | No |\n| | `values` (for enumerated types) \t | No \t|\n| Documentation strings \t| `help` | No \t|\n| Associated backend command \t| `command` | Yes \t|\n\nSome types of drivers ignore `command` keyword, as discussed in [how to write a labbench device driver](how to write a device driver).\n\n\n\n### 3. Device methods for commands and data acquisition\nThe `state` class above is useful for remote assignment operations on simple scalar data types. Supporting a broader collection of operation types (\"trigger a measurement,\" \"fetch and return measurement data,\" etc.) need the flexibility of more general-purpose functions. In python, a member function of a class is called a method.\n\nHere are the methods defined in `PowerSensor`:\n```python\ndef preset (self):\n self.write('SYST:PRES')\n\ndef fetch (self):\n response = self.query('FETC?').split(',')\n if len(response)==1:\n return float(response[0])\n else:\n return pd.to_numeric(pd.Series(response))\n```\nThese are the methods that are specific to our power sensor device. \n* The `preset` function tells the device to revert to its default state.\n* The `fetch` method performs some text processing on the response from the device, and returns either a single scalar or a pandas Series if the result is a sequence of power values.\n\nThe `labbench` convention is that the names of these methods are verbs (or sentence predicates, when single words are not specific enough).\n\n##### Example data acquisition script\nHere is an example that presets the device, sets the center frequency to 2.45 GHz, and then collects 10 power samples:", "_____no_output_____" ] ], [ [ "with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor:\n print('Connected to power sensor {}'.format(sensor.state.identity))\n \n sensor.preset()\n sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI'\n sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz\n \n power_levels = pd.Series([sensor.fetch() for i in range(10)])\n\nprint('All done! Got these power levels: ')\nprint(power_levels) ", "_____no_output_____" ] ], [ [ "##### Discovering and navigating device driver methods\nInheritance has similar implications as it does for the `VISADevice.state` class. Inheriting from `VISADevice` means that `PowerSensor` includes the `preset` and `fetch` methods, plus many more from `lb.VISADevice` (some of which it inherited from `lb.Device`). Since these aren't listed in the example definition above, it can get confusing tracking what methods are available through inheritance (like in other object-oriented libraries). 
Sometimes, informally, this confusion is called \"abstraction halitosis.\" Fortunately, there are many ways to identify the available objects and methods:\n1. Look it up [in the API reference manual](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.visa.VISADevice)\n2. When working with an instantiated driver object in an ipython or jupyter notebook command prompt, type `lb.VISADevice.` and press tab to autocomplete a list of valid options. You'll also see some functions to do esoteric things with these states.\n3. When working in an editor like pycharm or spyder, you can ctrl+click on the right side of `VISADevice` to skip directly to looking at the definition of `VISADevice` in the `labbench` library\n4. When working in any kind of python prompt, you can use the `help` function\n ```python\n help(PowerSensor)\n ```\n5. When working in an ipython or jupyter prompt, a nicely formatted version of 4. is the ipython help magick:\n ```python\n PowerSensor?\n ```\n\n## Miscellaneous extras\n##### Connecting to multiple devices\nThe best way to connect to multiple devices is to use a single `with` block. For example, a 10-sample acquisition with two power sensors might look like this:", "_____no_output_____" ] ], [ [ "with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor1,\\\n PowerSensor('TCPIP::10.0.0.2::::INSTR') as sensor2:\n print('Connected to power sensors')\n \n for sensor in sensor1, sensor2:\n sensor.preset()\n sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI'\n sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz\n \n power_levels = pd.DataFrame([[sensor1.fetch(),sensor2.fetch()] for i in range(10)])\n\nprint('All done! Got these power levels: ')\nprint(power_levels) ", "_____no_output_____" ] ], [ [ "##### Execute a function on state changes\nDatabase management and user interface tools make extensive use of callbacks, which gives an opportunity for you to execute custom code any time an assignment causes a state to change. A state change can occur in a couple of ways: \n* This triggers a callback if 2.45e9 is different than the last observed frequency:\n ```python\n sensor.state.frequency = 2.45e9\n ```\n* This triggers a callback if the instrument returns a frequency that is is different than the last observed frequency\n ```python\n current_freq = sensor.state.frequency \n ```\n \nConfigure a function call on an observed change with the `observe` method in `sensor.state`:", "_____no_output_____" ] ], [ [ "def callback(change):\n \"\"\" the callback function is given a single argument. change\n is a dictionary containing the descriptor ('frequency'),\n the state instance that contains frequency, and both\n the old and new values.\n \"\"\"\n # insert GUI update here?\n # commit updated state to a database here?\n print(change)\n\nwith PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor:\n sensor.state.observe(callback)\n \n sensor.preset()\n sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI'\n sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz \n\nprint('All done! Got these power levels: ')\nprint(power_levels)", "_____no_output_____" ] ], [ [ "Use of callbacks can help separate the actual measurement loop (the contents of the `with` block) from other functions for debugging, GUI, and database management. The result can be code that is more clear.", "_____no_output_____" ] ] ]
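The trait mechanism described above rests on Python descriptors. The following is a deliberately simplified, hypothetical sketch (not labbench's actual implementation) of how a descriptor can turn ordinary attribute assignment into a remote command; the class and command names here are made up for illustration:

```python
class RemoteFloat:
    """Minimal descriptor sketch: attribute access triggers a fake remote call."""

    def __init__(self, command):
        self.command = command

    def __set__(self, obj, value):
        # A real driver would send the SCPI set string here.
        print("would write: {} {}".format(self.command, float(value)))
        obj.__dict__[self.command] = float(value)

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        # A real driver would query the instrument here.
        print("would query: {}?".format(self.command))
        return obj.__dict__.get(self.command)


class FakeSensor:
    frequency = RemoteFloat('SENS:FREQ')


s = FakeSensor()
s.frequency = 2.45e9   # "would write: SENS:FREQ 2450000000.0"
print(s.frequency)     # "would query: SENS:FREQ?" then the cached value
```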
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6d44546c994bb698ccdb35929ba4cd72211373
30,229
ipynb
Jupyter Notebook
Python for Finance - Code Files/81 Running a Regression in Python/Python 3/Running a Regression in Python - Solution.ipynb
siddharthjain1611/Python_for_Finance_Investment_Fundamentals-and-Data-Analytics
f2f1e22f2d578c59f833f8f3c8b4523d91286e9e
[ "MIT" ]
3
2020-03-24T12:58:37.000Z
2020-08-03T17:22:35.000Z
Python for Finance - Code Files/81 Running a Regression in Python/Python 3/Running a Regression in Python - Solution.ipynb
siddharthjain1611/Python_for_Finance_Investment_Fundamentals-and-Data-Analytics
f2f1e22f2d578c59f833f8f3c8b4523d91286e9e
[ "MIT" ]
null
null
null
Python for Finance - Code Files/81 Running a Regression in Python/Python 3/Running a Regression in Python - Solution.ipynb
siddharthjain1611/Python_for_Finance_Investment_Fundamentals-and-Data-Analytics
f2f1e22f2d578c59f833f8f3c8b4523d91286e9e
[ "MIT" ]
1
2021-10-19T23:59:37.000Z
2021-10-19T23:59:37.000Z
36.159091
8,128
0.476695
[ [ [ "## Running a Regression in Python", "_____no_output_____" ], [ "*Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*", "_____no_output_____" ], [ "*A teacher at school decided her students should take an IQ test. She prepared 5 tests she believed were aligned with the requirements of the IQ examination.\nThe father of one child in the class turned out to be an econometrician, so he asked her for the results of the 30 kids. The file contained the points they earned on each test and the final IQ score.*", "_____no_output_____" ], [ "Load the IQ_data excel file. ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nfrom scipy import stats\nimport statsmodels.api as sm \n\nimport matplotlib.pyplot as plt", "C:\\Users\\365\\Anaconda3\\lib\\site-packages\\statsmodels\\compat\\pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n from pandas.core import datetools\n" ], [ "data = pd.read_excel('D:/Python/Data_Files/IQ_data.xlsx')", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "Prepare the data for a univariate regression of Test 1 based on the IQ result. Store the Test 1 scores in a variable, called X, and the IQ points in another variable, named Y. ", "_____no_output_____" ] ], [ [ "data[['IQ', 'Test 1']]", "_____no_output_____" ] ], [ [ "### Univariate Regression", "_____no_output_____" ] ], [ [ "X = data['Test 1']\nY = data['IQ']", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "Y", "_____no_output_____" ] ], [ [ "Create a well-organized scatter plot. Use the “axis” method with the following start and end points: [0, 120, 0, 150]. Label the axes “Test 1” and “IQ”, respectively.", "_____no_output_____" ] ], [ [ "plt.scatter(X,Y)\nplt.axis([0, 120, 0, 150])\nplt.ylabel('IQ')\nplt.xlabel('Test 1')\nplt.show()", "_____no_output_____" ] ], [ [ "Just by looking at the graph, do you believe Test 1 is a good predictor of the final IQ score?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6d483f9fe184941b047ebecea9ce70d7f74ad5
133,249
ipynb
Jupyter Notebook
Matplotlib_multivariate/Additional_Plot_Practice.ipynb
feng27/AIPND
86f59548126fd2e233edf8ecf022cb6c9d95e267
[ "MIT" ]
null
null
null
Matplotlib_multivariate/Additional_Plot_Practice.ipynb
feng27/AIPND
86f59548126fd2e233edf8ecf022cb6c9d95e267
[ "MIT" ]
null
null
null
Matplotlib_multivariate/Additional_Plot_Practice.ipynb
feng27/AIPND
86f59548126fd2e233edf8ecf022cb6c9d95e267
[ "MIT" ]
null
null
null
372.203911
92,232
0.922198
[ [ [ "# prerequisite package imports\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n%matplotlib inline\n\nfrom solutions_multiv import additionalplot_solution_1, additionalplot_solution_2", "_____no_output_____" ] ], [ [ "We will continue to work with the fuel economy dataset in this workspace.", "_____no_output_____" ] ], [ [ "fuel_econ = pd.read_csv('./data/fuel_econ.csv')\nfuel_econ.head()", "_____no_output_____" ] ], [ [ "**Task 1**: Practice creating a plot matrix, by depicting the relationship between five numeric variables in the fuel efficiency dataset: 'displ', 'co2', 'city', 'highway', and 'comb'. Do you see any interesting relationships that weren't highlighted previously?", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE", "_____no_output_____" ], [ "# run this cell to check your work against ours\nadditionalplot_solution_1()", "I set up my PairGrid to plot scatterplots off the diagonal and histograms on the diagonal. The intersections where 'co2' meets the fuel mileage measures are fairly interesting in how tight the curves are. You'll explore this more in the next task.\n" ] ], [ [ "**Task 2**: The output of the preceding task pointed out a potentially interesting relationship between co2 emissions and overall fuel efficiency. Engineer a new variable that depicts CO2 emissions as a function of gallons of gas (g / gal). (The 'co2' variable is in units of g / mi, and the 'comb' variable is in units of mi / gal.) Then, plot this new emissions variable against engine size ('displ') and fuel type ('fuelType'). For this task, compare not just Premium Gasoline and Regular Gasoline, but also Diesel fuel.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE", "_____no_output_____" ], [ "# run this cell to check your work against ours\nadditionalplot_solution_2()", "Due to the high number of data points and their high amount of overlap, I've chosen to plot the data in a faceted plot. You can see that engine sizes are smaller for cars that use regular gasoline against those that use premium gas. Most cars fall in an emissions band a bit below 9 kg CO2 per gallon; diesel cars are consistently higher, a little above 10 kg CO2 per gallon. This makes sense, since a gallon of gas gets burned no matter how efficient the process. More strikingly, there's a smattering of points with much smaller emissions. If you inspect these points more closely you'll see that they represent hybrid cars that use battery energy in addition to conventional fuel! To pull these mechanically out of the dataset requires more data than that which was trimmed to create it - and additional research to understand why these points don't fit the normal CO2 bands.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6d66155e284e111644df6ef4b64157fd8edf8b
4,109
ipynb
Jupyter Notebook
Untitled.ipynb
yigitozgumus/Polimi_Thesis
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
[ "MIT" ]
3
2019-07-27T14:00:42.000Z
2020-01-17T17:07:51.000Z
Untitled.ipynb
yigitozgumus/Polimi_Thesis
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
[ "MIT" ]
null
null
null
Untitled.ipynb
yigitozgumus/Polimi_Thesis
711c1edcf1fdb92fc6c15bf5ab1be141c13995c3
[ "MIT" ]
4
2019-10-22T02:58:26.000Z
2020-10-06T09:59:26.000Z
38.401869
1,532
0.713069
[ [ [ "import os\nimport numpy as np\nfrom skimage import io\nfrom PIL import Image\nfrom tqdm import tqdm, tqdm_notebook\nfrom time import sleep\nfrom time import time\nfrom utils.dirs import listdir_nohidden\nfrom utils.factory import create\nfrom utils.logger import Logger\nfrom utils.dirs import create_dirs\nfrom models import *\nfrom trainers import *\n\nfrom utils.visualization import *\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\nfrom sklearn.metrics import roc_curve, auc, precision_recall_fscore_support, precision_recall_curve\nimport matplotlib.gridspec as gridspec\nimport seaborn as sns\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\nfrom time import sleep\nfrom time import time\nplt.style.use('seaborn') \n\nfrom IPython.display import set_matplotlib_formats\nset_matplotlib_formats('retina')\nimport os", "_____no_output_____" ], [ "arrays = [np.random.normal(loc=0.0,scale=1.0,size=(4,32)) for i in range(5)]\narray = np.random.normal(loc=0.0,scale=1.0,size=(4,32))", "_____no_output_____" ], [ "for i in range(5):\n plt.grid(b=None)\n plt.axis('off')\n plt.imshow(arrays[i])\n plt.savefig(\"lr_{}.png\".format(i),transparent = True, bbox_inches = 'tight', pad_inches = 0)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a6d6c87b160ba57db9fe61fa16b4de32c45ebc1
38,196
ipynb
Jupyter Notebook
notebooks/Python-in-2-days/D1_L4_NumPy/05-Computation-on-arrays-broadcasting.ipynb
alexzzlin/ml_training
895e4092276303f283aa7b510ffc942e5207828f
[ "MIT" ]
1
2020-11-30T01:43:57.000Z
2020-11-30T01:43:57.000Z
notebooks/Python-in-2-days/D1_L4_NumPy/05-Computation-on-arrays-broadcasting.ipynb
alexzzlin/ml_training
895e4092276303f283aa7b510ffc942e5207828f
[ "MIT" ]
null
null
null
notebooks/Python-in-2-days/D1_L4_NumPy/05-Computation-on-arrays-broadcasting.ipynb
alexzzlin/ml_training
895e4092276303f283aa7b510ffc942e5207828f
[ "MIT" ]
6
2021-01-07T01:07:27.000Z
2021-03-28T18:14:29.000Z
52.395062
20,176
0.760158
[ [ [ "# Computation on Arrays: Broadcasting", "_____no_output_____" ], [ "We saw in the previous section how NumPy's universal functions can be used to *vectorize* operations and thereby remove slow Python loops.\nAnother means of vectorizing operations is to use NumPy's *broadcasting* functionality.\nBroadcasting is simply a set of rules for applying binary ufuncs (e.g., addition, subtraction, multiplication, etc.) on arrays of different sizes.", "_____no_output_____" ], [ "## Introducing Broadcasting\n\nRecall that for arrays of the same size, binary operations are performed on an element-by-element basis:", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "a = np.array([0, 1, 2])\nb = np.array([5, 5, 5])\na + b", "_____no_output_____" ] ], [ [ "Broadcasting allows these types of binary operations to be performed on arrays of different sizes–for example, we can just as easily add a scalar (think of it as a zero-dimensional array) to an array:", "_____no_output_____" ] ], [ [ "a + 5", "_____no_output_____" ] ], [ [ "We can think of this as an operation that stretches or duplicates the value ``5`` into the array ``[5, 5, 5]``, and adds the results.\nThe advantage of NumPy's broadcasting is that this duplication of values does not actually take place, but it is a useful mental model as we think about broadcasting.\n\nWe can similarly extend this to arrays of higher dimension. Observe the result when we add a one-dimensional array to a two-dimensional array:", "_____no_output_____" ] ], [ [ "M = np.ones((3, 3))\nM", "_____no_output_____" ], [ "M + a", "_____no_output_____" ] ], [ [ "Here the one-dimensional array ``a`` is stretched, or broadcast across the second dimension in order to match the shape of ``M``.\n\nWhile these examples are relatively easy to understand, more complicated cases can involve broadcasting of both arrays. 
Consider the following example:", "_____no_output_____" ] ], [ [ "a = np.arange(3)\nb = np.arange(3)[:, np.newaxis]\n\nprint(a)\nprint(b)", "[0 1 2]\n[[0]\n [1]\n [2]]\n" ], [ "a + b", "_____no_output_____" ] ], [ [ "Just as before we stretched or broadcasted one value to match the shape of the other, here we've stretched *both* ``a`` and ``b`` to match a common shape, and the result is a two-dimensional array!\nThe geometry of these examples is visualized in the following figure.", "_____no_output_____" ], [ "![Broadcasting Visual](figures/broadcasting.png)", "_____no_output_____" ], [ "The light boxes represent the broadcasted values: again, this extra memory is not actually allocated in the course of the operation, but it can be useful conceptually to imagine that it is.", "_____no_output_____" ], [ "## Rules of Broadcasting\n\nBroadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays:\n\n- Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is *padded* with ones on its leading (left) side.\n- Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape.\n- Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised.\n\nTo make these rules clear, let's consider a few examples in detail.", "_____no_output_____" ], [ "### Broadcasting example 1\n\nLet's look at adding a two-dimensional array to a one-dimensional array:", "_____no_output_____" ] ], [ [ "M = np.ones((2, 3))\na = np.arange(3)", "_____no_output_____" ] ], [ [ "Let's consider an operation on these two arrays. The shape of the arrays are\n\n- ``M.shape = (2, 3)``\n- ``a.shape = (3,)``\n\nWe see by rule 1 that the array ``a`` has fewer dimensions, so we pad it on the left with ones:\n\n- ``M.shape -> (2, 3)``\n- ``a.shape -> (1, 3)``\n\nBy rule 2, we now see that the first dimension disagrees, so we stretch this dimension to match:\n\n- ``M.shape -> (2, 3)``\n- ``a.shape -> (2, 3)``\n\nThe shapes match, and we see that the final shape will be ``(2, 3)``:", "_____no_output_____" ] ], [ [ "M + a", "_____no_output_____" ] ], [ [ "### Broadcasting example 2\n\nLet's take a look at an example where both arrays need to be broadcast:", "_____no_output_____" ] ], [ [ "a = np.arange(3).reshape((3, 1))\nb = np.arange(3)", "_____no_output_____" ] ], [ [ "Again, we'll start by writing out the shape of the arrays:\n\n- ``a.shape = (3, 1)``\n- ``b.shape = (3,)``\n\nRule 1 says we must pad the shape of ``b`` with ones:\n\n- ``a.shape -> (3, 1)``\n- ``b.shape -> (1, 3)``\n\nAnd rule 2 tells us that we upgrade each of these ones to match the corresponding size of the other array:\n\n- ``a.shape -> (3, 3)``\n- ``b.shape -> (3, 3)``\n\nBecause the result matches, these shapes are compatible. We can see this here:", "_____no_output_____" ] ], [ [ "a + b", "_____no_output_____" ] ], [ [ "### Broadcasting example 3\n\nNow let's take a look at an example in which the two arrays are not compatible:", "_____no_output_____" ] ], [ [ "M = np.ones((3, 2))\na = np.arange(3)", "_____no_output_____" ] ], [ [ "This is just a slightly different situation than in the first example: the matrix ``M`` is transposed.\nHow does this affect the calculation? 
The shape of the arrays are\n\n- ``M.shape = (3, 2)``\n- ``a.shape = (3,)``\n\nAgain, rule 1 tells us that we must pad the shape of ``a`` with ones:\n\n- ``M.shape -> (3, 2)``\n- ``a.shape -> (1, 3)``\n\nBy rule 2, the first dimension of ``a`` is stretched to match that of ``M``:\n\n- ``M.shape -> (3, 2)``\n- ``a.shape -> (3, 3)``\n\nNow we hit rule 3–the final shapes do not match, so these two arrays are incompatible, as we can observe by attempting this operation:", "_____no_output_____" ] ], [ [ "M + a", "_____no_output_____" ] ], [ [ "Note the potential confusion here: you could imagine making ``a`` and ``M`` compatible by, say, padding ``a``'s shape with ones on the right rather than the left.\nBut this is not how the broadcasting rules work!\nThat sort of flexibility might be useful in some cases, but it would lead to potential areas of ambiguity.\nIf right-side padding is what you'd like, you can do this explicitly by reshaping the array (we'll use the ``np.newaxis`` keyword introduced in The Basics of NumPy Arrays):", "_____no_output_____" ] ], [ [ "a[:, np.newaxis].shape", "_____no_output_____" ], [ "M + a[:, np.newaxis]", "_____no_output_____" ] ], [ [ "Also note that while we've been focusing on the ``+`` operator here, these broadcasting rules apply to *any* binary ``ufunc``.\nFor example, here is the ``logaddexp(a, b)`` function, which computes ``log(exp(a) + exp(b))`` with more precision than the naive approach:", "_____no_output_____" ] ], [ [ "np.logaddexp(M, a[:, np.newaxis])", "_____no_output_____" ] ], [ [ "For more information on the many available universal functions, refer to Computation on NumPy Arrays: Universal Functions.", "_____no_output_____" ], [ "## Broadcasting in Practice", "_____no_output_____" ], [ "Broadcasting operations form the core of many examples we'll see throughout this book.\nWe'll now take a look at a couple simple examples of where they can be useful.", "_____no_output_____" ], [ "### Centering an array", "_____no_output_____" ], [ "In the previous section, we saw that ufuncs allow a NumPy user to remove the need to explicitly write slow Python loops. 
Broadcasting extends this ability.\nOne commonly seen example is when centering an array of data.\nImagine you have an array of 10 observations, each of which consists of 3 values.\nUsing the standard convention, we'll store this in a $10 \\times 3$ array:", "_____no_output_____" ] ], [ [ "X = np.random.random((10, 3))", "_____no_output_____" ] ], [ [ "We can compute the mean of each feature using the ``mean`` aggregate across the first dimension:", "_____no_output_____" ] ], [ [ "Xmean = X.mean(0)\nXmean", "_____no_output_____" ] ], [ [ "And now we can center the ``X`` array by subtracting the mean (this is a broadcasting operation):", "_____no_output_____" ] ], [ [ "X_centered = X - Xmean", "_____no_output_____" ] ], [ [ "To double-check that we've done this correctly, we can check that the centered array has near zero mean:", "_____no_output_____" ] ], [ [ "X_centered.mean(0)", "_____no_output_____" ] ], [ [ "To within machine precision, the mean is now zero.", "_____no_output_____" ], [ "### Plotting a two-dimensional function", "_____no_output_____" ], [ "One place that broadcasting is very useful is in displaying images based on two-dimensional functions.\nIf we want to define a function $z = f(x, y)$, broadcasting can be used to compute the function across the grid:", "_____no_output_____" ] ], [ [ "# x and y have 50 steps from 0 to 5\nx = np.linspace(0, 5, 50)\ny = np.linspace(0, 5, 50)[:, np.newaxis]\n\nz = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)", "_____no_output_____" ] ], [ [ "We'll use Matplotlib to plot this two-dimensional array (these tools will be discussed in full in Density and Contour Plots):", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "plt.imshow(z, origin='lower', extent=[0, 5, 0, 5],\n cmap='viridis')\nplt.colorbar();", "_____no_output_____" ] ], [ [ "The result is a compelling visualization of the two-dimensional function.", "_____no_output_____" ] ] ]
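The three broadcasting rules above can also be checked programmatically. As a small added illustration, `np.broadcast` reports the shape two arrays would broadcast to, and raises `ValueError` exactly when rule 3 is violated:

```python
import numpy as np

# Compatible shapes from example 1: (2, 3) with (3,) -> (2, 3).
print(np.broadcast(np.ones((2, 3)), np.arange(3)).shape)

# Incompatible shapes from example 3: (3, 2) with (3,) raises ValueError.
try:
    np.broadcast(np.ones((3, 2)), np.arange(3))
except ValueError as err:
    print("incompatible:", err)
```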
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a6d74354a69a10ee0e8cbf0d94a33ed1ff0bc5d
54,875
ipynb
Jupyter Notebook
.ipynb_checkpoints/search-4e-checkpoint.ipynb
jcarlosangelesc/IntelligentSystems
5a96073b7cbff639a4d17d9741227cff3feaec7e
[ "MIT" ]
null
null
null
.ipynb_checkpoints/search-4e-checkpoint.ipynb
jcarlosangelesc/IntelligentSystems
5a96073b7cbff639a4d17d9741227cff3feaec7e
[ "MIT" ]
null
null
null
.ipynb_checkpoints/search-4e-checkpoint.ipynb
jcarlosangelesc/IntelligentSystems
5a96073b7cbff639a4d17d9741227cff3feaec7e
[ "MIT" ]
null
null
null
32.859281
1,106
0.534451
[ [ [ "*Note: This is not yet ready, but shows the direction I'm leaning in for Fourth Edition Search.*\n\n# State-Space Search\n\nThis notebook describes several state-space search algorithms, and how they can be used to solve a variety of problems. We start with a simple algorithm and a simple domain: finding a route from city to city. Later we will explore other algorithms and domains.\n\n## The Route-Finding Domain\n\nLike all state-space search problems, in a route-finding problem you will be given:\n- A start state (for example, `'A'` for the city Arad).\n- A goal state (for example, `'B'` for the city Bucharest).\n- Actions that can change state (for example, driving from `'A'` to `'S'`).\n\nYou will be asked to find:\n- A path from the start state, through intermediate states, to the goal state.\n\nWe'll use this map:\n\n<img src=\"http://robotics.cs.tamu.edu/dshell/cs625/images/map.jpg\" height=\"366\" width=\"603\">\n\nA state-space search problem can be represented by a *graph*, where the vertexes of the graph are the states of the problem (in this case, cities) and the edges of the graph are the actions (in this case, driving along a road).\n\nWe'll represent a city by its single initial letter. \nWe'll represent the graph of connections as a `dict` that maps each city to a list of the neighboring cities (connected by a road). For now we don't explicitly represent the actions, nor the distances\nbetween cities.", "_____no_output_____" ] ], [ [ "romania = {\n 'A': ['Z', 'T', 'S'],\n 'B': ['F', 'P', 'G', 'U'],\n 'C': ['D', 'R', 'P'],\n 'D': ['M', 'C'],\n 'E': ['H'],\n 'F': ['S', 'B'],\n 'G': ['B'],\n 'H': ['U', 'E'],\n 'I': ['N', 'V'],\n 'L': ['T', 'M'],\n 'M': ['L', 'D'],\n 'N': ['I'],\n 'O': ['Z', 'S'],\n 'P': ['R', 'C', 'B'],\n 'R': ['S', 'C', 'P'],\n 'S': ['A', 'O', 'F', 'R'],\n 'T': ['A', 'L'],\n 'U': ['B', 'V', 'H'],\n 'V': ['U', 'I'],\n 'Z': ['O', 'A']}", "_____no_output_____" ] ], [ [ "Suppose we want to get from `A` to `B`. Where can we go from the start state, `A`?", "_____no_output_____" ] ], [ [ "romania['A']", "_____no_output_____" ] ], [ [ "We see that from `A` we can get to any of the three cities `['Z', 'T', 'S']`. Which should we choose? *We don't know.* That's the whole point of *search*: we don't know which immediate action is best, so we'll have to explore, until we find a *path* that leads to the goal. \n\nHow do we explore? We'll start with a simple algorithm that will get us from `A` to `B`. We'll keep a *frontier*&mdash;a collection of not-yet-explored states&mdash;and expand the frontier outward until it reaches the goal. To be more precise:\n\n- Initially, the only state in the frontier is the start state, `'A'`.\n- Until we reach the goal, or run out of states in the frontier to explore, do the following:\n - Remove the first state from the frontier. Call it `s`.\n - If `s` is the goal, we're done. Return the path to `s`.\n - Otherwise, consider all the neighboring states of `s`. 
For each one:\n   - If we have not previously explored the state, add it to the end of the frontier.\n   - Also keep track of the previous state that led to this new neighboring state; we'll need this to reconstruct the path to the goal, and to keep us from re-visiting previously explored states.\n   \n# A Simple Search Algorithm: `breadth_first`\n   \nThe function `breadth_first` implements this strategy:", "_____no_output_____" ] ], [ [ "from collections import deque # Doubly-ended queue: pop from left, append to right.\n\ndef breadth_first(start, goal, neighbors):\n    \"Find a shortest sequence of states from start to the goal.\"\n    frontier = deque([start]) # A queue of states\n    previous = {start: None}  # start has no previous state; other states will\n    while frontier:\n        s = frontier.popleft()\n        if s == goal:\n            return path(previous, s)\n        for s2 in neighbors[s]:\n            if s2 not in previous:\n                frontier.append(s2)\n                previous[s2] = s\n                \ndef path(previous, s): \n    \"Return a list of states that lead to state s, according to the previous dict.\"\n    return [] if (s is None) else path(previous, previous[s]) + [s]", "_____no_output_____" ] ], [ [ "A couple of things to note: \n\n1. We always add new states to the end of the frontier queue. That means that all the states that are adjacent to the start state will come first in the queue, then all the states that are two steps away, then three steps, etc.\nThat's what we mean by *breadth-first* search.\n2. We recover the path to an `end` state by following the trail of `previous[end]` pointers, all the way back to `start`.\nThe dict `previous` is a map of `{state: previous_state}`. \n3. When we finally get an `s` that is the goal state, we know we have found a shortest path, because any other state in the queue must correspond to a path that is as long or longer.\n4. Note that `previous` contains all the states that are currently in `frontier` as well as all the states that were in `frontier` in the past.\n5. If no path to the goal is found, then `breadth_first` returns `None`. If a path is found, it returns the sequence of states on the path.\n\nSome examples:", "_____no_output_____" ] ], [ [ "breadth_first('A', 'B', romania)", "_____no_output_____" ], [ "breadth_first('L', 'N', romania)", "_____no_output_____" ], [ "breadth_first('N', 'L', romania)", "_____no_output_____" ], [ "breadth_first('E', 'E', romania)", "_____no_output_____" ] ], [ [ "Now let's try a different kind of problem that can be solved with the same search function.\n\n## Word Ladders Problem\n\nA *word ladder* problem is this: given a start word and a goal word, find the shortest way to transform the start word into the goal word by changing one letter at a time, such that each change results in a word. For example, starting with `green` we can reach `grass` in 7 steps:\n\n`green` &rarr; `greed` &rarr; `treed` &rarr; `trees` &rarr; `tress` &rarr; `cress` &rarr; `crass` &rarr; `grass`\n\nWe will need a dictionary of words. We'll use 5-letter words from the [Stanford GraphBase](http://www-cs-faculty.stanford.edu/~uno/sgb.html) project for this purpose. 
Let's get that file from aimadata.", "_____no_output_____" ] ], [ [ "from search import *\nsgb_words = DataFile(\"EN-text/sgb-words.txt\")", "_____no_output_____" ] ], [ [ "We can assign `WORDS` to be the set of all the words in this file:", "_____no_output_____" ] ], [ [ "WORDS = set(sgb_words.read().split())\nlen(WORDS)", "_____no_output_____" ] ], [ [ "And define `neighboring_words` to return the set of all words that are a one-letter change away from a given `word`:", "_____no_output_____" ] ], [ [ "def neighboring_words(word):\n    \"All words that are one letter away from this word.\"\n    neighbors = {word[:i] + c + word[i+1:]\n                 for i in range(len(word))\n                 for c in 'abcdefghijklmnopqrstuvwxyz'\n                 if c != word[i]}\n    return neighbors & WORDS", "_____no_output_____" ] ], [ [ "For example:", "_____no_output_____" ] ], [ [ "neighboring_words('hello')", "_____no_output_____" ], [ "neighboring_words('world')", "_____no_output_____" ] ], [ [ "Now we can create `word_neighbors` as a dict of `{word: {neighboring_word, ...}}`: ", "_____no_output_____" ] ], [ [ "word_neighbors = {word: neighboring_words(word)\n                  for word in WORDS}", "_____no_output_____" ] ], [ [ "Now the `breadth_first` function can be used to solve a word ladder problem:", "_____no_output_____" ] ], [ [ "breadth_first('green', 'grass', word_neighbors)", "_____no_output_____" ], [ "breadth_first('smart', 'brain', word_neighbors)", "_____no_output_____" ], [ "breadth_first('frown', 'smile', word_neighbors)", "_____no_output_____" ] ], [ [ "# More General Search Algorithms\n\nNow we'll embellish the `breadth_first` algorithm to make a family of search algorithms with more capabilities:\n\n1. We distinguish between an *action* and the *result* of an action.\n2. We allow different measures of the cost of a solution (not just the number of steps in the sequence).\n3. We search through the state space in an order that is more likely to lead to an optimal solution quickly.\n\nHere's how we do these things:\n\n1. Instead of having a graph of neighboring states, we instead have an object of type *Problem*. A Problem\nhas one method, `Problem.actions(state)` to return a collection of the actions that are allowed in a state,\nand another method, `Problem.result(state, action)` that says what happens when you take an action.\n2. We keep a set, `explored`, of states that have already been explored. We also have a class, `Frontier`, that makes it efficient to ask if a state is on the frontier.\n3. Each action has a cost associated with it (in fact, the cost can vary with both the state and the action).\n4. 
The `Frontier` class acts as a priority queue, allowing the \"best\" state to be explored next.\nWe represent a sequence of actions and resulting states as a linked list of `Node` objects.\n\nThe algorithm `breadth_first_search` is basically the same as `breadth_first`, but using our new conventions:", "_____no_output_____" ] ], [ [ "def breadth_first_search(problem):\n    \"Search for goal; paths with least number of steps first.\"\n    if problem.is_goal(problem.initial): \n        return Node(problem.initial)\n    frontier = FrontierQ(Node(problem.initial), LIFO=False)\n    explored = set()\n    while frontier:\n        node = frontier.pop()\n        explored.add(node.state)\n        for action in problem.actions(node.state):\n            child = node.child(problem, action)\n            if child.state not in explored and child.state not in frontier:\n                if problem.is_goal(child.state):\n                    return child\n                frontier.add(child)", "_____no_output_____" ] ], [ [ "Next is `uniform_cost_search`, in which each step can have a different cost, and we still consider first one of the states with minimum cost so far.", "_____no_output_____" ] ], [ [ "def uniform_cost_search(problem, costfn=lambda node: node.path_cost):\n    frontier = FrontierPQ(Node(problem.initial), costfn)\n    explored = set()\n    while frontier:\n        node = frontier.pop()\n        if problem.is_goal(node.state):\n            return node\n        explored.add(node.state)\n        for action in problem.actions(node.state):\n            child = node.child(problem, action)\n            if child.state not in explored and child.state not in frontier:\n                frontier.add(child)\n            elif child.state in frontier and child.path_cost < frontier.states[child.state].path_cost:\n                frontier.replace(child)", "_____no_output_____" ] ], [ [ "Finally, `astar_search`, in which the cost includes an estimate of the distance to the goal as well as the distance travelled so far.", "_____no_output_____" ] ], [ [ "def astar_search(problem, heuristic):\n    costfn = lambda node: node.path_cost + heuristic(node.state)\n    return uniform_cost_search(problem, costfn)", "_____no_output_____" ] ], [ [ "# Search Tree Nodes\n\nThe solution to a search problem is now a linked list of `Node`s, where each `Node`\nincludes a `state` and the `path_cost` of getting to the state. In addition, for every `Node` except for the first (root) `Node`, there is a previous `Node` (indicating the state that led to this `Node`) and an `action` (indicating the action taken to get here).", "_____no_output_____" ] ], [ [ "class Node(object):\n    \"\"\"A node in a search tree. A search tree is a spanning tree over states.\n    A Node contains a state, the previous node in the tree, the action that\n    takes us from the previous state to this state, and the path cost to get to \n    this state. 
If a state is arrived at by two paths, then there are two nodes \n    with the same state.\"\"\"\n\n    def __init__(self, state, previous=None, action=None, step_cost=1):\n        \"Create a search tree Node, derived from a previous Node by an action.\"\n        self.state = state\n        self.previous = previous\n        self.action = action\n        self.path_cost = 0 if previous is None else (previous.path_cost + step_cost)\n\n    def __repr__(self): return \"<Node {}: {}>\".format(self.state, self.path_cost)\n    \n    def __lt__(self, other): return self.path_cost < other.path_cost\n    \n    def child(self, problem, action):\n        \"The Node you get by taking an action from this Node.\"\n        result = problem.result(self.state, action)\n        return Node(result, self, action, \n                    problem.step_cost(self.state, action, result)) ", "_____no_output_____" ] ], [ [ "# Frontiers\n\nA frontier is a collection of Nodes that acts like both a Queue and a Set. A frontier, `f`, supports these operations:\n\n* `f.add(node)`: Add a node to the Frontier.\n\n* `f.pop()`: Remove and return the \"best\" node from the frontier.\n\n* `f.replace(node)`: add this node and remove a previous node with the same state.\n\n* `state in f`: Test if some node in the frontier has arrived at state.\n\n* `f[state]`: returns the node corresponding to this state in frontier.\n\n* `len(f)`: The number of Nodes in the frontier. When the frontier is empty, `f` is *false*.\n\nWe provide two kinds of frontiers: One for \"regular\" queues, either first-in-first-out (for breadth-first search) or last-in-first-out (for depth-first search), and one for priority queues, where you can specify what cost function on nodes you are trying to minimize.", "_____no_output_____" ] ], [ [ "from collections import OrderedDict\nimport heapq\n\nclass FrontierQ(OrderedDict):\n    \"A Frontier that supports FIFO or LIFO Queue ordering.\"\n    \n    def __init__(self, initial, LIFO=False):\n        \"\"\"Initialize Frontier with an initial Node.\n        If LIFO is True, pop from the end first; otherwise from front first.\"\"\"\n        self.LIFO = LIFO\n        self.add(initial)\n    \n    def add(self, node):\n        \"Add a node to the frontier.\"\n        self[node.state] = node\n    \n    def pop(self):\n        \"Remove and return the next Node in the frontier.\"\n        (state, node) = self.popitem(self.LIFO)\n        return node\n    \n    def replace(self, node):\n        \"Make this node replace the old node with the same state.\"\n        del self[node.state]\n        self.add(node)", "_____no_output_____" ], [ "class FrontierPQ:\n    \"A Frontier ordered by a cost function; a Priority Queue.\"\n    \n    def __init__(self, initial, costfn=lambda node: node.path_cost):\n        \"Initialize Frontier with an initial Node, and specify a cost function.\"\n        self.heap = []\n        self.states = {}\n        self.costfn = costfn\n        self.add(initial)\n    \n    def add(self, node):\n        \"Add node to the frontier.\"\n        cost = self.costfn(node)\n        heapq.heappush(self.heap, (cost, node))\n        self.states[node.state] = node\n    \n    def pop(self):\n        \"Remove and return the Node with minimum cost.\"\n        (cost, node) = heapq.heappop(self.heap)\n        self.states.pop(node.state, None) # remove state\n        return node\n    \n    def replace(self, node):\n        \"Make this node replace a previous node with the same state.\"\n        if node.state not in self:\n            raise ValueError('{} not there to replace'.format(node.state))\n        for (i, (cost, old_node)) in enumerate(self.heap):\n            if old_node.state == node.state:\n                self.heap[i] = (self.costfn(node), node)\n                heapq._siftdown(self.heap, 0, i)\n                return\n\n    def __contains__(self, state): return state in self.states\n    \n    def __len__(self): return 
len(self.heap)", "_____no_output_____" ] ], [ [ "# Search Problems\n\n`Problem` is the abstract class for all search problems. You can define your own class of problems as a subclass of `Problem`. You will need to override the `actions` and `result` method to describe how your problem works. You will also have to either override `is_goal` or pass a collection of goal states to the initialization method. If actions have different costs, you should override the `step_cost` method. ", "_____no_output_____" ] ], [ [ "class Problem(object):\n \"\"\"The abstract class for a search problem.\"\"\"\n\n def __init__(self, initial=None, goals=(), **additional_keywords):\n \"\"\"Provide an initial state and optional goal states.\n A subclass can have additional keyword arguments.\"\"\"\n self.initial = initial # The initial state of the problem.\n self.goals = goals # A collection of possibe goal states.\n self.__dict__.update(**additional_keywords)\n\n def actions(self, state):\n \"Return a list of actions executable in this state.\"\n raise NotImplementedError # Override this!\n\n def result(self, state, action):\n \"The state that results from executing this action in this state.\"\n raise NotImplementedError # Override this!\n\n def is_goal(self, state):\n \"True if the state is a goal.\" \n return state in self.goals # Optionally override this!\n\n def step_cost(self, state, action, result=None):\n \"The cost of taking this action from this state.\"\n return 1 # Override this if actions have different costs ", "_____no_output_____" ], [ "def action_sequence(node):\n \"The sequence of actions to get to this node.\"\n actions = []\n while node.previous:\n actions.append(node.action)\n node = node.previous\n return actions[::-1]\n\ndef state_sequence(node):\n \"The sequence of states to get to this node.\"\n states = [node.state]\n while node.previous:\n node = node.previous\n states.append(node.state)\n return states[::-1]", "_____no_output_____" ] ], [ [ "# Two Location Vacuum World", "_____no_output_____" ] ], [ [ "dirt = '*'\nclean = ' '\n\nclass TwoLocationVacuumProblem(Problem):\n \"\"\"A Vacuum in a world with two locations, and dirt.\n Each state is a tuple of (location, dirt_in_W, dirt_in_E).\"\"\"\n\n def actions(self, state): return ('W', 'E', 'Suck')\n \n def is_goal(self, state): return dirt not in state\n \n def result(self, state, action):\n \"The state that results from executing this action in this state.\" \n (loc, dirtW, dirtE) = state\n if action == 'W': return ('W', dirtW, dirtE)\n elif action == 'E': return ('E', dirtW, dirtE)\n elif action == 'Suck' and loc == 'W': return (loc, clean, dirtE)\n elif action == 'Suck' and loc == 'E': return (loc, dirtW, clean) \n else: raise ValueError('unknown action: ' + action)", "_____no_output_____" ], [ "problem = TwoLocationVacuumProblem(initial=('W', dirt, dirt))\nresult = uniform_cost_search(problem)\nresult", "_____no_output_____" ], [ "action_sequence(result)", "_____no_output_____" ], [ "state_sequence(result)", "_____no_output_____" ], [ "problem = TwoLocationVacuumProblem(initial=('E', clean, dirt))\nresult = uniform_cost_search(problem)\naction_sequence(result)", "_____no_output_____" ] ], [ [ "# Water Pouring Problem\n\nHere is another problem domain, to show you how to define one. The idea is that we have a number of water jugs and a water tap and the goal is to measure out a specific amount of water (in, say, ounces or liters). 
You can completely fill or empty a jug, but because the jugs don't have markings on them, you can't partially fill them with a specific amount. You can, however, pour one jug into another, stopping when the second is full or the first is empty.", "_____no_output_____" ] ], [ [ "class PourProblem(Problem):\n    \"\"\"Problem about pouring water between jugs to achieve some water level.\n    Each state is a tuple of levels. In the initialization, provide a tuple of \n    capacities, e.g. PourProblem(capacities=(8, 16, 32), initial=(2, 4, 3), goals={7}), \n    which means three jugs of capacity 8, 16, 32, currently filled with 2, 4, 3 units of \n    water, respectively, and the goal is to get a level of 7 in any one of the jugs.\"\"\"\n    \n    def actions(self, state):\n        \"\"\"The actions executable in this state.\"\"\"\n        jugs = range(len(state))\n        return ([('Fill', i) for i in jugs if state[i] != self.capacities[i]] +\n                [('Dump', i) for i in jugs if state[i] != 0] +\n                [('Pour', i, j) for i in jugs for j in jugs if i != j])\n\n    def result(self, state, action):\n        \"\"\"The state that results from executing this action in this state.\"\"\"\n        result = list(state)\n        act, i, j = action[0], action[1], action[-1]\n        if act == 'Fill': # Fill i to capacity\n            result[i] = self.capacities[i]\n        elif act == 'Dump': # Empty i\n            result[i] = 0\n        elif act == 'Pour':\n            a, b = state[i], state[j]\n            result[i], result[j] = ((0, a + b) \n                                    if (a + b <= self.capacities[j]) else\n                                    (a + b - self.capacities[j], self.capacities[j]))\n        else:\n            raise ValueError('unknown action', action)\n        return tuple(result)\n\n    def is_goal(self, state):\n        \"\"\"True if any of the jugs has a level equal to one of the goal levels.\"\"\"\n        return any(level in self.goals for level in state)", "_____no_output_____" ], [ "p7 = PourProblem(initial=(2, 0), capacities=(5, 13), goals={7})\np7.result((2, 0), ('Fill', 1))", "_____no_output_____" ], [ "result = uniform_cost_search(p7)\naction_sequence(result)", "_____no_output_____" ] ], [ [ "# Visualization Output", "_____no_output_____" ] ], [ [ "def showpath(searcher, problem):\n    \"Show what happens when searcher solves problem.\"\n    problem = Instrumented(problem)\n    print('\\n{}:'.format(searcher.__name__))\n    result = searcher(problem)\n    if result:\n        actions = action_sequence(result)\n        state = problem.initial\n        path_cost = 0\n        for steps, action in enumerate(actions, 1):\n            path_cost += problem.step_cost(state, action, 0)\n            result = problem.result(state, action)\n            print('  {} =={}==> {}; cost {} after {} steps{}'\n                  .format(state, action, result, path_cost, steps,\n                          '; GOAL!' 
if problem.is_goal(result) else ''))\n state = result\n msg = 'GOAL FOUND' if result else 'no solution'\n print('{} after {} results and {} goal checks'\n .format(msg, problem._counter['result'], problem._counter['is_goal']))\n \nfrom collections import Counter\n\nclass Instrumented:\n \"Instrument an object to count all the attribute accesses in _counter.\"\n def __init__(self, obj):\n self._object = obj\n self._counter = Counter()\n def __getattr__(self, attr):\n self._counter[attr] += 1\n return getattr(self._object, attr) ", "_____no_output_____" ], [ "showpath(uniform_cost_search, p7)", "_____no_output_____" ], [ "p = PourProblem(initial=(0, 0), capacities=(7, 13), goals={2})\nshowpath(uniform_cost_search, p)", "_____no_output_____" ], [ "class GreenPourProblem(PourProblem): \n def step_cost(self, state, action, result=None):\n \"The cost is the amount of water used in a fill.\"\n if action[0] == 'Fill':\n i = action[1]\n return self.capacities[i] - state[i]\n return 0", "_____no_output_____" ], [ "p = GreenPourProblem(initial=(0, 0), capacities=(7, 13), goals={2})\nshowpath(uniform_cost_search, p)", "_____no_output_____" ], [ "def compare_searchers(problem, searchers=None):\n \"Apply each of the search algorithms to the problem, and show results\"\n if searchers is None: \n searchers = (breadth_first_search, uniform_cost_search)\n for searcher in searchers:\n showpath(searcher, problem)", "_____no_output_____" ], [ "compare_searchers(p)", "_____no_output_____" ] ], [ [ "# Random Grid\n\nAn environment where you can move in any of 4 directions, unless there is an obstacle there.\n\n\n\n", "_____no_output_____" ] ], [ [ "import random\n\nN, S, E, W = DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\ndef Grid(width, height, obstacles=0.1):\n \"\"\"A 2-D grid, width x height, with obstacles that are either a collection of points,\n or a fraction between 0 and 1 indicating the density of obstacles, chosen at random.\"\"\"\n grid = {(x, y) for x in range(width) for y in range(height)}\n if isinstance(obstacles, (float, int)):\n obstacles = random.sample(grid, int(width * height * obstacles))\n def neighbors(x, y):\n for (dx, dy) in DIRECTIONS:\n (nx, ny) = (x + dx, y + dy)\n if (nx, ny) not in obstacles and 0 <= nx < width and 0 <= ny < height:\n yield (nx, ny)\n return {(x, y): list(neighbors(x, y))\n for x in range(width) for y in range(height)}\n\nGrid(5, 5)", "_____no_output_____" ], [ "class GridProblem(Problem):\n \"Create with a call like GridProblem(grid=Grid(10, 10), initial=(0, 0), goal=(9, 9))\"\n def actions(self, state): return DIRECTIONS\n def result(self, state, action):\n #print('ask for result of', state, action)\n (x, y) = state\n (dx, dy) = action\n r = (x + dx, y + dy)\n return r if r in self.grid[state] else state", "_____no_output_____" ], [ "gp = GridProblem(grid=Grid(5, 5, 0.3), initial=(0, 0), goals={(4, 4)})\nshowpath(uniform_cost_search, gp)\n", "_____no_output_____" ] ], [ [ "# Finding a hard PourProblem\n\nWhat solvable two-jug PourProblem requires the most steps? 
We can define the hardness as the number of steps, and then iterate over all PourProblems with capacities up to size M, keeping the hardest one.", "_____no_output_____" ] ], [ [ "def hardness(problem):\n L = breadth_first_search(problem)\n #print('hardness', problem.initial, problem.capacities, problem.goals, L)\n return len(action_sequence(L)) if (L is not None) else 0", "_____no_output_____" ], [ "hardness(p7)", "_____no_output_____" ], [ "action_sequence(breadth_first_search(p7))", "_____no_output_____" ], [ "C = 9 # Maximum capacity to consider\n\nphard = max((PourProblem(initial=(a, b), capacities=(A, B), goals={goal})\n for A in range(C+1) for B in range(C+1)\n for a in range(A) for b in range(B)\n for goal in range(max(A, B))),\n key=hardness)\n\nphard.initial, phard.capacities, phard.goals", "_____no_output_____" ], [ "showpath(breadth_first_search, PourProblem(initial=(0, 0), capacities=(7, 9), goals={8}))", "_____no_output_____" ], [ "showpath(uniform_cost_search, phard)", "_____no_output_____" ], [ "class GridProblem(Problem):\n \"\"\"A Grid.\"\"\"\n\n def actions(self, state): return ['N', 'S', 'E', 'W'] \n \n def result(self, state, action):\n \"\"\"The state that results from executing this action in this state.\"\"\" \n (W, H) = self.size\n if action == 'N' and state > W: return state - W\n if action == 'S' and state + W < W * W: return state + W\n if action == 'E' and (state + 1) % W !=0: return state + 1\n if action == 'W' and state % W != 0: return state - 1\n return state", "_____no_output_____" ], [ "compare_searchers(GridProblem(initial=0, goals={44}, size=(10, 10)))", "_____no_output_____" ], [ "def test_frontier():\n \n #### Breadth-first search with FIFO Q\n f = FrontierQ(Node(1), LIFO=False)\n assert 1 in f and len(f) == 1\n f.add(Node(2))\n f.add(Node(3))\n assert 1 in f and 2 in f and 3 in f and len(f) == 3\n assert f.pop().state == 1\n assert 1 not in f and 2 in f and 3 in f and len(f) == 2\n assert f\n assert f.pop().state == 2\n assert f.pop().state == 3\n assert not f\n \n #### Depth-first search with LIFO Q\n f = FrontierQ(Node('a'), LIFO=True)\n for s in 'bcdef': f.add(Node(s))\n assert len(f) == 6 and 'a' in f and 'c' in f and 'f' in f\n for s in 'fedcba': assert f.pop().state == s\n assert not f\n\n #### Best-first search with Priority Q\n f = FrontierPQ(Node(''), lambda node: len(node.state))\n assert '' in f and len(f) == 1 and f\n for s in ['book', 'boo', 'bookie', 'bookies', 'cook', 'look', 'b']:\n assert s not in f\n f.add(Node(s))\n assert s in f\n assert f.pop().state == ''\n assert f.pop().state == 'b'\n assert f.pop().state == 'boo'\n assert {f.pop().state for _ in '123'} == {'book', 'cook', 'look'}\n assert f.pop().state == 'bookie'\n \n #### Romania: Two paths to Bucharest; cheapest one found first\n S = Node('S')\n SF = Node('F', S, 'S->F', 99)\n SFB = Node('B', SF, 'F->B', 211)\n SR = Node('R', S, 'S->R', 80)\n SRP = Node('P', SR, 'R->P', 97)\n SRPB = Node('B', SRP, 'P->B', 101)\n f = FrontierPQ(S)\n f.add(SF); f.add(SR), f.add(SRP), f.add(SRPB); f.add(SFB)\n def cs(n): return (n.path_cost, n.state) # cs: cost and state\n assert cs(f.pop()) == (0, 'S')\n assert cs(f.pop()) == (80, 'R')\n assert cs(f.pop()) == (99, 'F')\n assert cs(f.pop()) == (177, 'P')\n assert cs(f.pop()) == (278, 'B')\n return 'test_frontier ok'\n\ntest_frontier()", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\np = plt.plot([i**2 for i in range(10)])\nplt.savefig('destination_path.eps', format='eps', dpi=1200)", "_____no_output_____" ], 
[ "import itertools\nimport random\n# http://stackoverflow.com/questions/10194482/custom-matplotlib-plot-chess-board-like-table-with-colored-cells\n\nfrom matplotlib.table import Table\n\ndef main():\n grid_table(8, 8)\n plt.axis('scaled')\n plt.show()\n\ndef grid_table(nrows, ncols):\n fig, ax = plt.subplots()\n ax.set_axis_off()\n colors = ['white', 'lightgrey', 'dimgrey']\n tb = Table(ax, bbox=[0,0,2,2])\n for i,j in itertools.product(range(ncols), range(nrows)):\n tb.add_cell(i, j, 2./ncols, 2./nrows, text='{:0.2f}'.format(0.1234), \n loc='center', facecolor=random.choice(colors), edgecolor='grey') # facecolors=\n ax.add_table(tb)\n #ax.plot([0, .3], [.2, .2])\n #ax.add_line(plt.Line2D([0.3, 0.5], [0.7, 0.7], linewidth=2, color='blue'))\n return fig\n\nmain()", "_____no_output_____" ], [ "import collections\nclass defaultkeydict(collections.defaultdict):\n \"\"\"Like defaultdict, but the default_factory is a function of the key.\n >>> d = defaultkeydict(abs); d[-42]\n 42\n \"\"\"\n def __missing__(self, key):\n self[key] = self.default_factory(key)\n return self[key]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6d7c7c7a491f54be866b8c8587e6ccd987c6c7
23,536
ipynb
Jupyter Notebook
R_Notebooks/r-notebook-experiment-on-orthogonal-learning.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
R_Notebooks/r-notebook-experiment-on-orthogonal-learning.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
R_Notebooks/r-notebook-experiment-on-orthogonal-learning.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
92.298039
8,814
0.826011
[ [ [ "# Simulation Design\n\nlibrary(hdm)\n\nset.seed(1)\nB= 100 # trials\nNaive = rep(0, B)\nOrthogonal = rep(0, B)\n\nfor (i in 1:B){\n \nn=10000\np= 100\nbeta = 1/(1:p)^2\ngamma =1/(1:p)^2\n\nX=matrix(rnorm(n*p), n, p)\n \n \nD= X%*%gamma + rnorm(n)/4\n\nY = D+ X%*%beta + rnorm(n)\n\n# single selection method\n\nSX.IDs = which(rlasso(Y~ D+X)$coef[-c(1,2)] !=0) #select covariates by Lasso\n\n\nif (sum(SX.IDs)==0) {Naive[i] = lm(Y~ D)$coef[2]}\n\nif (sum(SX.IDs)>0) {Naive[i] = lm(Y~ D + X[,SX.IDs])$coef[2]}\n\n\n\n#partialling out\n\nresY = rlasso(Y~ X, Post=F)$res\nresD = rlasso(D~ X, Post=F)$res\nOrthogonal[i]= lm(resY ~ resD)$coef[2]\n\n}\n", "_____no_output_____" ], [ "hist(Orthogonal-1,col=4, freq=F, xlim= c(-2, 2), xlab= \"Orhtogonal -True \", main=\"Orthogonal\")\nhist(Naive-1, col=2, freq=F, xlim= c(-2,2), xlab= \"Naive- True\", main = \"Naive\")\n", "_____no_output_____" ], [ "library(hdm)\n\nset.seed(1)\nB= 1000 # trials \nNaive = rep(0, B)\nOrthogonal = rep(0, B)\n\nfor (i in 1:B){\n \nn=100\np= 100\nbeta = 1/(1:p)^2\ngamma =1/(1:p)^2\n\nX=matrix(rnorm(n*p), n, p)\n \n \nD= X%*%gamma + rnorm(n)/4\n\nY = D+ X%*%beta + rnorm(n)\n\n# single selection method\n\nSX.IDs = which(rlasso(Y~ D+X)$coef[-c(1,2)] !=0) #select covariates by Lasso\n\n\nif (sum(SX.IDs)==0) {Naive[i] = lm(Y~ D)$coef[2]}\n\nif (sum(SX.IDs)>0) {Naive[i] = lm(Y~ D + X[,SX.IDs])$coef[2]}\n\n\n\n#partialling out\n\nresY = rlasso(Y~ X, Post=T)$res\nresD = rlasso(D~ X, Post=T)$res\nOrthogonal[i]= lm(resY ~ resD)$coef[2]\n\n}\n", "_____no_output_____" ], [ "hist(Orthogonal-1,col=4, freq=F, xlim= c(-2, 2), xlab= \"Orhtogonal -True \", main=\"Orthogonal\")\nhist(Naive-1, col=2, freq=F, xlim= c(-2,2), xlab= \"Naive- True\", main = \"Naive\")\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a6d80881d19c5267d7e52fa672c60f33d1356d0
15,007
ipynb
Jupyter Notebook
ForrestCoverType2018/hyper-parameter-optimisation.ipynb
mlisovyi/KagglePotpourri
79f9b07da53d023a2ebd80fcef1b7fa12d56d93a
[ "MIT" ]
2
2018-10-19T12:07:25.000Z
2022-01-21T05:03:47.000Z
ForrestCoverType2018/hyper-parameter-optimisation.ipynb
mlisovyi/KagglePotpourri
79f9b07da53d023a2ebd80fcef1b7fa12d56d93a
[ "MIT" ]
null
null
null
ForrestCoverType2018/hyper-parameter-optimisation.ipynb
mlisovyi/KagglePotpourri
79f9b07da53d023a2ebd80fcef1b7fa12d56d93a
[ "MIT" ]
null
null
null
15,007
15,007
0.618112
[ [ [ "# Hyper parameters\nThe goal here is to demonstrate how to optimise hyper-parameters of various models\n\nThe kernel is a short version of https://www.kaggle.com/mlisovyi/featureengineering-basic-model", "_____no_output_____" ] ], [ [ "max_events = None", "_____no_output_____" ], [ "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # needed for 3D scatter plots\n%matplotlib inline \nimport seaborn as sns\nimport gc\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nPATH='../input/'\n\nimport os\nprint(os.listdir(PATH))", "_____no_output_____" ] ], [ [ "Read in data", "_____no_output_____" ] ], [ [ "train = pd.read_csv('{}/train.csv'.format(PATH), nrows=max_events)\ntest = pd.read_csv('{}/test.csv'.format(PATH), nrows=max_events)\n\ny = train['Cover_Type']\ntrain.drop('Cover_Type', axis=1, inplace=True)\n\ntrain.drop('Id', axis=1, inplace=True)\ntest.drop('Id', axis=1, inplace=True)", "_____no_output_____" ], [ "print('Train shape: {}'.format(train.shape))\nprint('Test shape: {}'.format(test.shape))", "_____no_output_____" ], [ "train.info(verbose=False)", "_____no_output_____" ] ], [ [ "## OHE into LE", "_____no_output_____" ], [ "Helper function to transfer One-Hot Encoding (OHE) into a Label Encoding (LE). It was taken from https://www.kaggle.com/mlisovyi/lighgbm-hyperoptimisation-with-f1-macro\n\nThe reason to convert OHE into LE is that we plan to use a tree-based model and such models are dealing well with simple interger-label encoding. Note, that this way we introduce an ordering between categories, which is not there in reality, but in practice in most use cases GBMs handle it well anyway.", "_____no_output_____" ] ], [ [ "def convert_OHE2LE(df):\n tmp_df = df.copy(deep=True)\n for s_ in ['Soil_Type', 'Wilderness_Area']:\n cols_s_ = [f_ for f_ in df.columns if f_.startswith(s_)]\n sum_ohe = tmp_df[cols_s_].sum(axis=1).unique()\n #deal with those OHE, where there is a sum over columns == 0\n if 0 in sum_ohe:\n print('The OHE in {} is incomplete. 
A new column will be added before label encoding'\n .format(s_))\n # dummy colmn name to be added\n col_dummy = s_+'_dummy'\n # add the column to the dataframe\n tmp_df[col_dummy] = (tmp_df[cols_s_].sum(axis=1) == 0).astype(np.int8)\n # add the name to the list of columns to be label-encoded\n cols_s_.append(col_dummy)\n # proof-check, that now the category is complete\n sum_ohe = tmp_df[cols_s_].sum(axis=1).unique()\n if 0 in sum_ohe:\n print(\"The category completion did not work\")\n tmp_df[s_ + '_LE'] = tmp_df[cols_s_].idxmax(axis=1).str.replace(s_,'').astype(np.uint16)\n tmp_df.drop(cols_s_, axis=1, inplace=True)\n return tmp_df\n\n\n\ndef train_test_apply_func(train_, test_, func_):\n xx = pd.concat([train_, test_])\n xx_func = func_(xx)\n train_ = xx_func.iloc[:train_.shape[0], :]\n test_ = xx_func.iloc[train_.shape[0]:, :]\n\n del xx, xx_func\n return train_, test_", "_____no_output_____" ], [ "train_x, test_x = train_test_apply_func(train, test, convert_OHE2LE)", "_____no_output_____" ] ], [ [ "One little caveat: looking through the OHE, `Soil_Type 7, 15`, are present in the test, but not in the training data", "_____no_output_____" ], [ "The head of the training dataset", "_____no_output_____" ] ], [ [ "train_x.head()", "_____no_output_____" ] ], [ [ "# Let's do some feature engineering", "_____no_output_____" ] ], [ [ "def preprocess(df_):\n df_['fe_E_Min_02HDtH'] = (df_['Elevation']- df_['Horizontal_Distance_To_Hydrology']*0.2).astype(np.float32)\n df_['fe_Distance_To_Hydrology'] = np.sqrt(df_['Horizontal_Distance_To_Hydrology']**2 + \n df_['Vertical_Distance_To_Hydrology']**2).astype(np.float32)\n \n feats_sub = [('Elevation_Min_VDtH', 'Elevation', 'Vertical_Distance_To_Hydrology'),\n ('HD_Hydrology_Min_Roadways', 'Horizontal_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways'),\n ('HD_Hydrology_Min_Fire', 'Horizontal_Distance_To_Hydrology', 'Horizontal_Distance_To_Fire_Points')]\n feats_add = [('Elevation_Add_VDtH', 'Elevation', 'Vertical_Distance_To_Hydrology')]\n \n for f_new, f1, f2 in feats_sub:\n df_['fe_' + f_new] = (df_[f1] - df_[f2]).astype(np.float32)\n for f_new, f1, f2 in feats_add:\n df_['fe_' + f_new] = (df_[f1] + df_[f2]).astype(np.float32)\n \n # The feature is advertised in https://douglas-fraser.com/forest_cover_management.pdf\n df_['fe_Shade9_Mul_VDtH'] = (df_['Hillshade_9am'] * df_['Vertical_Distance_To_Hydrology']).astype(np.float32)\n \n # this mapping comes from https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info\n climatic_zone = {}\n geologic_zone = {}\n for i in range(1,41):\n if i <= 6:\n climatic_zone[i] = 2\n geologic_zone[i] = 7\n elif i <= 8:\n climatic_zone[i] = 3\n geologic_zone[i] = 5\n elif i == 9:\n climatic_zone[i] = 4\n geologic_zone[i] = 2\n elif i <= 13:\n climatic_zone[i] = 4\n geologic_zone[i] = 7\n elif i <= 15:\n climatic_zone[i] = 5\n geologic_zone[i] = 1\n elif i <= 17:\n climatic_zone[i] = 6\n geologic_zone[i] = 1\n elif i == 18:\n climatic_zone[i] = 6\n geologic_zone[i] = 7\n elif i <= 21:\n climatic_zone[i] = 7\n geologic_zone[i] = 1\n elif i <= 23:\n climatic_zone[i] = 7\n geologic_zone[i] = 2\n elif i <= 34:\n climatic_zone[i] = 7\n geologic_zone[i] = 7\n else:\n climatic_zone[i] = 8\n geologic_zone[i] = 7\n \n df_['Climatic_zone_LE'] = df_['Soil_Type_LE'].map(climatic_zone).astype(np.uint8)\n df_['Geologic_zone_LE'] = df_['Soil_Type_LE'].map(geologic_zone).astype(np.uint8)\n return df_", "_____no_output_____" ], [ "train_x = preprocess(train_x)\ntest_x = preprocess(test_x)", 
"_____no_output_____" ] ], [ [ "# Optimise various classifiers", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nimport lightgbm as lgb", "_____no_output_____" ] ], [ [ "We subtract 1 to have the labels starting with 0, which is required for LightGBM", "_____no_output_____" ] ], [ [ "y = y-1", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(train_x, y, test_size=0.15, random_state=315, stratify=y)", "_____no_output_____" ] ], [ [ "Parameters to be used in optimisation for various models", "_____no_output_____" ] ], [ [ "def learning_rate_decay_power_0995(current_iter):\n base_learning_rate = 0.15\n lr = base_learning_rate * np.power(.995, current_iter)\n return lr if lr > 1e-2 else 1e-2\n\nclfs = {'rf': (RandomForestClassifier(n_estimators=200, max_depth=1, random_state=314, n_jobs=4),\n {'max_depth': [20,25,30,35,40,45,50]}, \n {}),\n 'xt': (ExtraTreesClassifier(n_estimators=200, max_depth=1, max_features='auto',random_state=314, n_jobs=4),\n {'max_depth': [20,25,30,35,40,45,50]},\n {}),\n 'lgbm': (lgb.LGBMClassifier(max_depth=-1, min_child_samples=400, \n random_state=314, silent=True, metric='None', \n n_jobs=4, n_estimators=5000, learning_rate=0.1), \n {'colsample_bytree': [0.75], 'min_child_weight': [0.1,1,10], 'num_leaves': [18, 20,22], 'subsample': [0.75]}, \n {'eval_set': [(X_test, y_test)], \n 'eval_metric': 'multi_error', 'verbose':500, 'early_stopping_rounds':100, \n 'callbacks':[lgb.reset_parameter(learning_rate=learning_rate_decay_power_0995)]}\n )\n }", "_____no_output_____" ], [ "gss = {}\nfor name, (clf, clf_pars, fit_pars) in clfs.items():\n print('--------------- {} -----------'.format(name))\n gs = GridSearchCV(clf, param_grid=clf_pars,\n scoring='accuracy',\n cv=5,\n n_jobs=1,\n refit=True,\n verbose=True)\n gs = gs.fit(X_train, y_train, **fit_pars)\n print('{}: train = {:.4f}, test = {:.4f}+-{:.4f} with best params {}'.format(name, \n gs.cv_results_['mean_train_score'][gs.best_index_],\n gs.cv_results_['mean_test_score'][gs.best_index_],\n gs.cv_results_['std_test_score'][gs.best_index_],\n gs.best_params_\n ))\n print(\"Valid+-Std Train : Parameters\")\n for i in np.argsort(gs.cv_results_['mean_test_score'])[-5:]:\n print('{1:.3f}+-{3:.3f} {2:.3f} : {0}'.format(gs.cv_results_['params'][i], \n gs.cv_results_['mean_test_score'][i], \n gs.cv_results_['mean_train_score'][i],\n gs.cv_results_['std_test_score'][i]))\n gss[name] = gs", "_____no_output_____" ], [ "# gss = {}\n# for name, (clf, clf_pars, fit_pars) in clfs.items():\n# if name == 'lgbm':\n# continue\n# print('--------------- {} -----------'.format(name))\n# gs = GridSearchCV(clf, param_grid=clf_pars,\n# scoring='accuracy',\n# cv=5,\n# n_jobs=1,\n# refit=True,\n# verbose=True)\n# gs = gs.fit(X_train, y_train, **fit_pars)\n# print('{}: train = {:.4f}, test = {:.4f}+-{:.4f} with best params {}'.format(name, \n# gs.cv_results_['mean_train_score'][gs.best_index_],\n# gs.cv_results_['mean_test_score'][gs.best_index_],\n# gs.cv_results_['std_test_score'][gs.best_index_],\n# gs.best_params_\n# ))\n# print(\"Valid+-Std Train : Parameters\")\n# for i in np.argsort(gs.cv_results_['mean_test_score'])[-5:]:\n# print('{1:.3f}+-{3:.3f} {2:.3f} : 
{0}'.format(gs.cv_results_['params'][i], \n# gs.cv_results_['mean_test_score'][i], \n# gs.cv_results_['mean_train_score'][i],\n# gs.cv_results_['std_test_score'][i]))\n# gss[name] = gs", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a6d846307e56b06eb7e70aad45272335b01dc87
11,574
ipynb
Jupyter Notebook
nbs/07_clean.ipynb
hgzech/nbdev
5b6ae509bc63c1bd8afe0c8818a58484046bd35e
[ "Apache-2.0" ]
3,334
2019-11-18T17:43:16.000Z
2022-03-31T19:58:54.000Z
nbs/07_clean.ipynb
hgzech/nbdev
5b6ae509bc63c1bd8afe0c8818a58484046bd35e
[ "Apache-2.0" ]
572
2019-11-26T14:46:17.000Z
2022-03-28T01:41:56.000Z
nbs/07_clean.ipynb
hgzech/nbdev
5b6ae509bc63c1bd8afe0c8818a58484046bd35e
[ "Apache-2.0" ]
432
2019-11-20T05:17:00.000Z
2022-03-20T15:08:27.000Z
31.196765
434
0.497581
[ [ [ "#hide\n#default_exp clean\nfrom nbdev.showdoc import show_doc", "_____no_output_____" ], [ "#export\nimport io,sys,json,glob,re\nfrom fastcore.script import call_parse,Param,bool_arg\nfrom fastcore.utils import ifnone\nfrom nbdev.imports import Config\nfrom nbdev.export import nbglob\nfrom pathlib import Path", "_____no_output_____" ], [ "#hide\n#For tests only\nfrom nbdev.imports import *", "_____no_output_____" ] ], [ [ "# Clean notebooks\n\n> Strip notebooks from superfluous metadata", "_____no_output_____" ], [ "To avoid pointless conflicts while working with jupyter notebooks (with different execution counts or cell metadata), it is recommended to clean the notebooks before committing anything (done automatically if you install the git hooks with `nbdev_install_git_hooks`). The following functions are used to do that.", "_____no_output_____" ], [ "## Utils", "_____no_output_____" ] ], [ [ "#export\ndef rm_execution_count(o):\n \"Remove execution count in `o`\"\n if 'execution_count' in o: o['execution_count'] = None", "_____no_output_____" ], [ "#export\ncolab_json = \"application/vnd.google.colaboratory.intrinsic+json\"\ndef clean_output_data_vnd(o):\n \"Remove `application/vnd.google.colaboratory.intrinsic+json` in data entries\"\n if 'data' in o:\n data = o['data']\n if colab_json in data:\n new_data = {k:v for k,v in data.items() if k != colab_json}\n o['data'] = new_data", "_____no_output_____" ], [ "#export\ndef clean_cell_output(cell):\n \"Remove execution count in `cell`\"\n if 'outputs' in cell:\n for o in cell['outputs']:\n rm_execution_count(o)\n clean_output_data_vnd(o)\n o.get('metadata', o).pop('tags', None)", "_____no_output_____" ], [ "#export\ncell_metadata_keep = [\"hide_input\"]\nnb_metadata_keep = [\"kernelspec\", \"jekyll\", \"jupytext\", \"doc\"]", "_____no_output_____" ], [ "#export\ndef clean_cell(cell, clear_all=False):\n \"Clean `cell` by removing superfluous metadata or everything except the input if `clear_all`\"\n rm_execution_count(cell)\n if 'outputs' in cell:\n if clear_all: cell['outputs'] = []\n else: clean_cell_output(cell)\n if cell['source'] == ['']: cell['source'] = []\n cell['metadata'] = {} if clear_all else {k:v for k,v in cell['metadata'].items() if k in cell_metadata_keep}", "_____no_output_____" ], [ "tst = {'cell_type': 'code',\n 'execution_count': 26,\n 'metadata': {'hide_input': True, 'meta': 23},\n 'outputs': [{'execution_count': 2, \n 'data': {\n 'application/vnd.google.colaboratory.intrinsic+json': {\n 'type': 'string'},\n 'plain/text': ['sample output',]\n },\n 'output': 'super'}],\n \n 'source': 'awesome_code'}\ntst1 = tst.copy()\n\nclean_cell(tst)\ntest_eq(tst, {'cell_type': 'code',\n 'execution_count': None,\n 'metadata': {'hide_input': True},\n 'outputs': [{'execution_count': None, \n 'data': {'plain/text': ['sample output',]},\n 'output': 'super'}],\n 'source': 'awesome_code'})\n\nclean_cell(tst1, clear_all=True)\ntest_eq(tst1, {'cell_type': 'code',\n 'execution_count': None,\n 'metadata': {},\n 'outputs': [],\n 'source': 'awesome_code'})", "_____no_output_____" ], [ "tst2 = {\n 'metadata': {'tags':[]},\n 'outputs': [{\n 'metadata': {\n 'tags':[]\n }}],\n \n \"source\": [\n \"\"\n ]}\nclean_cell(tst2, clear_all=False)\ntest_eq(tst2, {\n 'metadata': {},\n 'outputs': [{\n 'metadata':{}}],\n 'source': []})", "_____no_output_____" ], [ "#export\ndef clean_nb(nb, clear_all=False):\n \"Clean `nb` from superfluous metadata, passing `clear_all` to `clean_cell`\"\n for c in nb['cells']: clean_cell(c, clear_all=clear_all)\n 
nb['metadata'] = {k:v for k,v in nb['metadata'].items() if k in nb_metadata_keep }", "_____no_output_____" ], [ "tst = {'cell_type': 'code',\n       'execution_count': 26,\n       'metadata': {'hide_input': True, 'meta': 23},\n       'outputs': [{'execution_count': 2,\n                    'data': {\n                        'application/vnd.google.colaboratory.intrinsic+json': {\n                            'type': 'string'},\n                        'plain/text': ['sample output',]\n                    },\n                    'output': 'super'}],\n       'source': 'awesome_code'}\nnb = {'metadata': {'kernelspec': 'some_spec', 'jekyll': 'some_meta', 'meta': 37},\n      'cells': [tst]}\n\nclean_nb(nb)\ntest_eq(nb['cells'][0], {'cell_type': 'code',\n       'execution_count': None,\n       'metadata': {'hide_input': True},\n       'outputs': [{'execution_count': None, \n                    'data': { 'plain/text': ['sample output',]},\n                    'output': 'super'}],\n       'source': 'awesome_code'})\ntest_eq(nb['metadata'], {'kernelspec': 'some_spec', 'jekyll': 'some_meta'})", "_____no_output_____" ], [ "#export\ndef _print_output(nb):\n    \"Print `nb` to stdout for git things\"\n    _output_stream = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n    x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)\n    _output_stream.write(x)\n    _output_stream.write(\"\\n\")\n    _output_stream.flush()", "_____no_output_____" ] ], [ [ "## Main function", "_____no_output_____" ] ], [ [ "#export\n@call_parse\ndef nbdev_clean_nbs(fname:Param(\"A notebook name or glob to convert\", str)=None,\n                    clear_all:Param(\"Clean all metadata and outputs\", bool_arg)=False,\n                    disp:Param(\"Print the cleaned outputs\", bool_arg)=False,\n                    read_input_stream:Param(\"Read input stream and not nb folder\")=False):\n    \"Clean all notebooks in `fname` to avoid merge conflicts\"\n    #Git hooks will pass the notebooks in the stdin\n    if read_input_stream and sys.stdin:\n        input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')\n        nb = json.load(input_stream)\n        clean_nb(nb, clear_all=clear_all)\n        _print_output(nb)\n        return\n    path = None\n    if fname is None:\n        try: path = get_config().path(\"nbs_path\")\n        except Exception as e: path = Path.cwd()\n    \n    files = nbglob(fname=ifnone(fname,path))\n    for f in files:\n        if not str(f).endswith('.ipynb'): continue\n        nb = json.loads(open(f, 'r', encoding='utf-8').read())\n        clean_nb(nb, clear_all=clear_all)\n        if disp: _print_output(nb)\n        else:\n            x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)\n            with io.open(f, 'w', encoding='utf-8') as f:\n                f.write(x)\n                f.write(\"\\n\")", "_____no_output_____" ] ], [ [ "By default (`fname` left to `None`), all the notebooks in `lib_folder` are cleaned. You can opt in to fully clean the notebook by removing every bit of metadata and the cell outputs by passing `clear_all=True`. `disp` is only for internal use with git hooks and will print the clean notebook instead of saving it. Same for `read_input_stream`, which will read the notebook from the input stream instead of the file names.", "_____no_output_____" ], [ "## Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_export.ipynb.\nConverted 01_sync.ipynb.\nConverted 02_showdoc.ipynb.\nConverted 03_export2html.ipynb.\nConverted 04_test.ipynb.\nConverted 05_merge.ipynb.\nConverted 06_cli.ipynb.\nConverted 07_clean.ipynb.\nConverted 99_search.ipynb.\nConverted example.ipynb.\nConverted index.ipynb.\nConverted nbdev_comments.ipynb.\nConverted tutorial.ipynb.\nConverted tutorial_colab.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a6d895f544593617c6ee3b78e06ab496dbaac6c
7,424
ipynb
Jupyter Notebook
notebook/openCV Setup.ipynb
ricklon/opencvraspberrypi
4f748be523f3de7009dddb1edb4e45cac8f5c2db
[ "Apache-2.0" ]
1
2019-03-15T18:31:12.000Z
2019-03-15T18:31:12.000Z
notebook/openCV Setup.ipynb
ricklon/opencvraspberrypi
4f748be523f3de7009dddb1edb4e45cac8f5c2db
[ "Apache-2.0" ]
null
null
null
notebook/openCV Setup.ipynb
ricklon/opencvraspberrypi
4f748be523f3de7009dddb1edb4e45cac8f5c2db
[ "Apache-2.0" ]
2
2017-02-17T00:18:03.000Z
2021-05-23T14:49:25.000Z
25.166102
105
0.520609
[ [ [ "# openCV Configure for Raspberry PI\n\nWhat is openCV?\n* Collection of computer vision tools in one place\n* Computational photography to object detection\n\nWhere is openCV?\n* http://opencv.org/\n\nWhat resources did I use?\n* http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/\n* http://www.pyimagesearch.com/2016/11/21/raspbian-opencv-pre-configured-and-pre-installed/\n\nThe step by step of getting it going.\n\n1. Make sure we have enough room.\n* ```df -h```\n* expand the file system with\n* ```sudo raspi-config```\n\n1. Make room with removing the wolfram image\n* ```sudo apt-get purge wolfram-engine```\n\n## Install the tools\n1. Dependencies \n```\nsudo apt-get update\nsudo apt-get upgrade\n```\n\n\nMake sure all the dev depencies for Python are installed\n```\nsudo apt-get install python3-dev\nsudo apt install python3-matplotlib\n\n```\n\n```\nsudo pip3 opencv-contrib-python\n\n```\n\nScripts\n\nInitial\n\n```\nsudo apt-get install build-essential cmake pkg-config\nsudo apt-get install libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev\nsudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev\nsudo apt-get install libxvidcore-dev libx264-dev\nsudo apt-get install libgtk2.0-dev libgtk-3-dev\nsudo apt-get install libatlas-base-dev gfortran\n\n```\n\nExtras just in case for camera and qt\n```\nsudo apt-get install libqtgui4\nsudo modprobe bcm2835-v4l2\nsudo apt-get install libqt4-test\n```\n\nRequired but not clearly needed until runtime\n```\nsudo apt-get install libhdf5-dev\nsudo apt-get install libhdf5-serial-dev\n\n```\n\n\n### Old origninal\n-----\n\nCMake is needed\n\n```\nsudo apt-get install build-essential cmake pkg-config\n```\n\nImage file support\n```\nsudo apt-get install libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev\n```\n\nVideo I/O packages\n```\nsudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev\nsudo apt-get install libxvidcore-dev libx264-dev\n```\n\nhighGUI gto depedencies\n```\nsudo apt-get install libgtk2.0-dev\n```\n\nFORTRAN optimation matrix \n```\nsudo apt-get install libatlas-base-dev gfortran\n```\n\n\n", "_____no_output_____" ] ], [ [ "## Get the source code openCV 3.2\n\nCreate a directory\n```\ncd ~\nmkdir opencv\n```\n\n```\nwget -O opencv.zip https://github.com/Itseez/opencv/archive/3.2.0.zip\nunzip opencv.zip\n```\n\n```\nwget -O opencv_contrib.zip https://github.com/Itseez/opencv_contrib/archive/3.2.0.zip\nunzip opencv_contrib.zip\n\n```\n", "_____no_output_____" ] ], [ [ "# setup virtualenv\n\n```\nsudo pip3 install virtualenv virtualenvwrapper\nsudo rm -rf ~/.cache/pip\n```\n\nAdd this to your .profile\n```\n# virtualenv and virtualenvwrapper\nexport WORKON_HOME=$HOME/.virtualenvs\nsource /usr/local/bin/virtualenvwrapper.sh\n\n```\n\nCreate the virtualenv for opencv for python3\n```\nmkvirtualenv cv -p python3\n\n```\n\nUpdate the environment\n```\nsource ~/.profile\nworkon cv\n```\n\nNow you are ready to start compiling.\n\n", "_____no_output_____" ], [ "#Set up python in the virtualenv\n* Good place to start running tmux\n\nMake sure you see the prompt:\n```\n(cv) pi@cvpi:~/opencv $\n```\n\nInstall numpy\n```\npip3 install numpy\n```\n\n\n\n", "_____no_output_____" ], [ "#compile and isntall opencv\n* get tmux going\n\n```\nworkon cv\ncd ~/opencv/opencv-3.2.0/\n$ mkdir build\n$ cd build\n$ cmake -D CMAKE_BUILD_TYPE=RELEASE \\\n -D CMAKE_INSTALL_PREFIX=/usr/local \\\n -D INSTALL_PYTHON_EXAMPLES=ON \\\n -D 
OPENCV_EXTRA_MODULES_PATH=~/opencv/opencv_contrib-3.2.0/modules \\\n    -D BUILD_EXAMPLES=ON ..\n```\nFinally, make it:\n\n```\nmake -j4\n\n```\n", "_____no_output_____" ], [ "```\nsudo make install\nsudo ldconfig\n```\n\n```\n(cv) pi@cvpi:~/opencv/opencv-3.2.0/build $ ls -l /usr/local/lib/python3.4/site-p\nackages/\ntotal 3212\n-rw-r--r-- 1 root staff 3287708 Feb 12 04:35 cv2.cpython-34m.so\n```\n\n```\nDo you really want to exit ([y]/n)? y [11/1984]\n(cv) pi@cvpi:~/opencv/opencv-3.2.0/build $ cd /usr/local/lib/python3.4/site-pack\nages/\n(cv) pi@cvpi:/usr/local/lib/python3.4/site-packages $ sudo mv cv2.cpython-34m.so\n cv2.so\n(cv) pi@cvpi:/usr/local/lib/python3.4/site-packages $ cd ~/.virtualenvs/cv/lib/p\nython3.4/site-packages/\n(cv) pi@cvpi:~/.virtualenvs/cv/lib/python3.4/site-packages $ ln -s /usr/local/li\nb/python3.4/site-packages/cv2.so cv2.so\n(cv) pi@cvpi:~/.virtualenvs/cv/lib/python3.4/site-packages $ source ~/.profile\npi@cvpi:~ $ workon cv\n```\n\n```\nbject? -> Details about 'object', use 'object??' for extra details.\n\nIn [1]: import cv2\n\nIn [2]: cv2.__version__\nOut[2]: '3.2.0'\n\n```\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
4a6d8a14fd97769189380cf70283a542f528046a
105,709
ipynb
Jupyter Notebook
experiments/.ipynb_checkpoints/comparing_concept_RF_DT-checkpoint.ipynb
DataSystemsGroupUT/ACDTE
cf51f3b325dfca686b22df0c56444b01772347dc
[ "MIT" ]
null
null
null
experiments/.ipynb_checkpoints/comparing_concept_RF_DT-checkpoint.ipynb
DataSystemsGroupUT/ACDTE
cf51f3b325dfca686b22df0c56444b01772347dc
[ "MIT" ]
null
null
null
experiments/.ipynb_checkpoints/comparing_concept_RF_DT-checkpoint.ipynb
DataSystemsGroupUT/ACDTE
cf51f3b325dfca686b22df0c56444b01772347dc
[ "MIT" ]
null
null
null
30.72043
477
0.514705
[ [ [ "%tensorflow_version 1.x", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport sklearn\nimport sklearn.metrics\nfrom sklearn import tree\nfrom matplotlib import pyplot as plt\n\n%load_ext autoreload\n%autoreload 2\n\nimport torch\nfrom torch.autograd import Variable as V\nimport torchvision.models as models\nfrom torchvision import transforms as trn\nfrom torch.nn import functional as F\nimport torch.nn as nn\nimport os\nfrom PIL import Image\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "file_name = 'categories_places365.txt'\nif not os.access(file_name, os.W_OK):\n synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'\n os.system('wget ' + synset_url)\nclasses = list()\nwith open(file_name) as class_file:\n for line in class_file:\n classes.append(line.strip().split(' ')[0][3:])\nclasses = np.array(classes)", "_____no_output_____" ], [ "arch = 'resnet50'\nmodel_file = f'{arch}_places365.pth.tar'\nif not os.access(model_file, os.W_OK):\n weight_url = 'http://places2.csail.mit.edu/models_places365/' + model_file\n os.system('wget ' + weight_url)\n\nmodel = models.__dict__[arch](num_classes=365)\ncheckpoint = torch.load(model_file, map_location=lambda storage, loc: storage)\nstate_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}\nmodel.load_state_dict(state_dict)", "_____no_output_____" ], [ "model.eval()\nfor param in model.parameters():\n param.requires_grad = False\n'convert model to evaluation mode with no grad'", "_____no_output_____" ], [ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ], [ "!cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/resized_imgs.pkl /content/\n!cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/masks.pkl /content/\n!cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/classes.pkl /content/", "_____no_output_____" ], [ "import pickle\nwith open('masks.pkl', 'rb') as f:\n masks = pickle.load(f)\nwith open('resized_imgs.pkl', 'rb') as f:\n imgs = pickle.load(f)\nwith open('classes.pkl', 'rb') as f:\n labels = np.array(pickle.load(f))", "_____no_output_____" ], [ "def get_segments(img, mask, threshold = 0.05):\n ade_classes = pd.read_csv('https://raw.githubusercontent.com/CSAILVision/sceneparsing/master/objectInfo150.csv')\n segs = np.unique(mask)\n segments = []\n total = mask.shape[0]*mask.shape[1]\n segments_classes = []\n for seg in segs:\n idxs = mask==seg\n sz = np.sum(idxs)\n if sz < threshold*total:\n continue\n segment = img*idxs[..., None]\n w, h, _ = np.nonzero(segment)\n segment = segment[np.min(w):np.max(w),np.min(h):np.max(h),:]\n segments.append(segment)\n segments_classes.append(ade_classes['Name'].loc[ade_classes['Idx']==seg].iloc[0])\n return segments, segments_classes", 
"_____no_output_____" ], [ "img_segments = []\nimg_segments_classes = []\nfor img, msk in zip(imgs, masks):\n segss, seg_class = get_segments(np.array(img), msk, threshold = 0.005)\n img_segments_classes.append(seg_class)\n img_segments.append(segss)", "_____no_output_____" ], [ "centre_crop = trn.Compose([\n trn.Resize((256,256)),\n trn.CenterCrop(224),\n trn.ToTensor(),\n # trn.Normalize([0, 0, 0], [255, 255, 225]),\n trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])", "_____no_output_____" ], [ "outputs = []\ndef hook(module, input, output):\n x = nn.AdaptiveAvgPool2d(1)(output)\n outputs.append(x.cpu().numpy().squeeze())\nfull_model = model\nhandle = full_model.layer4[2].register_forward_hook(hook)\nfull_model.to(device)\n'finished the full model with hook attached'", "_____no_output_____" ], [ "chosen_classes = ['street', 'bedroom', 'living_room', 'bathroom', 'kitchen', \n 'skyscraper', 'highway', 'conference_room', 'mountain_snowy', 'office',\n 'corridor', 'airport_terminal', 'attic', 'mountain', 'park', 'coast', \n 'alley','beach', 'childs_room', 'art_gallery','castle', 'dorm_room', \n 'nursery', 'lobby', 'reception', 'bar', 'house', 'bridge', 'classroom']\nnum_classes = len(chosen_classes)\nidxs = []\nc = 0\nfor ccls in chosen_classes:\n idx = np.argwhere(classes == ccls)\n if len(idx) == 0:\n print(f'class {ccls} is not found in places365, so we will use places365 alternate')\n c+=1\n else:\n idxs.append(idx[0][0])", "_____no_output_____" ], [ "idxs = []\nc = 0\nfor ccls in chosen_classes:\n idx = np.argwhere(classes == ccls)\n if len(idx) == 0:\n print(f'class {ccls} is not found in places365, so we will use places365 alternate')\n c+=1\n else:\n idxs.append(idx[0][0])", "_____no_output_____" ], [ "del outputs\noutputs = []\ny_model = []\nfor img in imgs:\n input_img = V(centre_crop(img).unsqueeze(0))\n input_img = input_img.to(device)\n pred = full_model.forward(input_img)\n y_model.append(classes[idxs][np.argmax((pred.cpu().detach().numpy()[:,idxs]))])\noutputs = np.array(outputs)\nimg_vectors = np.copy(outputs)", "_____no_output_____" ], [ "del outputs\noutputs = []\nimg_segmentsid = []\nsegments_classes = []\nfor i, img_seg in enumerate(img_segments):\n img_segmentsid.append((img_segmentsid[-1] if i>0 else 0) + len(img_seg))\n for seg in img_seg:\n img = Image.fromarray(seg, 'RGB')\n input_img = V(centre_crop(img).unsqueeze(0))\n input_img = input_img.to(device)\n pred = full_model.forward(input_img)\n segments_classes.append(classes[idxs][np.argmax((pred.cpu().detach().numpy()[:,idxs]))])\noutputs = np.array(outputs)\nall_vectors = np.copy(outputs)", "_____no_output_____" ], [ "feature_vectors = []\nfor i in range(len(img_segmentsid)):\n feature_vectors.append(all_vectors[(img_segmentsid[i-1] if i>0 else 0):img_segmentsid[i]])", "_____no_output_____" ], [ "segment_img = {}\nc = 0\nfor j, fvec in enumerate(feature_vectors): \n c_old = c\n c += len(fvec) if len(fvec) != 1024 else 1\n for i in range(c_old, c):\n segment_img[i] = j", "_____no_output_____" ], [ "import sklearn.metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "def cluster_top_k(cluster_id, k = 40):\n instances_ids = np.argwhere(clusters_labels==cluster_id).squeeze()\n instances = 
X[instances_ids]\n scores = []\n for inst in instances:\n scores.append(-kmeans.score([inst]))\n final_ids = np.argsort(scores)[:k]\n return instances_ids[final_ids]\n\ncluster_instances_id = lambda cid: np.argwhere(clusters_labels==cid).squeeze()\n\ndef is_single_img_clusters(cls_id):\n instances_ids = cluster_instances_id(cls_id)\n # instances_ids = cluster_representatives[cls_id]\n source_imgs = [segment_img[inst_id] for inst_id in instances_ids]\n unique_imgs = len(np.unique(source_imgs))\n return 0 if unique_imgs > 1 else 1", "_____no_output_____" ], [ "img_distances = sklearn.metrics.pairwise.euclidean_distances(img_vectors)", "_____no_output_____" ], [ "print('clustering the dataset ................')\nX = np.array(all_vectors)\nnum_segs = X.shape[0]\nk = int(num_segs**0.5)\nkmeans = KMeans(n_clusters=k, random_state=0).fit(X)\nclusters_labels = kmeans.labels_\nprint('\\ndone')\n\ncluster_names, cluster_counts = np.unique(clusters_labels, return_counts=1)\nprint('removing small clusters ................')\ncount_threshold = int(k*0.4)\nids = cluster_counts > count_threshold\ngood_clusters = cluster_names[ids]\ncluster_counts = cluster_counts[ids]\nprint(f'\\ntotal number of clusters {len(cluster_names)}')\nprint(f'total number of good clusters is {len(good_clusters)}')\nprint('\\ndone')\n\nprint('removing single image clusters ............')\nfinal_clusters = []\nfor j, cluster_id in enumerate(good_clusters):\n if is_single_img_clusters(cluster_id):\n continue\n final_clusters.append(cluster_id)\nfinal_clusters = np.array(final_clusters)\nprint('\\ndone')\n\nprint(\"getting clusters' representatives ............\")\ncluster_representatives = {}\nfor cluster_id in final_clusters:\n cluster_representatives[cluster_id] = cluster_top_k(cluster_id, k=40)\nprint('\\ndone')\n\nprint('building linear models for each concept/cluster ............')\nlinear_models = {}\nmodel_score = {}\nfor cluster in final_clusters:\n positive_instances = cluster_instances_id(cluster)\n negative_instances = np.argwhere(clusters_labels!=cluster).squeeze()\n num_samples = min(len(positive_instances), len(negative_instances))\n selected_pos = np.random.choice(positive_instances, num_samples, replace=False)\n selected_neg = np.random.choice(negative_instances, num_samples, replace=False)\n train_x = np.append(X[selected_pos], X[selected_neg], axis=0)\n train_y = np.array([1] * num_samples + [0] * num_samples)\n train_x, val_x, train_y, val_y = train_test_split(train_x, train_y)\n n_components = min(256, len(train_x))\n pca_model = PCA(n_components=n_components)\n lr_model = LogisticRegression()\n pca_lr_model = Pipeline(steps = [('pca', pca_model), ('lr', lr_model)])\n pca_lr_model.fit(train_x, train_y)\n linear_models[cluster] = pca_lr_model\n model_score[cluster] = pca_lr_model.score(val_x, val_y)\nprint('\\ndone')\n\nprint('removing low scoring clusters based on holdout accuracy ...............\\n')\nc = 0\nfor k, v in list(model_score.items()):\n if v < 0.75:\n c+=1\n print(f'removed cluster {k}')\n linear_models.pop(k, 'None')\n model_score.pop(k, 'None')\nprint(f'\\ndone\\nremoved {c} clusters\\n')\n\nprint('converting feature vectors to binary concept vectors .................')\nfor i, lm in enumerate(linear_models.keys()):\n if i == 0:\n concept_vecs = linear_models[lm].predict(img_vectors)[:, None]\n else:\n concept_vecs = np.concatenate((concept_vecs, linear_models[lm].predict(img_vectors)[:, None]), axis = -1)\nprint(f'\\nconcept vector dimension is\\t 
{concept_vecs.shape[0]}x{concept_vecs.shape[1]}')\nprint(f'original dimension is\\t\\t {img_vectors.shape[0]}x{img_vectors.shape[1]}')\nprint('\\ndone\\n')", "_____no_output_____" ], [ "random_seed = 0", "_____no_output_____" ], [ "print('training concept tree ..............')\ntrain_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2, random_state=random_seed)\nsurrogate_tree = DecisionTreeClassifier(max_depth = 20, random_state=random_seed)\nsurrogate_tree.fit(train_x, train_y)\ntrain_score = surrogate_tree.score(train_x, train_y)\ntest_score = surrogate_tree.score(val_x, val_y)\nprint(f'train accuracy:\\t {train_score}')\nprint(f'test accuracy:\\t {test_score}')\nprint('\\ndone\\n')", "training concept tree ..............\ntrain accuracy:\t 0.9308719560094265\ntest accuracy:\t 0.774294670846395\n\ndone\n\n" ], [ "print('training concept forest ..............')\ntrain_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2, random_state=random_seed)\nsurrogate_tree = RandomForestClassifier(max_depth = 20, random_state=random_seed)\nsurrogate_tree.fit(train_x, train_y)\ntrain_score = surrogate_tree.score(train_x, train_y)\ntest_score = surrogate_tree.score(val_x, val_y)\nprint(f'train accuracy:\\t {train_score}')\nprint(f'test accuracy:\\t {test_score}')\nprint('\\ndone\\n')", "training concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8181818181818182\n\ndone\n\n" ], [ "print('training tree on original vectors ..............')\ntrain_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2, random_state=random_seed)\nsurrogate_tree = DecisionTreeClassifier(max_depth = 20, random_state=random_seed)\nsurrogate_tree.fit(train_x, train_y)\ntrain_score = surrogate_tree.score(train_x, train_y)\ntest_score = surrogate_tree.score(val_x, val_y)\nprint(f'train accuracy:\\t {train_score}')\nprint(f'test accuracy:\\t {test_score}')\nprint('\\ndone\\n')", "training tree on original vectors ..............\ntrain accuracy:\t 0.8373919874312648\ntest accuracy:\t 0.6990595611285266\n\ndone\n\n" ], [ "print('training forest on original vectors ..............')\ntrain_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2, random_state=random_seed)\nsurrogate_tree = RandomForestClassifier(max_depth = 20, random_state=random_seed)\nsurrogate_tree.fit(train_x, train_y)\ntrain_score = surrogate_tree.score(train_x, train_y)\ntest_score = surrogate_tree.score(val_x, val_y)\nprint(f'train accuracy:\\t {train_score}')\nprint(f'test accuracy:\\t {test_score}')\nprint('\\ndone\\n')", "training forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9216300940438872\n\ndone\n\n" ], [ "trials = 100\ndtc = []\nrfc = []\ndto = []\nrfo = []\nfor i in range(trials):\n print('training concept tree ..............')\n train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2)\n surrogate_tree = DecisionTreeClassifier(max_depth = 20)\n surrogate_tree.fit(train_x, train_y)\n train_score = surrogate_tree.score(train_x, train_y)\n test_score = surrogate_tree.score(val_x, val_y)\n print(f'train accuracy:\\t {train_score}')\n print(f'test accuracy:\\t {test_score}')\n dtc.append([train_score, test_score])\n print('\\ndone\\n')\n print('training concept forest ..............')\n train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2)\n surrogate_tree = RandomForestClassifier(max_depth = 20)\n 
surrogate_tree.fit(train_x, train_y)\n train_score = surrogate_tree.score(train_x, train_y)\n test_score = surrogate_tree.score(val_x, val_y)\n print(f'train accuracy:\\t {train_score}')\n print(f'test accuracy:\\t {test_score}')\n rfc.append([train_score, test_score])\n print('\\ndone\\n')\n print('training tree on original vectors ..............')\n train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2)\n surrogate_tree = DecisionTreeClassifier(max_depth = 20)\n surrogate_tree.fit(train_x, train_y)\n train_score = surrogate_tree.score(train_x, train_y)\n test_score = surrogate_tree.score(val_x, val_y)\n print(f'train accuracy:\\t {train_score}')\n print(f'test accuracy:\\t {test_score}')\n dto.append([train_score, test_score])\n print('\\ndone\\n')\n print('training forest on original vectors ..............')\n train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2)\n surrogate_tree = RandomForestClassifier(max_depth = 20)\n surrogate_tree.fit(train_x, train_y)\n train_score = surrogate_tree.score(train_x, train_y)\n test_score = surrogate_tree.score(val_x, val_y)\n print(f'train accuracy:\\t {train_score}')\n print(f'test accuracy:\\t {test_score}')\n rfo.append([train_score, test_score])\n print('\\ndone\\n')", "training concept tree ..............\ntrain accuracy:\t 0.9332285938727416\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.8307210031347962\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8915946582875098\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9216300940438872\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9379418695993715\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8845247446975648\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9528672427336999\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8350353495679497\ntest accuracy:\t 0.677115987460815\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9467084639498433\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9363707776904949\ntest accuracy:\t 0.6990595611285266\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9717203456402199\ntest accuracy:\t 0.8025078369905956\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9253731343283582\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9278996865203761\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9450117831893166\ntest accuracy:\t 0.7492163009404389\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.974076983503535\ntest accuracy:\t 
0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9293008641005499\ntest accuracy:\t 0.7836990595611285\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8884524744697565\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.896551724137931\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9214454045561665\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.7931034482758621\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8279654359780048\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.896551724137931\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9245875883739199\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.974076983503535\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9591516103692066\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9332285938727416\ntest accuracy:\t 0.7774294670846394\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8213166144200627\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9638648860958366\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8871473354231975\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9395129615082483\ntest accuracy:\t 0.7366771159874608\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9002356637863315\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9216300940438872\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9308719560094265\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8656716417910447\ntest accuracy:\t 0.6833855799373041\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9435736677115988\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9041633935585232\ntest accuracy:\t 0.6802507836990596\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 
0.8213166144200627\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8782403770620582\ntest accuracy:\t 0.6990595611285266\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9404388714733543\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9591516103692066\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8672427336999214\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8798114689709348\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9402985074626866\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.8213166144200627\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9654359780047133\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9278996865203761\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9520816967792616\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.8244514106583072\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8672427336999214\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9402985074626866\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.8213166144200627\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9002356637863315\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9373040752351097\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9371563236449332\ntest accuracy:\t 0.7053291536050157\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.8307210031347962\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9002356637863315\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9371563236449332\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 
0.8432601880877743\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9065200314218381\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9347996857816182\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8130400628436764\ntest accuracy:\t 0.6332288401253918\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9404388714733543\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9418695993715632\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8463949843260188\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9245875883739199\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.7492163009404389\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.7879025923016496\ntest accuracy:\t 0.6269592476489029\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9379418695993715\ntest accuracy:\t 0.6677115987460815\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9183032207384132\ntest accuracy:\t 0.768025078369906\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9340141398271798\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9230164964650432\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9049489395129615\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.8307210031347962\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8318931657501963\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9167321288295365\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 
0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9080911233307148\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9293008641005499\ntest accuracy:\t 0.6927899686520376\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9528672427336999\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9373040752351097\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9269442262372348\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9795758051846033\ntest accuracy:\t 0.7836990595611285\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8711704634721131\ntest accuracy:\t 0.6739811912225705\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9300864100549883\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9654359780047133\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9316575019638649\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.774294670846395\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8185388845247447\ntest accuracy:\t 0.670846394984326\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.950510604870385\ntest accuracy:\t 0.6739811912225705\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8695993715632364\ntest accuracy:\t 0.7366771159874608\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9435736677115988\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9410840534171249\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.8369905956112853\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8766692851531814\ntest accuracy:\t 0.6802507836990596\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.890282131661442\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9630793401413983\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9827179890023566\ntest accuracy:\t 
0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8664571877454831\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9269442262372348\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.974076983503535\ntest accuracy:\t 0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9128043990573449\ntest accuracy:\t 0.7617554858934169\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9216300940438872\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7774294670846394\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9033778476040848\ntest accuracy:\t 0.7147335423197492\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8996865203761756\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9222309505106049\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9795758051846033\ntest accuracy:\t 0.7648902821316614\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.845247446975648\ntest accuracy:\t 0.677115987460815\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.890282131661442\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9112333071484682\ntest accuracy:\t 0.6865203761755486\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7931034482758621\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.835820895522388\ntest accuracy:\t 0.6300940438871473\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9308719560094265\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9277297721916732\ntest accuracy:\t 0.768025078369906\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9473684210526315\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.7931034482758621\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9512961508248232\ntest accuracy:\t 0.7648902821316614\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9341692789968652\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9347996857816182\ntest accuracy:\t 0.7366771159874608\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 
0.8025078369905956\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9457973291437549\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.896551724137931\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9410840534171249\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8892380204241949\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9450117831893166\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8318931657501963\ntest accuracy:\t 0.6363636363636364\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9104477611940298\ntest accuracy:\t 0.6802507836990596\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.8557993730407524\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8970934799685781\ntest accuracy:\t 0.7053291536050157\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9529780564263323\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9355852317360566\ntest accuracy:\t 0.7460815047021944\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9732914375490966\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8790259230164965\ntest accuracy:\t 0.6990595611285266\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9261586802827966\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.8338557993730408\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8311076197957581\ntest accuracy:\t 0.6677115987460815\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9373040752351097\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9206598586017282\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9269442262372348\ntest accuracy:\t 0.7617554858934169\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.896551724137931\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9300864100549883\ntest accuracy:\t 0.7147335423197492\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.974076983503535\ntest accuracy:\t 
0.8369905956112853\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8695993715632364\ntest accuracy:\t 0.677115987460815\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9544383346425765\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.8087774294670846\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9080911233307148\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.7836990595611285\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7774294670846394\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9261586802827966\ntest accuracy:\t 0.7836990595611285\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9175176747839748\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.768025078369906\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8743126472898665\ntest accuracy:\t 0.6426332288401254\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.890282131661442\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9583660644147682\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8025078369905956\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9238020424194815\ntest accuracy:\t 0.7899686520376176\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9489395129615082\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9803613511390417\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9654359780047133\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9373040752351097\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9426551453260016\ntest accuracy:\t 0.6990595611285266\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.7617554858934169\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9387274155538099\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9238020424194815\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 
0.8087774294670846\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9528672427336999\ntest accuracy:\t 0.7460815047021944\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9059561128526645\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9410840534171249\ntest accuracy:\t 0.7492163009404389\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8275862068965517\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9261586802827966\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8996865203761756\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.950510604870385\ntest accuracy:\t 0.7492163009404389\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9795758051846033\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9112333071484682\ntest accuracy:\t 0.7053291536050157\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8934169278996865\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9465828750981933\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8397486252945797\ntest accuracy:\t 0.6865203761755486\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9528672427336999\ntest accuracy:\t 0.7147335423197492\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.768025078369906\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8821681068342498\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9467084639498433\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9512961508248232\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8656716417910447\ntest accuracy:\t 0.6520376175548589\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8871473354231975\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9465828750981933\ntest accuracy:\t 0.7366771159874608\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8311076197957581\ntest accuracy:\t 0.664576802507837\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9096622152395916\ntest accuracy:\t 0.7053291536050157\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 
0.8244514106583072\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9033778476040848\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9135899450117831\ntest accuracy:\t 0.7178683385579937\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8931657501963864\ntest accuracy:\t 0.6739811912225705\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9418695993715632\ntest accuracy:\t 0.7178683385579937\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.7836990595611285\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8876669285153181\ntest accuracy:\t 0.7429467084639498\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9529780564263323\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9544383346425765\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.7711598746081505\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9662215239591516\ntest accuracy:\t 0.8025078369905956\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9497250589159466\ntest accuracy:\t 0.7492163009404389\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8609583660644148\ntest accuracy:\t 0.7147335423197492\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9300864100549883\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.768025078369906\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8499607227022781\ntest accuracy:\t 0.64576802507837\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9560094265514533\ntest accuracy:\t 0.7460815047021944\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9457973291437549\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9310344827586207\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.94344069128044\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 
0.8025078369905956\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8397486252945797\ntest accuracy:\t 0.6802507836990596\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9238020424194815\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9025923016496465\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9341692789968652\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9426551453260016\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7899686520376176\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9465828750981933\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9285153181461115\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9717203456402199\ntest accuracy:\t 0.7962382445141066\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9222309505106049\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9373040752351097\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9442262372348782\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8766692851531814\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9418695993715632\ntest accuracy:\t 0.7115987460815048\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8947368421052632\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9481539670070699\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8790259230164965\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9404388714733543\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9222309505106049\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 
0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8798114689709348\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8871473354231975\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.950510604870385\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9567949725058916\ntest accuracy:\t 0.8025078369905956\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9347996857816182\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9709347996857817\ntest accuracy:\t 0.8338557993730408\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8798114689709348\ntest accuracy:\t 0.6802507836990596\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9278996865203761\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9457973291437549\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.826394344069128\ntest accuracy:\t 0.6927899686520376\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8934169278996865\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9567949725058916\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9465828750981933\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9347996857816182\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8656716417910447\ntest accuracy:\t 0.6959247648902821\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9261586802827966\ntest accuracy:\t 0.6865203761755486\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8507462686567164\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9473684210526315\ntest accuracy:\t 0.6927899686520376\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 
0.8307210031347962\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9018067556952082\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9216300940438872\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9300864100549883\ntest accuracy:\t 0.6865203761755486\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 0.8087774294670846\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9497250589159466\ntest accuracy:\t 0.7774294670846394\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.896551724137931\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9387274155538099\ntest accuracy:\t 0.7366771159874608\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8025078369905956\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8703849175176748\ntest accuracy:\t 0.7586206896551724\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9402985074626866\ntest accuracy:\t 0.7241379310344828\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9795758051846033\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8798114689709348\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9341692789968652\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9481539670070699\ntest accuracy:\t 0.677115987460815\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.7711598746081505\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8947368421052632\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9575805184603299\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.974076983503535\ntest accuracy:\t 0.8463949843260188\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9387274155538099\ntest accuracy:\t 0.7648902821316614\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.877742946708464\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9528672427336999\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8476040848389631\ntest accuracy:\t 0.6614420062695925\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9253731343283582\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 
0.8056426332288401\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8138256087981147\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9269442262372348\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9709347996857817\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8782403770620582\ntest accuracy:\t 0.6896551724137931\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8934169278996865\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9324430479183032\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9795758051846033\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9049489395129615\ntest accuracy:\t 0.7460815047021944\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9379418695993715\ntest accuracy:\t 0.6677115987460815\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.7774294670846394\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.779261586802828\ntest accuracy:\t 0.6332288401253918\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9418695993715632\ntest accuracy:\t 0.7523510971786834\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8978790259230165\ntest accuracy:\t 0.799373040752351\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.8871473354231975\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9293008641005499\ntest accuracy:\t 0.7398119122257053\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9748625294579732\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9324430479183032\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9090909090909091\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9316575019638649\ntest accuracy:\t 0.7554858934169278\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9787902592301649\ntest accuracy:\t 0.786833855799373\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8491751767478397\ntest accuracy:\t 0.7053291536050157\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9184952978056427\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9183032207384132\ntest accuracy:\t 0.7210031347962382\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9780047132757266\ntest accuracy:\t 
0.7931034482758621\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9497250589159466\ntest accuracy:\t 0.7304075235109718\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9153605015673981\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9442262372348782\ntest accuracy:\t 0.7084639498432602\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.7899686520376176\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9395129615082483\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9247648902821317\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9395129615082483\ntest accuracy:\t 0.6739811912225705\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9756480754124116\ntest accuracy:\t 0.8119122257053292\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9410840534171249\ntest accuracy:\t 0.8150470219435737\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9028213166144201\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9175176747839748\ntest accuracy:\t 0.7335423197492164\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.9772191673212883\ntest accuracy:\t 0.8213166144200627\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.9426551453260016\ntest accuracy:\t 0.780564263322884\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\ntraining concept tree ..............\ntrain accuracy:\t 0.9418695993715632\ntest accuracy:\t 0.7272727272727273\n\ndone\n\ntraining concept forest ..............\ntrain accuracy:\t 0.97643362136685\ntest accuracy:\t 0.8181818181818182\n\ndone\n\ntraining tree on original vectors ..............\ntrain accuracy:\t 0.8538884524744698\ntest accuracy:\t 0.7021943573667712\n\ndone\n\ntraining forest on original vectors ..............\ntrain accuracy:\t 1.0\ntest accuracy:\t 0.9122257053291536\n\ndone\n\n" ], [ "print('\\nFor decision tree on concept vectors:')\nprint(f'\\ttrain accuracy: {np.mean(np.array(dtc)[:,0])} +- {np.std(np.array(dtc)[:,0])}')\nprint(f'\\ttest accuracy: {np.mean(np.array(dtc)[:,1])} +- {np.std(np.array(dtc)[:,1])}')\n\nprint('\\nFor random forest on concept vectors:')\nprint(f'\\ttrain accuracy: {np.mean(np.array(rfc)[:,0])} +- {np.std(np.array(rfc)[:,0])}')\nprint(f'\\ttest accuracy: {np.mean(np.array(rfc)[:,1])} +- {np.std(np.array(rfc)[:,1])}')\n\nprint('\\nFor decision tree on original vectors:')\nprint(f'\\ttrain accuracy: {np.mean(np.array(dto)[:,0])} +- {np.std(np.array(dto)[:,0])}')\nprint(f'\\ttest accuracy: {np.mean(np.array(dto)[:,1])} +- {np.std(np.array(dto)[:,1])}')\n\nprint('\\nFor random forest on original vectors:')\nprint(f'\\ttrain accuracy: {np.mean(np.array(rfo)[:,0])} +- {np.std(np.array(rfo)[:,0])}')\nprint(f'\\ttest accuracy: {np.mean(np.array(rfo)[:,1])} +- {np.std(np.array(rfo)[:,1])}')", "\nFor decision tree on concept vectors:\n\ttrain accuracy: 0.9373134328358208 +- 0.012822458697716078\n\ttest accuracy: 0.7214733542319749 +- 0.02454517429387165\n\nFor random forest on concept vectors:\n\ttrain accuracy: 0.9764650432050277 +- 0.002113454418953676\n\ttest accuracy: 
0.8025078369905956 +- 0.02133504642905775\n\nFor decision tree on original vectors:\n\ttrain accuracy: 0.8929615082482324 +- 0.043437583077835774\n\ttest accuracy: 0.7262382445141066 +- 0.04636119883122126\n\nFor random forest on original vectors:\n\ttrain accuracy: 1.0 +- 0.0\n\ttest accuracy: 0.915423197492163 +- 0.01642693052106462\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6d928004bec567512c8f1d4461fd23a35d7330
848,633
ipynb
Jupyter Notebook
SEM-EDS/EDS/Image_overlapping_Manual_correction.ipynb
xiaoyanLi629/coffee-ring-effect-method-optimization
92d8b74acbcb428262644255a4b1574438870c02
[ "MIT" ]
null
null
null
SEM-EDS/EDS/Image_overlapping_Manual_correction.ipynb
xiaoyanLi629/coffee-ring-effect-method-optimization
92d8b74acbcb428262644255a4b1574438870c02
[ "MIT" ]
null
null
null
SEM-EDS/EDS/Image_overlapping_Manual_correction.ipynb
xiaoyanLi629/coffee-ring-effect-method-optimization
92d8b74acbcb428262644255a4b1574438870c02
[ "MIT" ]
null
null
null
1,625.733716
395,276
0.958524
[ [ [ "### Load SEM image", "_____no_output_____" ], [ "Import packages", "_____no_output_____" ] ], [ [ "from PIL import Image\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport cv2\nimport copy", "_____no_output_____" ], [ "# from skimage import io\n# from skimage.io import imread, imshow\n# from skimage.filters import threshold_otsu\n# from skimage import color\n# from skimage.color import label2rgb\n# from numpy import percentile\n# from spade.detection_2d import spade2d\n# from spade.shapes.examples import potatoids5x5_smallest4px", "_____no_output_____" ] ], [ [ "## Read image as Pillow image format and convert to gray scale", "_____no_output_____" ], [ "Could use the ndarray imported in previous code and conver RGB to gray scale but need equation (gray = 0.2989 * r + 0.5870 * g + 0.1140 * b)", "_____no_output_____" ] ], [ [ "cmap = copy.copy(plt.cm.get_cmap(\"gray\"))\ncmap.set_bad(color='black')", "_____no_output_____" ], [ "folder = 'Combined_'\n\n# for folder_index in range(67, 68):\nfolder_index = 65\nfolder_name = 'Combined_' + chr(folder_index) + '/'\n# for num in range(10, 11):\nnum = 4\n\nsem_name = folder_name + 'SEM_' + str(num) + '.jpg'\nimage_name = folder_name + 'Image_' + str(num) + '.jpg'\nphoto_pattern_noise_removal = folder_name + 'Photo_pattern_' + str(num) + '.jpg'\n\nSEM_gray = Image.open(sem_name).convert('LA')\nphoto_gray = Image.open(image_name).convert('LA')\n\n# print('SEM_gray type:', type(SEM_gray))\n# print('SEM_gray size:', SEM_gray.size)\n\n# print('photo_gray:', type(photo_gray))\n# print('photo_gray:', photo_gray.size)\n\n# The converted ndarray from gray scale image will produce one extra layer\nSEM_gray_array = np.array(SEM_gray)[:, :, 0]\nphoto_gray_array = np.array(photo_gray)[:, :, 0]\n\n# np.rot90: counter clock wise\nphoto_gray_array = np.rot90(photo_gray_array)\nphoto_gray_array = np.rot90(photo_gray_array)\nphoto_gray_array = np.rot90(photo_gray_array)\n\n# # Crop the pattern from image\ntop_row = 370\nbot_row = 1000\n\nleft_col = 260\nright_col = 720\n\n# photo_gray_array_crop = photo_gray_array[330:920, 180:740]\nphoto_gray_array_crop = photo_gray_array[top_row:bot_row, left_col:right_col]\n# # Show SEM image with photo cropped in gray\n# plt.subplot(121)\n# plt.imshow(SEM_gray_array, cmap = cmap)\n# plt.subplot(122)\n# plt.imshow(photo_gray_array_crop, cmap = cmap)\n# plt.subplots_adjust(bottom=0.5, right=2, top=2)\n\n# print('SEM_gray_array:', SEM_gray_array.shape)\n# print('photo_gray_array_crop:', photo_gray_array_crop.shape)\n# # The image hasn't been resized\n\n# Resize the sem and image to overlap\n# Resizing ratio: photo image needs to be resized to match SEM image\nwidth = 2.72\nheight = 2.66\n\nSEM_gray_array_resize = cv2.resize(SEM_gray_array, dsize=(640, 480), interpolation=cv2.INTER_NEAREST)\nphoto_gray_array_crop_resize = cv2.resize(photo_gray_array_crop, dsize=(int((right_col-left_col)/4*width), int((bot_row-top_row)/4*height)), interpolation=cv2.INTER_NEAREST)\n\n# print('SEM_gray_array_resize type:', type(SEM_gray_array_resize))\n# print('photo_gray_array_crop_resize:', type(photo_gray_array_crop_resize))\n\n# # Show the SEM gray resized and photo gray ropped resized image\n\n# plt.subplot(121)\n# plt.imshow(SEM_gray_array_resize, cmap = cmap)\n# plt.subplot(122)\n# plt.imshow(photo_gray_array_crop_resize, cmap = cmap)\n# plt.subplots_adjust(bottom=0.5, right=2, top=2)\n\n# print('SEM_gray_array_resize size:', SEM_gray_array_resize.shape)\n# print('photo_gray_array_crop_resize size:', 
photo_gray_array_crop_resize.shape)\n\n# Convert sem and image to BW to overlap\nSEM_gray_array_resize_bw = np.where(SEM_gray_array_resize > 80, 1, 0)\nphoto_gray_array_crop_resize_bw = np.where(photo_gray_array_crop_resize > 40, 1, 0)\n\n# plt.subplot(121)\n# plt.imshow(SEM_gray_array_resize_bw, cmap = cmap)\n# plt.subplot(122)\n# plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap)\n\n# plt.subplots_adjust(bottom=0.5, right=2, top=2)\n\n# print('SEM_gray_array_resize_bw type:', type(SEM_gray_array_resize_bw))\n# print('SEM_gray_array_resize_bw size:', SEM_gray_array_resize_bw.shape)\n# print('photo_gray_array_crop_resize_bw type:', type(photo_gray_array_crop_resize_bw))\n# print('photo_gray_array_crop_resize_bw size:', photo_gray_array_crop_resize_bw.shape)\n\n# Remove noise of the SEM and Image file\n\nplt.subplot(121)\nplt.imshow(SEM_gray_array_resize_bw, cmap = cmap)\nplt.subplot(122)\nplt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap)\nplt.subplots_adjust(bottom=0.5, right=2, top=2)\n\n\n\n# Find the overlapping location", "_____no_output_____" ], [ "SEM_gray_array_resize_bw = 1 - SEM_gray_array_resize_bw\nplt.subplot(121)\nplt.imshow(SEM_gray_array_resize_bw, cmap = cmap)\nplt.subplot(122)\nplt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap)\nplt.subplots_adjust(bottom=0.5, right=2, top=2)", "_____no_output_____" ], [ "# diff = float('inf')\n# result = (0, 0)\n# data = []\n# count = 1\n# for i in range(0, SEM_gray_array_resize_bw.shape[0]-photo_gray_array_crop_resize_bw.shape[0], 1):\n# for j in range(0, SEM_gray_array_resize_bw.shape[1]-photo_gray_array_crop_resize_bw.shape[1], 1):\n\n# temp = np.array(SEM_gray_array_resize_bw)\n\n# temp[i:i+photo_gray_array_crop_resize_bw.shape[0], j:j+photo_gray_array_crop_resize_bw.shape[1]] = (\n# temp[i:i+photo_gray_array_crop_resize_bw.shape[0], j:j+photo_gray_array_crop_resize_bw.shape[1]] - \n# photo_gray_array_crop_resize_bw)\n\n# temp_diff = np.trace(temp @ temp.transpose())\n# data.append(temp_diff)\n# if count % 1000 == 0:\n# print('count:', count, 'row:', i, 'col:', j)\n# count = count + 1\n\n# if temp_diff < diff:\n# diff = temp_diff\n# result = i, j\n# # print('number of test:', len(data))\n# # print('Minimum of difference:', min(data))\n# # plt.plot(data)", "_____no_output_____" ], [ "result = [7, 150]\nrow = result[0]\ncol = result[1]\n\n# Show the SEM BW, image cropped BW, Combination of SEM and image cropped\n# sem_photo_combine = np.array(SEM_gray_array_resize_bw)\n# sem_photo_combine[result[0]:result[0]+photo_gray_array_crop_resize_bw.shape[0], result[1]:result[1]+photo_gray_array_crop_resize_bw.shape[1]] = photo_gray_array_crop_resize_bw\n\n# ax1 = plt.subplot(131)\n# plt.imshow(SEM_gray_array_resize_bw, cmap = cmap)\n# ax1.set_title(\"SEM black-white resized image\")\n\n# ax2 = plt.subplot(132)\n# plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap)\n# ax2.set_title(\"Photo cropped black white resized \")\n\n# ax3 = plt.subplot(133)\n# plt.imshow(sem_photo_combine, cmap = cmap)\n# ax3.set_title(\"Combination of SEM and photo image\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)\n\n# Creating the photo pattern 3D image\nSEM = Image.open(sem_name)\nphoto = Image.open(image_name)\n\nSEM_array = np.array(SEM)\nphoto_array = np.array(photo)\n\n# Crop the pattern in photo image and resize\nphoto_array = np.rot90(photo_array)\nphoto_array = np.rot90(photo_array)\nphoto_array = np.rot90(photo_array)\n\nphoto_array_crop = photo_array[top_row:bot_row, left_col:right_col, :]\n\nphoto_array_crop_resize = 
cv2.resize(photo_array_crop, dsize=(int((right_col-left_col)*width), int((bot_row-top_row)*height)), interpolation=cv2.INTER_NEAREST)\n\n# ax = plt.subplot(111)\n# plt.imshow(photo_array_crop_resize)\n# ax.set_title(\"Photo cropped resized 1600*1600\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)\n# Insert photo pattern data into created photo_pattern_match image\nphoto_pattern_match = np.zeros(SEM_array.shape)\nphoto_pattern_match[row*4:row*4+photo_array_crop_resize.shape[0], col*4:col*4+photo_array_crop_resize.shape[1], :] = photo_array_crop_resize\nphoto_pattern_match = photo_pattern_match.astype(np.uint8)\nplt.imshow(photo_pattern_match)\n\n\n\n# plt.imshow(photo_pattern_match)\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)\n\n# Show the SEM, photo pattern match and combined SEM and photo pattern match image\n# combine_sem_photo_pattern_match = 0.5 * SEM_array + 0.5 * photo_pattern_match\n\n# combine_sem_photo_pattern_match = combine_sem_photo_pattern_match.astype(np.uint8)\n\n# ax1 = plt.subplot(131)\n# plt.imshow(SEM_array)\n# ax1.set_title(\"SEM image\")\n\n# ax2 = plt.subplot(132)\n# plt.imshow(photo_pattern_match)\n# ax2.set_title(\"Photo pattern match image\")\n# # This image is created by cropping the droplet pattern (800*800) from photograph image\n# # and placed at the location as SEM image\n# # The cropped image was resized to 1600*1600 to fit the size\n# # The background are 0\n\n# ax3 = plt.subplot(133)\n# plt.imshow(combine_sem_photo_pattern_match, cmap = cmap)\n# ax3.set_title(\"Combination of SEM and photo image\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)\n\nfactor = 3\nphoto_pattern_match_noise_remove = cv2.medianBlur(photo_pattern_match, factor)\n\n# ax1 = plt.subplot(121)\n# plt.imshow(photo_pattern_match)\n# ax1.set_title(\"Photo pattern match image\")\n\n# ax2 = plt.subplot(122)\n# plt.imshow(photo_pattern_match_noise_remove)\n# ax2.set_title(\"Photo pattern noise Medium filter removed match image\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)\nImage.fromarray(photo_pattern_match_noise_remove).save(photo_pattern_noise_removal)\nprint('Finish sample ', str(num), ' in folder ', chr(folder_index))\n\n# photo_array_crop_resize = cv2.resize(photo_array_crop, dsize=(int((right_col-left_col)/4*width), int((bot_row-top_row)/4*height)), interpolation=cv2.INTER_NEAREST)\n# print(photo_array_crop_resize.shape)\n# print(photo_array_crop.shape)\n# print(590/4*2.66, 560/4*2.72)\n\n# print(photo_pattern_match.shape)\n# print(row, col)\n# print(photo_array_crop_resize.shape)\n# print(2560 - col*4)\n\n# print(SEM_gray_array_resize_bw.shape, photo_gray_array_crop_resize_bw.shape)", "Finish sample 4 in folder A\n" ] ], [ [ "#### Convert gray image to black and white to overlap", "_____no_output_____" ], [ "### Noise removal for EDS image", "_____no_output_____" ] ], [ [ "# factor = 3\n# Cl = cv2.imread('Cl Kα1.png')\n# # use INTER_NEAREST method to resize sem-eds image\n# # temp = np.zeros(Cl.shape)\n# # temp[:, :, 0] = cv2.resize(Cl[:, :, 0], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST)\n# # temp[:, :, 1] = cv2.resize(Cl[:, :, 1], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST)\n# # temp[:, :, 2] = cv2.resize(Cl[:, :, 2], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST)\n# # Cl = temp\n\n# Cl_noise_remove = cv2.medianBlur(Cl, factor)\n\n# ax1 = plt.subplot(121)\n# plt.imshow(Cl, cmap=cmap)\n# ax1.set_title(\"Cl EDS image\")\n\n# ax2 = plt.subplot(122)\n# plt.imshow(Cl_noise_remove, cmap=cmap)\n# ax2.set_title(\"Cl EDS median noise 
removal image\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)", "_____no_output_____" ], [ "# Cl_denoised = np.zeros(Cl.shape)\n\n# layer_0 = Cl[:, :, 0]\n# layer_1 = Cl[:, :, 1]\n# layer_2 = Cl[:, :, 2]\n\n\n# u, s, vh = np.linalg.svd(layer_0, full_matrices=False)\n# s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s]))\n# layer_0_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int)\n# Cl_denoised[:, :, 0] = layer_0_denoised\n\n# u, s, vh = np.linalg.svd(layer_1, full_matrices=False)\n# s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s]))\n# layer_1_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int)\n# Cl_denoised[:, :, 1] = layer_1_denoised\n\n# u, s, vh = np.linalg.svd(layer_2, full_matrices=False)\n# s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s]))\n# layer_2_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int)\n# Cl_denoised[:, :, 2] = layer_2_denoised\n\n# Cl_denoised = Cl_denoised.astype(np.uint8)\n\n# ax1 = plt.subplot(121)\n# plt.imshow(Cl, cmap = cmap)\n# ax1.set_title(\"Cl EDS image\")\n\n# ax2 = plt.subplot(122)\n# plt.imshow(Cl_denoised)\n# ax2.set_title(\"Cl noise SVD removed match image\")\n\n# plt.subplots_adjust(bottom=0, right=2.5, top=2)", "_____no_output_____" ], [ "print('process completed')", "process completed\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
4a6d9df7abd785309faa5b75a4f78ed692ba98cf
15,695
ipynb
Jupyter Notebook
zufall/mat/aufgaben13.ipynb
HBOMAT/AglaUndZufall
3976fecf024a5e4e771d37a6b8056ca4f7eb0da1
[ "Apache-2.0" ]
null
null
null
zufall/mat/aufgaben13.ipynb
HBOMAT/AglaUndZufall
3976fecf024a5e4e771d37a6b8056ca4f7eb0da1
[ "Apache-2.0" ]
null
null
null
zufall/mat/aufgaben13.ipynb
HBOMAT/AglaUndZufall
3976fecf024a5e4e771d37a6b8056ca4f7eb0da1
[ "Apache-2.0" ]
null
null
null
23.080882
934
0.468939
[ [ [ "# Materialien zu <i>zufall</i>\n\nAutor: Holger Böttcher - [email protected]\n\n## Aufgaben 13 - Simulation (Probleme von Leibniz <br>und de Méré)", "_____no_output_____" ], [ "<br>\n### Problem von Leibniz\nLeibniz nahm fälschlicherweise an, dass beim Werfen von 2 Würfeln die Augensumme<br>\n11 genau so oft auftritt wie die Augensumme 12<br>\n", "_____no_output_____" ] ], [ [ "%run zufall\\start", "_____no_output_____" ] ], [ [ "Die <b>exakten Wahrscheinlichkeiten</b> können z.B. so ermittellt werden", "_____no_output_____" ] ], [ [ "W2 = Würfel(2)", "Erzeugung eines ZufallsGröße-Objektes 'AugenSumme'\n" ], [ "p11 = W2.P(11); p11, W2.P(11, d=4)", "_____no_output_____" ], [ "p12 = W2.P(12); p12, W2.P(12, d=4)", "_____no_output_____" ] ], [ [ "Zur <b>Simulation</b> wird zunächst eine kleine Versuchsanzahl angenommen", "_____no_output_____" ] ], [ [ "n = 10", "_____no_output_____" ] ], [ [ "Einmaliges Werfen von zwei Würfeln wird so simuliert (die Funktion <i>zuf_zahl</i> <br>\nliefert hier zwei Zahlen, die jeweils dem Ergebnis eines Würfels entsprechen)\n</div>", "_____no_output_____" ] ], [ [ "zuf_zahl((1, 6), (1, 6)) # Anweisung mehrfach ausführen", "_____no_output_____" ] ], [ [ "$n$-maliges Werfen entsprechend", "_____no_output_____" ] ], [ [ "sim = zuf_zahl((1, 6), (1, 6), n); sim", "_____no_output_____" ] ], [ [ "Für jeden Wurf wird die Augensumme ermittelt", "_____no_output_____" ] ], [ [ "sim1 = [summe(x) for x in sim]; sim1", "_____no_output_____" ] ], [ [ "gezählt, wie oft 11 bzw. 12 auftritt und die entprechenden relativen Häufigkeiten<br>\nberechnet", "_____no_output_____" ] ], [ [ "anz11 = anzahl(11)(sim1); h11 = anz11 / n \nanz12 = anzahl(12)(sim1); h12 = anz12 / n", "_____no_output_____" ], [ "anz11, h11", "_____no_output_____" ], [ "anz12, h12", "_____no_output_____" ] ], [ [ "Zur Simulation mit großem $n$ können die obigen Anweisungen wiederholt werden, <br>\nnachdem $n$ auf den gewünschten Wert gesetzt wurde (die langen Ausgaben sind zu<br>\nunterdrücken)<br><br>\nHier werden sie zur bequemeren Handhabung in eine Prozedur geschrieben, wobei <br>\nauch die exakten Werte angegeben werden", "_____no_output_____" ] ], [ [ "def simulation1(n):\n sim = zuf_zahl((1, 6), (1, 6), n)\n sim = [summe(x) for x in sim]\n anz11 = anzahl(11)(sim)\n h11 = anz11 / n \n anz12 = anzahl(12)(sim)\n h12 = anz12 / n\n print('11: ' +str(N(h11, 6)) + ' exakt ' + str(N(p11, 6)))\n print('12: ' +str(N(h12, 6)) + ' exakt ' + str(N(p12, 6)))", "_____no_output_____" ], [ "simulation1(10000) # Anweisung mehrfach ausführen, auch mit größerem n", "11: 0.0546000 exakt 0.0555556\n12: 0.0258000 exakt 0.0277778\n" ] ], [ [ "<br>\n### Problem von de Méré\nEr glaubte, dass man bei 4-maligem Werfen eines Würfels ebenso oft eine 6 erhält<br>\nwie eine Doppelsechs bei 24 Würfen mit 2 Würfeln (die Annahme ist falsch)\n<br>", "_____no_output_____" ], [ "Die <b>exakten Werte</b> sind folgende<br>\n$P(\\text{mindestens eine Sechs}) = 1 - P(\\text{keine Sechs}) = 1-\\dfrac{5^4}{6^4} \\approx 0.518\\qquad$ <br>\nbeim 4-maligen Werfen eines Würfels <br>\n\n$P(\\text{mindestens eine Doppelsechs}) = 1 - P(\\text{keine Doppelsechs}) = 1-\\dfrac{35^{24}}{36^{24}} \\approx 0.491\\qquad$ <br>\nbeim 24-maligen Werfen von zwei Würfeln\n<br><br><br>\nZur <b>Simulation</b> wird zunächst ein kleiner Wert für $n$ angenommen und <br>\ndie Simulation entworfen", "_____no_output_____" ] ], [ [ "n = 5", "_____no_output_____" ] ], [ [ "### 1. 
\n4-maliges Werfen eines Würfels</b> und $n$ solche Versuche werden so simuliert", "_____no_output_____" ] ], [ [ "zuf_zahl((1, 6), 4)", "_____no_output_____" ], [ "sim = [ zuf_zahl((1, 6), 4) for i in range(n) ]; sim", "_____no_output_____" ] ], [ [ "Ermittlung der Anzahl von Versuchen, bei denen mindestens eine 6 aufgetreten ist<br>\nsowie der relativen Häufigkeit", "_____no_output_____" ] ], [ [ "sim1 = [ x for x in sim if anzahl(6)(x) > 0 ]; sim1 ", "_____no_output_____" ], [ "anzahl(sim1) / n ", "_____no_output_____" ] ], [ [ "Die Anweisungen als Prozedur für große $n$", "_____no_output_____" ] ], [ [ "def simulation2(n):\n sim = [ zuf_zahl((1, 6), 4) for i in range(n) ]\n sim = [ x for x in sim if anzahl(6)(x) > 0 ]\n print('4-mal 1 Würfel ' + str(N(anzahl(sim)/n, 6)) + ' exakt ' \\\n + str(N(1-5**4/6^4, 6)))", "_____no_output_____" ], [ "simulation2(10000)", "4-mal 1 Wuerfel 0.511600 exakt 0.517747\n" ] ], [ [ "### 2. \n24-maliges Werfen von 2 Würfeln</b> ", "_____no_output_____" ] ], [ [ "w24 = zuf_zahl((1, 6), (1, 6), 24); w24", "_____no_output_____" ], [ "anzahl([x for x in w24 if summe(x) == 12])", "_____no_output_____" ] ], [ [ "Prozedur für $n$ Versuche ", "_____no_output_____" ] ], [ [ "def simulation3(n):\n sim = [zuf_zahl((1, 6), (1, 6), 24) for i in range(n)]\n sim = [anzahl([x for x in y if summe(x) == 12]) for y in sim]\n anz = anzahl([x for x in sim if x > 0])\n print('24-mal 2 Würfel ' + str(N(anz/n, 6)) + ' exakt ' + str(N(1-35^24/36^24, 6)))", "_____no_output_____" ], [ "simulation3(10000)", "24-mal 2 Wuerfel 0.493500 exakt 0.491404\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6d9e3fd7949190d759321890f728c6a51330ad
30,522
ipynb
Jupyter Notebook
09-Photutils/photutils_overview.ipynb
eteq/astropy-workshop
2f1c04fb3fa0e5772a181ebe5e666cf805abc268
[ "BSD-3-Clause" ]
1
2019-12-10T19:45:03.000Z
2019-12-10T19:45:03.000Z
09-Photutils/photutils_overview.ipynb
eteq/astropy-workshop
2f1c04fb3fa0e5772a181ebe5e666cf805abc268
[ "BSD-3-Clause" ]
null
null
null
09-Photutils/photutils_overview.ipynb
eteq/astropy-workshop
2f1c04fb3fa0e5772a181ebe5e666cf805abc268
[ "BSD-3-Clause" ]
2
2019-09-30T01:37:34.000Z
2019-10-31T18:19:54.000Z
23.105223
214
0.557631
[ [ [ "<img src=\"data/photutils_banner.svg\">", "_____no_output_____" ], [ "## Photutils\n\n- Code: https://github.com/astropy/photutils\n- Documentation: http://photutils.readthedocs.org/en/stable/\n- Issue Tracker: https://github.com/astropy/photutils/issues", "_____no_output_____" ], [ "## Photutils Overview\n\n- Background and background noise estimation\n- Source Detection and Extraction\n - DAOFIND and IRAF's starfind\n - **Image segmentation**\n - local peak finder\n- **Aperture photometry**\n- PSF photometry\n- PSF matching\n- Centroids\n- Morphological properties\n- Elliptical isophote analysis\n\n", "_____no_output_____" ], [ "## Preliminaries", "_____no_output_____" ] ], [ [ "# initial imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# change some default plotting parameters\nimport matplotlib as mpl\nmpl.rcParams['image.origin'] = 'lower'\nmpl.rcParams['image.interpolation'] = 'nearest'\nmpl.rcParams['image.cmap'] = 'viridis'\n\n# Run the %matplotlib magic command to enable inline plotting\n# in the current notebook. Choose one of these:\n%matplotlib inline\n# %matplotlib notebook", "_____no_output_____" ] ], [ [ "### Load the data", "_____no_output_____" ], [ "We'll start by reading data and error arrays from FITS files. These are cutouts from the HST Extreme-Deep Field (XDF) taken with WFC3/IR in the F160W filter.", "_____no_output_____" ] ], [ [ "from astropy.io import fits\nsci_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_sci.fits'\nrms_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_rms.fits'\nsci_hdulist = fits.open(sci_fn)\nrms_hdulist = fits.open(rms_fn)\n\nsci_hdulist[0].header['BUNIT'] = 'electron/s'", "_____no_output_____" ] ], [ [ "Print some info about the data.", "_____no_output_____" ] ], [ [ "sci_hdulist.info()", "_____no_output_____" ] ], [ [ "Define the data and error arrays.", "_____no_output_____" ] ], [ [ "data = sci_hdulist[0].data.astype(np.float)\nerror = rms_hdulist[0].data.astype(np.float)", "_____no_output_____" ] ], [ [ "Extract the data header and create a WCS object.", "_____no_output_____" ] ], [ [ "from astropy.wcs import WCS\n\nhdr = sci_hdulist[0].header\nwcs = WCS(hdr)", "_____no_output_____" ] ], [ [ "Display the data.", "_____no_output_____" ] ], [ [ "from astropy.visualization import simple_norm\nnorm = simple_norm(data, 'sqrt', percent=99.5)\nplt.imshow(data, norm=norm)\nplt.title('XDF F160W Cutout')", "_____no_output_____" ] ], [ [ "## Part 1: Aperture Photometry", "_____no_output_____" ], [ "Photutils provides circular, elliptical, and rectangular aperture shapes (plus annulus versions of each). 
These are names of the aperture classes, defined in pixel coordinates:\n\n* `CircularAperture`\n* `CircularAnnulus`\n\n* `EllipticalAperture`\n* `EllipticalAnnulus`\n\n* `RectangularAperture`\n* `RectangularAnnulus`\n\nAlong with variants of each, defined in celestial coordinates:\n\n* `SkyCircularAperture`\n* `SkyCircularAnnulus`\n\n* `SkyEllipticalAperture`\n* `SkyEllipticalAnnulus`\n\n* `SkyRectangularAperture`\n* `SkyRectangularAnnulus`", "_____no_output_____" ], [ "## Methods for handling aperture/pixel intersection", "_____no_output_____" ], [ "In general, the apertures will only partially overlap some of the pixels in the data.\n\nThere are three methods for handling the aperture overlap with the pixel grid of the data array.", "_____no_output_____" ], [ "<img src=\"data/photutils_aperture_methods.svg\">", "_____no_output_____" ], [ "NOTE: the `subpixels` keyword is ignored for the **'exact'** and **'center'** methods.", "_____no_output_____" ], [ "### Perform circular-aperture photometry on some sources in the XDF", "_____no_output_____" ], [ "First, we define a circular aperture at a given position and radius (in pixels).", "_____no_output_____" ] ], [ [ "from photutils import CircularAperture\n\nposition = (90.73, 59.43) # (x, y) pixel position\nradius = 5. # pixels\naperture = CircularAperture(position, r=radius)", "_____no_output_____" ], [ "aperture", "_____no_output_____" ], [ "print(aperture)", "_____no_output_____" ] ], [ [ "We can plot the aperture on the data using the aperture `plot()` method:", "_____no_output_____" ] ], [ [ "plt.imshow(data, norm=norm)\naperture.plot(color='red', lw=2)", "_____no_output_____" ] ], [ [ "Now let's perform photometry on the data using the `aperture_photometry()` function. **The default aperture method is 'exact'.**\n\nAlso note that the input data is assumed to have zero background. If that is not the case, please see the documentation for the `photutils.background` subpackage for tools to help subtract the background.\n\nSee the `photutils_local_background.ipynb` notebook for examples of local background subtraction.\n\nThe background was already subtracted for our XDF example data.", "_____no_output_____" ] ], [ [ "from photutils import aperture_photometry\n\nphot = aperture_photometry(data, aperture)\nphot", "_____no_output_____" ] ], [ [ "The output is an Astropy `QTable` (Quantity Table) with sum of data values within the aperture (using the defined pixel overlap method).\n\nThe table also contains metadata, which is accessed by the `meta` attribute of the table. 
The metadata is stored as a python (ordered) dictionary:", "_____no_output_____" ] ], [ [ "phot.meta", "_____no_output_____" ], [ "phot.meta['version']", "_____no_output_____" ] ], [ [ "Aperture photometry using the **'center'** method gives a slightly different (and less accurate) answer:", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data, aperture, method='center')\nphot", "_____no_output_____" ] ], [ [ "Now perform aperture photometry using the **'subpixel'** method with `subpixels=5`:\n\nThese parameters are equivalent to SExtractor aperture photometry.", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data, aperture, method='subpixel', subpixels=5)\nphot", "_____no_output_____" ] ], [ [ "## Photometric Errors", "_____no_output_____" ], [ "We can also input an error array to get the photometric errors.", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data, aperture, error=error)\nphot", "_____no_output_____" ] ], [ [ "The error array in our XDF FITS file represents only the background error. If we want to include the Poisson error of the source we need to calculate the **total** error:\n\n$\\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 +\n \\frac{I}{g}}$\n \nwhere $\\sigma_{\\mathrm{b}}$ is the background-only error,\n$I$ are the data values, and $g$ is the \"effective gain\".\n\nThe \"effective gain\" is the value (or an array if it's variable across an image) needed to convert the data image to count units (e.g. electrons or photons), where Poisson statistics apply.\n\nPhotutils provides a `calc_total_error()` function to perform this calculation.", "_____no_output_____" ] ], [ [ "# this time include the Poisson error of the source\nfrom photutils.utils import calc_total_error\n\n# our data array is in units of e-/s\n# so the \"effective gain\" should be the exposure time\neff_gain = hdr['TEXPTIME']\ntot_error = calc_total_error(data, error, eff_gain)\n\nphot = aperture_photometry(data, aperture, error=tot_error)\nphot", "_____no_output_____" ] ], [ [ "The total error increased only slightly because this is a small faint source.", "_____no_output_____" ], [ "## Units", "_____no_output_____" ], [ "We can also input the data (and error) units via the `unit` keyword.", "_____no_output_____" ] ], [ [ "# input the data units\nimport astropy.units as u\n\nunit = u.electron / u.s\nphot = aperture_photometry(data, aperture, error=tot_error, unit=unit)\nphot", "_____no_output_____" ], [ "phot['aperture_sum']", "_____no_output_____" ] ], [ [ "Instead of inputting units via the units keyword, `Quantity` inputs for data and error are also allowed.", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data * unit, aperture, error=tot_error * u.adu)\nphot", "_____no_output_____" ] ], [ [ "The `unit` will not override the data or error unit.", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data * unit, aperture, error=tot_error * u.adu, unit=u.photon)\nphot", "_____no_output_____" ] ], [ [ "## Performing aperture photometry at multiple positions", "_____no_output_____" ], [ "Now let's perform aperture photometry for three sources (all with the same aperture size). 
We simply define three (x, y) positions.", "_____no_output_____" ] ], [ [ "positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]\nradius = 5.\napertures = CircularAperture(positions, r=radius)", "_____no_output_____" ] ], [ [ "Let's plot these three apertures on the data.", "_____no_output_____" ] ], [ [ "plt.imshow(data, norm=norm)\napertures.plot(color='red', lw=2)", "_____no_output_____" ] ], [ [ "Now let's perform aperture photometry.", "_____no_output_____" ] ], [ [ "phot = aperture_photometry(data, apertures, error=tot_error, unit=unit)\nphot", "_____no_output_____" ] ], [ [ "Each source is a row in the table and is given a unique **id** (the first column).", "_____no_output_____" ], [ "## Adding columns to the photometry table", "_____no_output_____" ], [ "We can add columns to the photometry table. Let's calculate the signal-to-noise (SNR) ratio of our sources and add it as a new column to the table.", "_____no_output_____" ] ], [ [ "snr = phot['aperture_sum'] / phot['aperture_sum_err'] # units will cancel\n\nphot['snr'] = snr\nphot", "_____no_output_____" ] ], [ [ "Now calculate the F160W AB magnitude and add it to the table.", "_____no_output_____" ] ], [ [ "f160w_zpt = 25.9463\n\n# NOTE that the log10() function can be applied only to dimensionless quantities\n# so we use the value() method to get the number value of the aperture sum\nabmag = -2.5 * np.log10(phot['aperture_sum'].value) + f160w_zpt\n\nphot['abmag'] = abmag\nphot", "_____no_output_____" ] ], [ [ "Now, using the WCS defined above, calculate the sky coordinates for these objects and add it to the table.", "_____no_output_____" ] ], [ [ "from astropy.wcs.utils import pixel_to_skycoord\n\n# convert pixel positions to sky coordinates\nx, y = np.transpose(positions)\ncoord = pixel_to_skycoord(x, y, wcs)\n\n# we can add the astropy SkyCoord object directly to the table\nphot['sky coord'] = coord\nphot", "_____no_output_____" ] ], [ [ "We can also add separate RA and Dec columns, if preferred.", "_____no_output_____" ] ], [ [ "phot['ra_icrs'] = coord.icrs.ra\nphot['dec_icrs'] = coord.icrs.dec\nphot", "_____no_output_____" ] ], [ [ "If we write the table to an ASCII file using the ECSV format we can read it back in preserving all of the units, metadata, and SkyCoord objects.", "_____no_output_____" ] ], [ [ "phot.write('my_photometry.txt', format='ascii.ecsv')", "_____no_output_____" ], [ "# view the table on disk\n!cat my_photometry.txt", "_____no_output_____" ] ], [ [ "Now read the table in ECSV format.", "_____no_output_____" ] ], [ [ "from astropy.table import QTable\ntbl = QTable.read('my_photometry.txt', format='ascii.ecsv')\ntbl", "_____no_output_____" ], [ "tbl.meta", "_____no_output_____" ], [ "tbl['aperture_sum'] # Quantity array", "_____no_output_____" ], [ "tbl['sky coord'] # SkyCoord array", "_____no_output_____" ] ], [ [ "## Aperture photometry using Sky apertures", "_____no_output_____" ], [ "First, let's define the sky coordinates by converting our pixel coordinates.", "_____no_output_____" ] ], [ [ "positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]\nx, y = np.transpose(positions)\ncoord = pixel_to_skycoord(x, y, wcs)\ncoord", "_____no_output_____" ] ], [ [ "Now define circular apertures in sky coordinates.\n\nFor sky apertures, the aperture radius must be a `Quantity`, in either pixel or angular units.", "_____no_output_____" ] ], [ [ "from photutils import SkyCircularAperture\n\nradius = 5. 
* u.pix\nsky_apers = SkyCircularAperture(coord, r=radius)\nsky_apers.r", "_____no_output_____" ], [ "radius = 0.5 * u.arcsec\nsky_apers = SkyCircularAperture(coord, r=radius)\nsky_apers.r", "_____no_output_____" ] ], [ [ "When using a sky aperture in angular units, `aperture_photometry` needs the WCS transformation, which can be provided in two ways.", "_____no_output_____" ] ], [ [ "# via the wcs keyword\nphot = aperture_photometry(data, sky_apers, wcs=wcs)\nphot", "_____no_output_____" ], [ "# or via a FITS hdu (i.e. header and data) as the input \"data\"\nphot = aperture_photometry(sci_hdulist[0], sky_apers)\nphot", "_____no_output_____" ] ], [ [ "## More on Aperture Photometry in the Extended notebook:\n\n- Bad pixel masking\n- Encircled flux\n- Aperture photometry at multiple positions using multiple apertures\n\nAlso see the local background subtraction notebook (`photutils_local_backgrounds.ipynb`).", "_____no_output_____" ], [ "## Part 2: Image Segmentation", "_____no_output_____" ], [ "Image segmentation is the process where sources are identified and labeled in an image.\n\nThe sources are detected by using a S/N threshold level and defining the minimum number of pixels required within a source.\n\nFirst, let's define a threshold image at 2$\\sigma$ (per pixel) above the background.", "_____no_output_____" ] ], [ [ "bkg = 0. # background level in this image\nnsigma = 2.\nthreshold = bkg + (nsigma * error) # this should be background-only error", "_____no_output_____" ] ], [ [ "Now let's detect \"8-connected\" sources of minimum size 5 pixels where each pixel is 2$\\sigma$ above the background.\n\n\"8-connected\" pixels touch along their edges or corners. \"4-connected\" pixels touch along their edges. For reference, SExtractor uses \"8-connected\" pixels.\n\nThe result is a segmentation image (`SegmentationImage` object). The segmentation image is the isophotal footprint of each source above the threshold.", "_____no_output_____" ] ], [ [ "from photutils import detect_sources\n\nnpixels = 5\nsegm = detect_sources(data, threshold, npixels)\n\nprint('Found {0} sources'.format(segm.nlabels))", "_____no_output_____" ] ], [ [ "Display the segmentation image.", "_____no_output_____" ] ], [ [ "from photutils.utils import random_cmap\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))\nax1.imshow(data, norm=norm)\nlbl1 = ax1.set_title('Data')\nax2.imshow(segm, cmap=segm.cmap())\nlbl2 = ax2.set_title('Segmentation Image')", "_____no_output_____" ] ], [ [ "It is better to filter (smooth) the data prior to source detection.\n\nLet's use a 5x5 Gaussian kernel with a FWHM of 2 pixels.", "_____no_output_____" ] ], [ [ "from astropy.convolution import Gaussian2DKernel\nfrom astropy.stats import gaussian_fwhm_to_sigma\n\nsigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2 pixels\nkernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)\nkernel.normalize()\n\nssegm = detect_sources(data, threshold, npixels, filter_kernel=kernel)", "_____no_output_____" ], [ "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))\nax1.imshow(segm, cmap=segm.cmap())\nlbl1 = ax1.set_title('Original Data')\nax2.imshow(ssegm, cmap=ssegm.cmap())\nlbl2 = ax2.set_title('Smoothed Data')", "_____no_output_____" ] ], [ [ "### Source deblending", "_____no_output_____" ], [ "Note above that some of our detected sources were blended. 
We can deblend them using the `deblend_sources()` function, which uses a combination of multi-thresholding and watershed segmentation.", "_____no_output_____" ] ], [ [ "from photutils import deblend_sources\n\nsegm2 = deblend_sources(data, ssegm, npixels, filter_kernel=kernel,\n contrast=0.001, nlevels=32)\n\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8))\nax1.imshow(data, norm=norm)\nax1.set_title('Data')\nax2.imshow(ssegm, cmap=ssegm.cmap())\nax2.set_title('Original Segmentation Image')\nax3.imshow(segm2, cmap=segm2.cmap())\nax3.set_title('Deblended Segmentation Image')\n\nprint('Found {0} sources'.format(segm2.max))", "_____no_output_____" ] ], [ [ "## Measure the photometry and morphological properties of detected sources", "_____no_output_____" ] ], [ [ "from photutils import source_properties\ncatalog = source_properties(data, segm2, error=error, wcs=wcs)", "_____no_output_____" ] ], [ [ "`catalog` is a `SourceCatalog` object. It behaves like a list of `SourceProperties` objects, one for each source.", "_____no_output_____" ] ], [ [ "catalog", "_____no_output_____" ], [ "catalog[0] # the first source", "_____no_output_____" ], [ "catalog[0].xcentroid # the xcentroid of the first source", "_____no_output_____" ] ], [ [ "Please go [here](http://photutils.readthedocs.org/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties) to see the complete list of available source properties.", "_____no_output_____" ], [ "We can create a Table of isophotal photometry and morphological properties using the ``to_table()`` method of `SourceCatalog`:", "_____no_output_____" ] ], [ [ "tbl = catalog.to_table()\ntbl", "_____no_output_____" ] ], [ [ "Additional properties (not stored in the table) can be accessed directly via the `SourceCatalog` object.", "_____no_output_____" ] ], [ [ "# get a single object (id=12)\nobj = catalog[11]\nobj.id", "_____no_output_____" ], [ "obj", "_____no_output_____" ] ], [ [ "Let's plot the cutouts of the data and error images for this source.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(12, 8), ncols=3)\nax[0].imshow(obj.make_cutout(segm2.data))\nax[0].set_title('Source id={} Segment'.format(obj.id))\nax[1].imshow(obj.data_cutout_ma)\nax[1].set_title('Source id={} Data'.format(obj.id))\nax[2].imshow(obj.error_cutout_ma)\nax[2].set_title('Source id={} Error'.format(obj.id))", "_____no_output_____" ] ], [ [ "## More on Image Segmentation in the Extended notebook:\n\n- Define a subset of source labels\n- Define a subset of source properties\n- Additional sources properties, such a cutout images\n- Define the approximate isophotal ellipses for each source", "_____no_output_____" ], [ "## Also see the two notebooks on Photutils PSF-fitting photometry:\n\n- `gaussian_psf_photometry.ipynb`\n- `image_psf_photometry_withNIRCam.ipynb`", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a6db8ad0129eba8403ecf377126c0e5a832adce
132
ipynb
Jupyter Notebook
analysis/05.Buckle_closure_TLT/.ipynb_checkpoints/Dataanalysis_280220_MB_v05-checkpoint.ipynb
raphaFanti/multiSensor
dbe75f6950671b7c4406e035c4b79f60e7c9e6e7
[ "MIT" ]
null
null
null
analysis/05.Buckle_closure_TLT/.ipynb_checkpoints/Dataanalysis_280220_MB_v05-checkpoint.ipynb
raphaFanti/multiSensor
dbe75f6950671b7c4406e035c4b79f60e7c9e6e7
[ "MIT" ]
1
2021-03-16T09:10:28.000Z
2021-03-24T17:11:30.000Z
analysis/05.Buckle_closure_TLT/.ipynb_checkpoints/Dataanalysis_280220_MB_v05-checkpoint.ipynb
raphaFanti/bootSensing
8319f7d6582e445594691adf35769ec1eef69d33
[ "MIT" ]
null
null
null
33
75
0.886364
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a6dc7a032e2ce2662bfd4507a585f89364184a3
290,188
ipynb
Jupyter Notebook
MNEPython/.ipynb_checkpoints/Conditions-checkpoint.ipynb
Lei-I-Zhang/FLUX
5ccc424900e584feb9ded31d65a52bad3be1c007
[ "BSD-3-Clause" ]
1
2022-03-19T17:46:04.000Z
2022-03-19T17:46:04.000Z
MNEPython/Conditions.ipynb
Lei-I-Zhang/FLUX
5ccc424900e584feb9ded31d65a52bad3be1c007
[ "BSD-3-Clause" ]
null
null
null
MNEPython/Conditions.ipynb
Lei-I-Zhang/FLUX
5ccc424900e584feb9ded31d65a52bad3be1c007
[ "BSD-3-Clause" ]
null
null
null
526.656987
133,488
0.938977
[ [ [ "# Extracting condtion-specific trials\n\nThe aim of this section is to extract the trials according to the trigger channel. We will explain how the events can be generated from the stimulus channels and how to extract condition specific trials (epochs). Once the trials are extracted, bad epochs will be identified and excluded on based on their peak-to-peak signal amplitude.\n\n## Preparation\n\nImport the relevant Python modules:", "_____no_output_____" ] ], [ [ "import os.path as op\nimport os\nimport sys\nimport numpy as np\n\nimport mne\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Set the paths for the data and results. Note that these will depend on your local setup.", "_____no_output_____" ] ], [ [ "data_path = r'C:\\Users\\JensenO\\Dropbox\\FLUX\\Development\\dataRaw'\nresult_path = r'C:\\Users\\JensenO\\Dropbox\\FLUX\\Development\\dataResults'\n\nfile_name = 'training_raw'", "_____no_output_____" ] ], [ [ "## Reading the events from the stimulus channels\nFirst read all the events from the stimulus channel (in our case, STI01). We will loop over the 2 fif-files created in the previous step.", "_____no_output_____" ] ], [ [ "for subfile in range(1, 3):\n path_file = os.path.join(result_path,file_name + 'ica-' + str(subfile) + '.fif') \n raw = mne.io.read_raw_fif(path_file,allow_maxshield=True,verbose=True,preload=True)\n events = mne.find_events(raw, stim_channel='STI101',min_duration=0.001001)\n \n\n # Save the events in a dedicted FIF-file: \n filename_events = op.join(result_path,file_name + 'eve-' + str(subfile) +'.fif')\n mne.write_events(filename_events,events)", "Opening raw data file C:\\Users\\JensenO\\Dropbox\\FLUX\\Development\\dataResults\\training_rawica-1.fif...\n" ] ], [ [ "The code above extract the events from the trigger channel STI101. This results are represented in the array *events* where the first column is the sample and the third column the corresponding trigger value. Note that the events are concatenated across the 2 subfiles.\n\nTo visualize a snippet of the events-array write:", "_____no_output_____" ] ], [ [ "%matplotlib qt\nplt.stem(events[:,0],events[:,2])\nplt.xlim(1950000,2000000)\nplt.xlabel('samples')\nplt.ylabel('Trigger value (STI101)')\nplt.show()", "_____no_output_____" ] ], [ [ "The figures shows an example for part of the events array. The trigger values indicate specific events of the trials. Here the 'attend left' trials are coded with the trigger '21', whereas the 'attend right' trials with '22'.\n", "_____no_output_____" ], [ "## Defining the epochs (trials) according to the event values\nNext step is to extract the left and right trials ", "_____no_output_____" ] ], [ [ "events_id = {'left':21,'right':22}\n\nraw_list = list()\nevents_list = list()\n\n\nfor subfile in range(1, 3):\n # Read in the data from the Result path\n path_file = os.path.join(result_path,file_name + 'ica-' + str(subfile) + '.fif') \n raw = mne.io.read_raw_fif(path_file, allow_maxshield=True,verbose=True)\n \n filename_events = op.join(result_path,file_name + 'eve-' + str(subfile) +'.fif')\n \n events = mne.read_events(filename_events, verbose=True)\n \n raw_list.append(raw)\n events_list.append(events)\n", "Opening raw data file C:\\Users\\JensenO\\Dropbox\\FLUX\\Development\\dataResults\\training_rawica-1.fif...\n Range : 208000 ... 1664999 = 208.000 ... 
1664.999 secs\n" ] ], [ [ "Now concatenate raw instances as if they were continuous - i.e combine over the 2 subfiles.", "_____no_output_____" ] ], [ [ "raw, events = mne.concatenate_raws(raw_list,events_list=events_list)\ndel raw_list ", "_____no_output_____" ] ], [ [ "Set the peak-to-peak amplitude thresholds for trial rejection. These values may change depending on the quality of the data.", "_____no_output_____" ] ], [ [ "reject = dict(grad=5000e-13, # T / m (gradiometers)\n mag=5e-12, # T (magnetometers)\n #eeg=200e-6, # V (EEG channels)\n #eog=150e-6 # V (EOG channels)\n )", "_____no_output_____" ] ], [ [ "We will use time-windows of interest starting 2.5 s prior to the stimulus onset and ending 2 s after. Now perform the epoching using the events and events_id as well as the selected channels:", "_____no_output_____" ] ], [ [ "epochs = mne.Epochs(raw,\n events, events_id,\n tmin=-2.5 , tmax=2,\n baseline=None,\n proj=True,\n picks = 'all',\n detrend = 1,\n reject=reject,\n reject_by_annotation=True,\n preload=True,\n verbose=True)\n\n# Show epochs details\nepochs", "Not setting metadata\nNot setting metadata\n312 matching events found\nNo baseline correction applied\n0 projection items activated\nLoading data for 312 events and 4501 original time points ...\n Rejecting epoch based on MAG : ['MEG2611']\n56 bad epochs dropped\n" ] ], [ [ "By calling *epochs* we can check that the number of events is 305 of which 152 are left attention trials and 153 right attention trials. Moreover, we can see that no baseline correction was applied at this stage.\n\nNow we plot an overview of the rejected epochs:", "_____no_output_____" ] ], [ [ "epochs.plot_drop_log();", "_____no_output_____" ] ], [ [ "A few percent of the trials were rejected due to MEG artifacts in the magnetometers.", "_____no_output_____" ], [ "Now we save the epoched data in an FIF-file. Note this file will include trials from the 2 subfiles.", "_____no_output_____" ] ], [ [ "path_outfile = os.path.join(result_path,'training_epo.fif') \nepochs.save(path_outfile,overwrite=True)", "_____no_output_____" ] ], [ [ "## Plotting the trials\nTo show the trials for the left-condition for the MEG gradiometers write:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nepochs.plot(n_epochs=10,picks=['grad'],event_id={'left':21});", "Using matplotlib as 2D backend.\n" ] ], [ [ "The plot above shows 10 trials of type left; only gradiometers shown.\n\nTo show the trigger (stimulus channels) write:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nepochs.plot(n_epochs=1,picks=['stim'],event_id={'left': 21});", "_____no_output_____" ] ], [ [ "An example of the trigger channels for one trial.\n\nShowing the trigger channels is often useful for verifying that correct trials have been selected. Note that STI001 to STI016 denote the individual trigger lines which are 'on' (1) or 'off' (0). The channel STI101 is a combination of the trigger lines ( STI101 = STI001 + 2 * STI002 + 4 * STI003 + 8 * STI004 + ...)\n\nTo show all the trials belonging to *left* for a representative gradiometer (MEG2343) use the plot_image function. 
In the following example we also lowpass filter the individual trials at 30 Hz and shorten them (crop) to a -100 to 400 ms interval:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nepochs['left'].filter(0.0,30).crop(-0.1,0.4).plot_image(picks=['MEG2343'],vmin=-150,vmax=150);", "Setting up low-pass filter at 30 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal lowpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Upper passband edge: 30.00 Hz\n- Upper transition bandwidth: 7.50 Hz (-6 dB cutoff frequency: 33.75 Hz)\n- Filter length: 441 samples (0.441 sec)\n\nNot setting metadata\nNot setting metadata\n130 matching events found\nNo baseline correction applied\n0 projection items activated\n0 bad epochs dropped\n" ] ], [ [ "## Preregistration and publications\n\nPublication, example:\n\n\"The data were segmented into intervals of 4.5 s, ranging from 2.5 s prior to stimulus onset to 2 s after. To ensure that no artefacts were missed, trials in which the gradiometer values exceeded 5000 fT/cm or the magnetometer values exceeded 5000 fT were rejected, as well as trials previously annotated with muscle artefacts.\"\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6ddeed377d4f8103844c0db1945bb834d6e071
2,997
ipynb
Jupyter Notebook
BashCommands.ipynb
cusey/TypeScriptExamples
261fb3c1287a55eee078af73a8c24be155f13a10
[ "MIT" ]
null
null
null
BashCommands.ipynb
cusey/TypeScriptExamples
261fb3c1287a55eee078af73a8c24be155f13a10
[ "MIT" ]
null
null
null
BashCommands.ipynb
cusey/TypeScriptExamples
261fb3c1287a55eee078af73a8c24be155f13a10
[ "MIT" ]
null
null
null
18.968354
153
0.51952
[ [ [ "# TypeScript Error Messaging\nThe TypeScript transcompile convert the TypeScript to JavaScript. When it is doing the transcompile it type-checks for code and prints out message.", "_____no_output_____" ], [ "### Deleting the JavaScript File", "_____no_output_____" ] ], [ [ "%%bash\nrm TypeScriptInAction.js", "_____no_output_____" ], [ "### List the files", "_____no_output_____" ], [ "%%bash\nls", "BashCommands.ipynb\nREADME.md\nTypeScriptInAction.ts\nTypeScriptSyntax.ipynb\n" ], [ "# %load TypeScriptInAction.ts\nlet num:number;\nnum = 'abc'", "_____no_output_____" ] ], [ [ "### TypeScript Error Messaging\nTypeScript transcompile type-checks and see there is type error that num is set to number but num was assigned a string. ", "_____no_output_____" ] ], [ [ "%%bash\ntsc TypeScriptInAction.ts", "TypeScriptInAction.ts(2,1): error TS2322: Type '\"abc\"' is not assignable to type 'number'.\n" ] ], [ [ "TypeScript transcompile still generated the JavaScript code", "_____no_output_____" ] ], [ [ "%%bash\nls", "BashCommands.ipynb\nREADME.md\nTypeScriptInAction.js\nTypeScriptInAction.ts\nTypeScriptSyntax.ipynb\n" ], [ "# %load TypeScriptInAction.js\nvar num;\nnum = 'abc';\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6de0f48dab3f4dc07d8cad2428027bb21509bf
49,810
ipynb
Jupyter Notebook
complete/lesson_3_complete.ipynb
andreascaudo/data_analysis_lab
27aa4b87a1d50a53fde4faa9bbfb941670de1dc1
[ "MIT" ]
null
null
null
complete/lesson_3_complete.ipynb
andreascaudo/data_analysis_lab
27aa4b87a1d50a53fde4faa9bbfb941670de1dc1
[ "MIT" ]
null
null
null
complete/lesson_3_complete.ipynb
andreascaudo/data_analysis_lab
27aa4b87a1d50a53fde4faa9bbfb941670de1dc1
[ "MIT" ]
null
null
null
32.260363
454
0.483678
[ [ [ "# Parte 3 - Machine Learning Workflow", "_____no_output_____" ], [ "Datasets: [Diamanti](https://www.kaggle.com/shivam2503/diamonds)", "_____no_output_____" ], [ "**OBBIETTVO:** In base alle sue caratteristiche provare a predire il prezzo di un diamante <br>\nUtilizzeremo la libreria python **scikit-learn** per testare alcuni algoritmi di classificiazione!", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\nimport sklearn\nfrom sklearn import svm, preprocessing\n\ndiamond_df = pd.read_csv(\"../datasets/diamonds.csv\", index_col = 0)", "_____no_output_____" ], [ "diamond_df.head()", "_____no_output_____" ] ], [ [ "**Quale modello algoritmo di classificazione dovremmo utilizzare?**\n\n[Come scegliere il corretto algoritmo](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html)", "_____no_output_____" ] ], [ [ "len(diamond_df)", "_____no_output_____" ] ], [ [ "## Linear Regression", "_____no_output_____" ], [ "<img src=\"../images/regression_1.jpeg\" alt=\"Drawing\" style=\"width: 545px;\"/><img src=\"../images/regression_2.jpeg\" alt=\"Drawing\" style=\"width: 500px;\"/> <br> Per utilizzare la Linear Regression deve esserci una relazione lineare tra i dati", "_____no_output_____" ], [ "<img src=\"../images/regression_3.png\" alt=\"Drawing\" style=\"width: 545px;\"/> ", "_____no_output_____" ], [ "[Least Square Method](https://www.varsitytutors.com/hotmath/hotmath_help/topics/line-of-best-fit)", "_____no_output_____" ], [ "Come si definisce la best fit line? <br>", "_____no_output_____" ], [ "$$Y = mx + b$$", "_____no_output_____" ], [ "Data una X dobbiamo trovare la sua Y corrispondente, ma prima dobbiamo risolvere _m_ e _b_:", "_____no_output_____" ], [ "_m_ è la pendenza<br>\n_b_ è l'intersezione di y", "_____no_output_____" ], [ "$$m = \\frac{\\overline{x}\\cdot\\overline{y} - \\overline{xy}}{(\\overline{x})^2-\\overline{x^2}}$$", "_____no_output_____" ], [ "$$b = \\overline{y}-m\\overline{x} $$", "_____no_output_____" ], [ "In questo caso abbiamo dati su 2 dimensioni, ma appena incrementiamo la dimensione dello spazio verriale incrementerà anche la complessità dei calcoli", "_____no_output_____" ], [ "**Per allenare il nostro modello vogliamo utilizzare tutti i paramentri tranne il prezzo**", "_____no_output_____" ] ], [ [ "diamond_df[\"cut\"].unique()", "_____no_output_____" ] ], [ [ "Abbiamo bisogno di categorie numeriche!", "_____no_output_____" ] ], [ [ "diamond_df[\"cut\"].astype(\"category\").cat.codes", "_____no_output_____" ] ], [ [ "**Problema**: Dobbiamo preservare il significato delle labels: per esempio Premium sarà migliore di Fair e così via..", "_____no_output_____" ] ], [ [ "cut_dizionario = {\"Fair\":1, \"Good\":2, \"Very Good\":3, \"Premium\":4, \"Ideal\":5}", "_____no_output_____" ] ], [ [ "Stessa cosa per:", "_____no_output_____" ] ], [ [ "clarity_dizionario = {\"I3\": 1, \"I2\": 2, \"I1\": 3, \"SI2\": 4, \"SI1\": 5, \"VS2\": 6, \"VS1\": 7, \"VVS2\": 8, \"VVS1\": 9, \"IF\": 10, \"FL\": 11}\ncolor_dizionario = {\"J\": 1,\"I\": 2,\"H\": 3,\"G\": 4,\"F\": 5,\"E\": 6,\"D\": 7}", "_____no_output_____" ] ], [ [ "Bisognerà mappare queste classi alle varie colonne del dataset", "_____no_output_____" ] ], [ [ "diamond_df['cut'] = diamond_df['cut'].map(cut_dizionario)", "_____no_output_____" ] ], [ [ "## Esercizio 13:\n- Mappare le colonne \"clarity\" e \"color\" con i rispettivi dizionari!", "_____no_output_____" ] ], [ [ "#Esercizio\ndiamond_df['clarity'] = diamond_df['clarity'].map(clarity_dizionario)\ndiamond_df['color'] = 
diamond_df['color'].map(color_dizionario)", "_____no_output_____" ], [ "diamond_df.head()", "_____no_output_____" ] ], [ [ "Before training our model it is important to shuffle the data, to avoid bias building up from analyzing the data in order.\n<br> For example it could be, as in our case, that the data are sorted by price.", "_____no_output_____" ] ], [ [ "diamond_df", "_____no_output_____" ], [ "diamond_df = sklearn.utils.shuffle(diamond_df)", "_____no_output_____" ] ], [ [ "Let's separate the feature set from the label we need to predict", "_____no_output_____" ] ], [ [ "X = diamond_df.drop(\"price\", axis=1).values #Feature Set --> every label except the one we need to predict\ny = diamond_df[\"price\"].values", "_____no_output_____" ], [ "X", "_____no_output_____" ] ], [ [ "**Bonus**: We could have cheated by loading the dataframe with the index: since the dataset was sorted by price, doing so would have left in an extra piece of information that would have compromised our regression, because the index would have been ordered like the price.", "_____no_output_____" ], [ "**Preprocessing**: It allows us to normalize the values; this way we reduce sparsity and the model works with more uniform data, with a consequent improvement in performance", "_____no_output_____" ] ], [ [ "print(np.mean(X))", "16.59665167058048\n" ], [ "X = preprocessing.scale(X) #Look this up", "_____no_output_____" ], [ "print(np.mean(X))", "2.073404275280678e-16\n" ], [ "test_size = 200", "_____no_output_____" ] ], [ [ "The **train** set is the portion of the dataset on which the model is trained\n\nThe **test** set is a portion of the dataset that our model will never see, and it will be used to evaluate its performance.", "_____no_output_____" ] ], [ [ "X_train = X[:-test_size]\ny_train = y[:-test_size]\n\nX_test = X[-test_size:]\ny_test = y[-test_size:]", "_____no_output_____" ] ], [ [ "If we used the test dataset to train the model, the final accuracy would be compromised, since the data would already have been seen by the model during the training phase; that way we could not verify through the test whether the model has really learned to predict a value or has simply memorized the training dataset. 
For this reason, the test data must be used only to test the model!", "_____no_output_____" ], [ "Let's select the [model](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) to use", "_____no_output_____" ] ], [ [ "clf = svm.SVR(kernel=\"linear\")\nclf.fit(X_train, y_train)", "_____no_output_____" ], [ "clf.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "Coefficient of determination (R squared): \n- 0.0 Worst case \n- 1.0 Best case\n\nIt is calculated starting from the mean squared error", "_____no_output_____" ], [ "Too good to be true!", "_____no_output_____" ], [ "Let's check what happened!", "_____no_output_____" ] ], [ [ "for X,y in zip(X_test, y_test):\n print(f\"Model: {clf.predict([X])}, Actual: {y}\")\n ", "Model: [2759.99918279], Actual: 2301\nModel: [68.6001562], Actual: 502\nModel: [558.61638693], Actual: 1000\nModel: [2251.0386317], Actual: 1928\nModel: [1.85988253], Actual: 605\nModel: [3222.6810463], Actual: 2681\nModel: [8719.06071473], Actual: 9557\nModel: [2850.84278289], Actual: 2057\nModel: [4507.0237187], Actual: 4360\nModel: [3369.20090063], Actual: 2763\nModel: [1433.35420337], Actual: 974\nModel: [912.69871768], Actual: 752\nModel: [1196.32607346], Actual: 844\nModel: [138.08955146], Actual: 748\nModel: [6548.81230762], Actual: 6552\nModel: [1758.43962677], Actual: 1356\nModel: [6498.59698363], Actual: 4278\nModel: [9996.72536659], Actual: 11737\nModel: [5277.61065347], Actual: 5706\nModel: [190.47019139], Actual: 776\nModel: [-66.14398432], Actual: 550\nModel: [4352.48547196], Actual: 4381\nModel: [3786.46435075], Actual: 3551\nModel: [5470.73125706], Actual: 6830\nModel: [5610.6528237], Actual: 5008\nModel: [170.2406006], Actual: 590\nModel: [77.90959764], Actual: 561\nModel: [-168.83362517], Actual: 462\nModel: [858.34631588], Actual: 1133\nModel: [6558.47409626], Actual: 5674\nModel: [6214.94372975], Actual: 6955\nModel: [5430.1699588], Actual: 3951\nModel: [3802.10394562], Actual: 4458\nModel: [1735.81127082], Actual: 1250\nModel: [704.80718307], Actual: 826\nModel: [3247.02755671], Actual: 3085\nModel: [-355.13526625], Actual: 506\nModel: [945.10617182], Actual: 1080\nModel: [13705.85817123], Actual: 18706\nModel: [380.75657037], Actual: 603\nModel: [8610.98408161], Actual: 9774\nModel: [4765.80498654], Actual: 4202\nModel: [590.53035733], Actual: 625\nModel: [5497.05536874], Actual: 5124\nModel: [4150.21959307], Actual: 4011\nModel: [869.58629466], Actual: 949\nModel: [5089.31265885], Actual: 4698\nModel: [12370.0463521], Actual: 18153\nModel: [6795.3570829], Actual: 7270\nModel: [4311.42246608], Actual: 3461\nModel: [1842.0952255], Actual: 1389\nModel: [4527.72454954], Actual: 4476\nModel: [1980.96272456], Actual: 1754\nModel: [4963.01608968], Actual: 4398\nModel: [5119.89707739], Actual: 5747\nModel: [1734.39106607], Actual: 1588\nModel: [1181.23957776], Actual: 1287\nModel: [689.81613588], Actual: 957\nModel: [3426.72190189], Actual: 2959\nModel: [325.70644958], Actual: 645\nModel: [920.50548932], Actual: 956\nModel: [10915.92419535], Actual: 10055\nModel: [8501.56362888], Actual: 8858\nModel: [4647.70404951], Actual: 4543\nModel: [1082.17770369], Actual: 863\nModel: [564.91268784], Actual: 544\nModel: [1978.1692631], Actual: 1707\nModel: [573.48377296], Actual: 636\nModel: [4716.06404105], Actual: 3686\nModel: [6386.42916215], Actual: 6713\nModel: [12036.47491747], Actual: 18487\nModel: [4531.30184192], Actual: 4052\nModel: [3240.47814571], Actual: 
2940\nModel: [544.02577502], Actual: 485\nModel: [2812.18580516], Actual: 3004\nModel: [270.9159611], Actual: 844\nModel: [4318.19562697], Actual: 5027\nModel: [2204.9298132], Actual: 1948\nModel: [6202.9322518], Actual: 8061\nModel: [12016.4736857], Actual: 14704\nModel: [-70.79407233], Actual: 638\nModel: [2221.00969471], Actual: 1880\nModel: [5427.6026891], Actual: 7597\nModel: [398.42652481], Actual: 666\nModel: [5423.81014855], Actual: 7311\nModel: [1480.14412162], Actual: 1384\nModel: [10338.62246171], Actual: 13250\nModel: [8668.11558831], Actual: 11104\nModel: [4944.12743211], Actual: 5306\nModel: [2929.64945773], Actual: 2792\nModel: [3077.456674], Actual: 2717\nModel: [179.37007162], Actual: 507\nModel: [4689.31059404], Actual: 5616\nModel: [784.42935541], Actual: 783\nModel: [17.93577167], Actual: 675\nModel: [8316.06669417], Actual: 7559\nModel: [2113.62971186], Actual: 1766\nModel: [132.8836351], Actual: 743\nModel: [7263.30885075], Actual: 7714\nModel: [2080.11812976], Actual: 1622\nModel: [811.64349311], Actual: 867\nModel: [1212.53570045], Actual: 1053\nModel: [7662.83964655], Actual: 10575\nModel: [1726.11237782], Actual: 1656\nModel: [1092.33941031], Actual: 863\nModel: [9150.87097765], Actual: 14773\nModel: [-22.65505509], Actual: 516\nModel: [2386.81245003], Actual: 1936\nModel: [3349.40380646], Actual: 2604\nModel: [812.59776208], Actual: 810\nModel: [1522.29167926], Actual: 1265\nModel: [2448.25361143], Actual: 2001\nModel: [6204.74943013], Actual: 7113\nModel: [1132.01341025], Actual: 956\nModel: [3947.0135542], Actual: 2396\nModel: [5487.31132371], Actual: 6271\nModel: [5039.60409983], Actual: 5824\nModel: [18612.73816687], Actual: 6512\nModel: [4831.87158254], Actual: 5183\nModel: [9157.02502894], Actual: 11180\nModel: [1002.16822511], Actual: 795\nModel: [648.60337721], Actual: 706\nModel: [1622.81208153], Actual: 1578\nModel: [5249.46148875], Actual: 4362\nModel: [3538.01499322], Actual: 3150\nModel: [6267.00106931], Actual: 8564\nModel: [6.04336286], Actual: 605\nModel: [12165.90885118], Actual: 17953\nModel: [5170.75293967], Actual: 6271\nModel: [4033.81865711], Actual: 3950\nModel: [922.75068552], Actual: 1024\nModel: [5461.34827119], Actual: 5458\nModel: [1020.21298251], Actual: 956\nModel: [757.13317041], Actual: 710\nModel: [1343.67428302], Actual: 1115\nModel: [335.91723125], Actual: 756\nModel: [890.98391776], Actual: 739\nModel: [537.86721762], Actual: 734\nModel: [1290.96319602], Actual: 1301\nModel: [1518.26738322], Actual: 1569\nModel: [3692.85188197], Actual: 2862\nModel: [328.47201386], Actual: 658\nModel: [7065.95366543], Actual: 9756\nModel: [-275.41930831], Actual: 540\nModel: [1709.26232728], Actual: 1367\nModel: [2800.83060542], Actual: 2543\nModel: [1714.39100579], Actual: 1554\nModel: [11743.78190538], Actual: 10833\nModel: [343.00033135], Actual: 844\nModel: [623.06011241], Actual: 737\nModel: [494.67308145], Actual: 597\nModel: [5286.26153827], Actual: 6126\nModel: [3815.35486047], Actual: 3387\nModel: [386.90614467], Actual: 911\nModel: [2225.85024696], Actual: 1960\nModel: [2748.38770732], Actual: 2316\nModel: [4739.58765499], Actual: 5000\nModel: [3009.74553202], Actual: 2683\nModel: [161.26746322], Actual: 558\nModel: [4516.88979584], Actual: 4759\nModel: [4311.38634915], Actual: 3590\nModel: [12113.1992949], Actual: 11040\nModel: [375.18501631], Actual: 719\nModel: [3815.05925477], Actual: 4258\nModel: [-4.49822265], Actual: 675\nModel: [3845.2805507], Actual: 3442\nModel: [5238.53227757], Actual: 6652\nModel: [4866.90303566], Actual: 
4273\nModel: [3656.67103779], Actual: 2732\nModel: [980.9894229], Actual: 882\nModel: [1675.88044904], Actual: 1445\nModel: [1563.34550748], Actual: 1292\nModel: [1547.22978664], Actual: 1654\nModel: [9263.56154139], Actual: 9251\nModel: [8681.57593617], Actual: 12068\nModel: [8987.31353425], Actual: 13110\nModel: [3372.11909329], Actual: 2873\nModel: [8852.91147634], Actual: 10122\nModel: [1832.16723636], Actual: 1689\nModel: [7256.83225138], Actual: 6190\nModel: [2845.90322962], Actual: 1828\nModel: [1104.49403669], Actual: 935\nModel: [5206.93098057], Actual: 4488\nModel: [2171.14232417], Actual: 2066\nModel: [2910.6548055], Actual: 2724\nModel: [2378.84120057], Actual: 2063\nModel: [4148.6301986], Actual: 3802\nModel: [5223.93867085], Actual: 4887\nModel: [2389.2614933], Actual: 1963\nModel: [358.9280953], Actual: 552\nModel: [427.11756355], Actual: 649\nModel: [1548.69170943], Actual: 1569\nModel: [7500.0498095], Actual: 11830\nModel: [3981.05076344], Actual: 4089\nModel: [3600.13565662], Actual: 3618\nModel: [5650.07864398], Actual: 5458\nModel: [2660.2447161], Actual: 2352\nModel: [819.75135082], Actual: 936\nModel: [9567.7335413], Actual: 9901\nModel: [619.92289606], Actual: 929\n" ] ], [ [ "Let's test another model!", "_____no_output_____" ] ], [ [ "clf = svm.SVR(kernel=\"rbf\")\nclf.fit(X_train, y_train)", "_____no_output_____" ], [ "clf.score(X_test, y_test)", "_____no_output_____" ], [ "for X,y in zip(X_test, y_test):\n print(f\"Model: {clf.predict([X])}, Actual: {y}\")", "Model: [3309.28030506], Actual: 2301\nModel: [833.2631206], Actual: 502\nModel: [1384.52801449], Actual: 1000\nModel: [1995.82907062], Actual: 1928\nModel: [468.36576844], Actual: 605\nModel: [2953.58804944], Actual: 2681\nModel: [5778.00215426], Actual: 9557\nModel: [2541.0010444], Actual: 2057\nModel: [4419.6195572], Actual: 4360\nModel: [3230.41498623], Actual: 2763\nModel: [1176.39301644], Actual: 974\nModel: [1022.73555572], Actual: 752\nModel: [1485.0797959], Actual: 844\nModel: [1521.27167801], Actual: 748\nModel: [5969.38011599], Actual: 6552\nModel: [1364.71210087], Actual: 1356\nModel: [5935.85669044], Actual: 4278\nModel: [6209.74015744], Actual: 11737\nModel: [4054.02797279], Actual: 5706\nModel: [670.48833993], Actual: 776\nModel: [731.54192132], Actual: 550\nModel: [4033.81008915], Actual: 4381\nModel: [3607.4432458], Actual: 3551\nModel: [4814.81467324], Actual: 6830\nModel: [5547.83819048], Actual: 5008\nModel: [1235.8256547], Actual: 590\nModel: [268.69025255], Actual: 561\nModel: [647.5085336], Actual: 462\nModel: [1239.00818628], Actual: 1133\nModel: [5286.30695017], Actual: 5674\nModel: [5717.57928883], Actual: 6955\nModel: [4281.85212255], Actual: 3951\nModel: [3522.24532736], Actual: 4458\nModel: [1850.98166858], Actual: 1250\nModel: [930.07512763], Actual: 826\nModel: [2986.89087506], Actual: 3085\nModel: [1212.94101086], Actual: 506\nModel: [870.68512093], Actual: 1080\nModel: [5469.25924687], Actual: 18706\nModel: [688.77581376], Actual: 603\nModel: [6337.64986328], Actual: 9774\nModel: [3887.34479244], Actual: 4202\nModel: [1181.28876695], Actual: 625\nModel: [4793.58972976], Actual: 5124\nModel: [3983.76338478], Actual: 4011\nModel: [886.20745693], Actual: 949\nModel: [4466.92678384], Actual: 4698\nModel: [6925.58526243], Actual: 18153\nModel: [6518.56239527], Actual: 7270\nModel: [3953.61620815], Actual: 3461\nModel: [1568.35558279], Actual: 1389\nModel: [4053.80510614], Actual: 4476\nModel: [1852.00383519], Actual: 1754\nModel: [4525.93569808], Actual: 4398\nModel: 
[4047.63264579], Actual: 5747\nModel: [1464.1213871], Actual: 1588\nModel: [1348.52171232], Actual: 1287\nModel: [731.70300415], Actual: 957\nModel: [3484.40596332], Actual: 2959\nModel: [529.76919307], Actual: 645\nModel: [1340.38171383], Actual: 956\nModel: [6962.09636291], Actual: 10055\nModel: [7106.24271817], Actual: 8858\nModel: [4533.48969424], Actual: 4543\nModel: [1061.61937536], Actual: 863\nModel: [743.02366389], Actual: 544\nModel: [1814.81706229], Actual: 1707\nModel: [838.90513776], Actual: 636\nModel: [4725.8796609], Actual: 3686\nModel: [4776.4741321], Actual: 6713\nModel: [6775.7495417], Actual: 18487\nModel: [4607.31357196], Actual: 4052\nModel: [3226.406188], Actual: 2940\nModel: [1206.65584941], Actual: 485\nModel: [2595.31960744], Actual: 3004\nModel: [1004.28955414], Actual: 844\nModel: [3473.06926948], Actual: 5027\nModel: [1874.59803139], Actual: 1948\nModel: [6059.68930607], Actual: 8061\nModel: [6110.87045121], Actual: 14704\nModel: [1060.15387779], Actual: 638\nModel: [2057.59928021], Actual: 1880\nModel: [4982.62224715], Actual: 7597\nModel: [602.57126874], Actual: 666\nModel: [5361.38672654], Actual: 7311\nModel: [1585.73066668], Actual: 1384\nModel: [7039.64693274], Actual: 13250\nModel: [7414.71989178], Actual: 11104\nModel: [5030.57352168], Actual: 5306\nModel: [2792.24748817], Actual: 2792\nModel: [3093.38133708], Actual: 2717\nModel: [1363.57947778], Actual: 507\nModel: [4788.06223767], Actual: 5616\nModel: [1344.73379837], Actual: 783\nModel: [512.67429118], Actual: 675\nModel: [7056.57580257], Actual: 7559\nModel: [1986.27998765], Actual: 1766\nModel: [383.45446993], Actual: 743\nModel: [7138.13317813], Actual: 7714\nModel: [1842.29535619], Actual: 1622\nModel: [1212.49826662], Actual: 867\nModel: [931.65717718], Actual: 1053\nModel: [6892.24882096], Actual: 10575\nModel: [1498.64175952], Actual: 1656\nModel: [1213.37267145], Actual: 863\nModel: [6592.0982323], Actual: 14773\nModel: [609.46764975], Actual: 516\nModel: [1875.34919868], Actual: 1936\nModel: [3087.03971851], Actual: 2604\nModel: [944.42386988], Actual: 810\nModel: [1626.71681149], Actual: 1265\nModel: [1924.06117463], Actual: 2001\nModel: [6475.35774294], Actual: 7113\nModel: [1123.97297615], Actual: 956\nModel: [3393.74791321], Actual: 2396\nModel: [5146.09077038], Actual: 6271\nModel: [5178.89111481], Actual: 5824\nModel: [4062.8793265], Actual: 6512\nModel: [4896.25835553], Actual: 5183\nModel: [7197.58428965], Actual: 11180\nModel: [1267.85968614], Actual: 795\nModel: [620.17005834], Actual: 706\nModel: [1415.20373765], Actual: 1578\nModel: [4957.88327676], Actual: 4362\nModel: [3462.76904326], Actual: 3150\nModel: [5983.23702206], Actual: 8564\nModel: [431.19822676], Actual: 605\nModel: [5330.62114114], Actual: 17953\nModel: [5393.77030603], Actual: 6271\nModel: [4081.40740538], Actual: 3950\nModel: [1148.17318755], Actual: 1024\nModel: [5401.48637028], Actual: 5458\nModel: [1194.90081763], Actual: 956\nModel: [1430.02536251], Actual: 710\nModel: [1094.5822776], Actual: 1115\nModel: [1028.46425675], Actual: 756\nModel: [1267.43757305], Actual: 739\nModel: [967.43708568], Actual: 734\nModel: [1347.52089211], Actual: 1301\nModel: [1145.90514521], Actual: 1569\nModel: [3316.44309733], Actual: 2862\nModel: [656.368793], Actual: 658\nModel: [6844.78453589], Actual: 9756\nModel: [918.20049075], Actual: 540\nModel: [1634.78142013], Actual: 1367\nModel: [2591.68398362], Actual: 2543\nModel: [1689.25041578], Actual: 1554\nModel: [6586.45784576], Actual: 10833\nModel: [711.69963983], Actual: 
844\nModel: [660.19580581], Actual: 737\nModel: [783.59485608], Actual: 597\nModel: [5485.91010486], Actual: 6126\nModel: [3571.47854898], Actual: 3387\nModel: [712.40419122], Actual: 911\nModel: [1790.25406376], Actual: 1960\nModel: [2405.38248315], Actual: 2316\nModel: [4282.30018014], Actual: 5000\nModel: [2760.35371955], Actual: 2683\nModel: [691.10995859], Actual: 558\nModel: [4472.43270915], Actual: 4759\nModel: [3586.74092537], Actual: 3590\nModel: [6545.79625336], Actual: 11040\nModel: [572.63327871], Actual: 719\nModel: [3511.7027525], Actual: 4258\nModel: [453.95139455], Actual: 675\nModel: [3559.61961793], Actual: 3442\nModel: [5592.77816662], Actual: 6652\nModel: [3574.55754766], Actual: 4273\nModel: [3336.50706063], Actual: 2732\nModel: [1210.41040754], Actual: 882\nModel: [1635.51408049], Actual: 1445\nModel: [1891.58279881], Actual: 1292\nModel: [1331.93861184], Actual: 1654\nModel: [6000.28432924], Actual: 9251\nModel: [7976.81351539], Actual: 12068\nModel: [7422.12473363], Actual: 13110\nModel: [3203.88175943], Actual: 2873\nModel: [6654.28159814], Actual: 10122\nModel: [1560.67154656], Actual: 1689\nModel: [5649.52227208], Actual: 6190\nModel: [2804.19548579], Actual: 1828\nModel: [922.13659483], Actual: 935\nModel: [4305.18298911], Actual: 4488\nModel: [1949.64584638], Actual: 2066\nModel: [3156.63965983], Actual: 2724\nModel: [2451.4989202], Actual: 2063\nModel: [4046.63681277], Actual: 3802\nModel: [4089.31868568], Actual: 4887\nModel: [2103.64655985], Actual: 1963\nModel: [1359.12188554], Actual: 552\nModel: [638.80717358], Actual: 649\nModel: [1496.008134], Actual: 1569\nModel: [6595.06547343], Actual: 11830\nModel: [3788.94868097], Actual: 4089\nModel: [3139.10062438], Actual: 3618\nModel: [4263.73826265], Actual: 5458\nModel: [2484.75098901], Actual: 2352\nModel: [1020.07269982], Actual: 936\nModel: [6695.93027173], Actual: 9901\nModel: [1311.38042605], Actual: 929\n" ] ], [ [ "**BONUS:** Multiple classifiers could be used, and their performances averaged at the end!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4a6de72ae71b71d3c6fceb0fde7f19fd38d1bde7
6,274
ipynb
Jupyter Notebook
Sort student marks and search for them.ipynb
Parv-Joshi/School-Python-Codes
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
[ "Unlicense" ]
null
null
null
Sort student marks and search for them.ipynb
Parv-Joshi/School-Python-Codes
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
[ "Unlicense" ]
null
null
null
Sort student marks and search for them.ipynb
Parv-Joshi/School-Python-Codes
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
[ "Unlicense" ]
null
null
null
25.608163
151
0.464935
[ [ [ "###### Excercise. Create a program that inputs student marks, sorts them in ascending order, and searches for a particular mark and its position.", "_____no_output_____" ] ], [ [ "def input_marks():\n global a, n\n n = int(input(\"Enter no. of students: \"))\n a = list()\n print(\"Enter\", n, \"marks one by one: \")\n for i in range(n):\n a.append(int(input(f\"Enter mark {i+1}: \")))\n print(\"The original list is:\", a)", "_____no_output_____" ], [ "def sort(a, n):\n a.sort()\n print(\"The sorted list is\", a)", "_____no_output_____" ], [ "def search(a, n):\n find = int(input(\"Enter mark to be searched: \"))\n for i, mark in enumerate(a):\n if mark == find:\n print(find, \"is found in list at position\", i+1)\n", "_____no_output_____" ], [ "def main():\n choice = 0\n while choice != 4:\n print(\"\\n********* MAIN MENU *********\")\n print(\"1. Input Marks\")\n print(\"2. Sort Marks in Ascending order\")\n print(\"3. Sreach for a Mark in list\")\n print(\"4. Exit\")\n print(\"*****************************\\n\")\n choice = eval(input(\"Enter your choice: \"))\n if choice == 1:\n input_marks()\n elif choice == 2:\n sort(a, n)\n elif choice == 3:\n search(a, n)\n elif choice == 4:\n print(\"Ending the program!\")", "_____no_output_____" ], [ "main()", "\n********* MAIN MENU *********\n1. Input Marks\n2. Sort Marks in Ascending order\n3. Sreach for a Mark in list\n4. Exit\n*****************************\n\nEnter your choice: 1\nEnter no. of students: 10\nEnter 10 marks one by one: \nEnter mark 1: 100\nEnter mark 2: 67\nEnter mark 3: 49\nEnter mark 4: 27\nEnter mark 5: 33\nEnter mark 6: 99\nEnter mark 7: 88\nEnter mark 8: 76\nEnter mark 9: 49\nEnter mark 10: 95\nThe original list is: [100, 67, 49, 27, 33, 99, 88, 76, 49, 95]\n\n********* MAIN MENU *********\n1. Input Marks\n2. Sort Marks in Ascending order\n3. Sreach for a Mark in list\n4. Exit\n*****************************\n\nEnter your choice: 3\nEnter mark to be searched: 49\n49 is found in list at position 3\n49 is found in list at position 9\n\n********* MAIN MENU *********\n1. Input Marks\n2. Sort Marks in Ascending order\n3. Sreach for a Mark in list\n4. Exit\n*****************************\n\nEnter your choice: 3\nEnter mark to be searched: 22\n\n********* MAIN MENU *********\n1. Input Marks\n2. Sort Marks in Ascending order\n3. Sreach for a Mark in list\n4. Exit\n*****************************\n\nEnter your choice: 2\nThe sorted list is [27, 33, 49, 49, 67, 76, 88, 95, 99, 100]\n\n********* MAIN MENU *********\n1. Input Marks\n2. Sort Marks in Ascending order\n3. Sreach for a Mark in list\n4. Exit\n*****************************\n\nEnter your choice: 4\nEnding the program!\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a6df9b868664e81147d3aac8d037b4af80b251f
62,384
ipynb
Jupyter Notebook
page-blocks0-seed-5/data preprocessing.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
page-blocks0-seed-5/data preprocessing.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
page-blocks0-seed-5/data preprocessing.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
41.3687
7,346
0.526465
[ [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas import DataFrame\n\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "df=pd.read_csv('../data/page-blocks0.dat')", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "%matplotlib inline\nsns.countplot(x='Outcome',data=df)", "_____no_output_____" ], [ "df_min=df[df['Outcome']==' positive']", "_____no_output_____" ], [ "df_majority=df[df['Outcome']==' negative']", "_____no_output_____" ], [ "df_min.to_csv('page-blocks0_minority.csv',index=False)\ndf_majority.to_csv('page-blocks0_majority.csv',index=False)", "_____no_output_____" ], [ "data=np.array(df.values)\n\npos=data.shape[1]-1\n\nfor i in range(data.shape[0]):\n if data[i][pos]==' negative':\n data[i][pos]=0\n else:\n data[i][pos]=1\n\n\nmin_data=np.array(df_min)\nmaj_data=np.array(df_majority)", "_____no_output_____" ], [ "validation=0.30\nseed=5", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "df_min.shape", "_____no_output_____" ], [ "min_data.shape", "_____no_output_____" ], [ "maj_data.shape", "_____no_output_____" ], [ "data[:1,:10]", "_____no_output_____" ], [ "X=data[:,:10].astype(float)# getting the feature values\nY=data[:,10]# getting prediction", "_____no_output_____" ], [ "X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=validation,random_state=seed)", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ], [ "Y_train=Y_train.reshape((Y_train.shape[0],1))", "_____no_output_____" ], [ "Y_train.shape", "_____no_output_____" ], [ "train_Data=np.concatenate((X_train,Y_train),axis=1)", "_____no_output_____" ], [ "train_Data[:10]", "_____no_output_____" ], [ "Y_test=Y_test.reshape((Y_test.shape[0],1))", "_____no_output_____" ], [ "test_Data=np.concatenate((X_test,Y_test),axis=1)", "_____no_output_____" ], [ "train_Data=DataFrame(train_Data)\ntest_Data=DataFrame(test_Data)", "_____no_output_____" ], [ "%matplotlib inline\nsns.countplot(x=10,data=train_Data)", "_____no_output_____" ], [ "%matplotlib inline\nsns.countplot(x=10,data=test_Data)", "_____no_output_____" ], [ "train_Data", "_____no_output_____" ], [ "train_Data.to_csv('train_Data.csv',index=False)\ntest_Data.to_csv('test_Data.csv',index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e05c8fe785e23257248e11c3e6add3bf3ba61
1,775
ipynb
Jupyter Notebook
challenges/Dunder Data Challenge 006 - Recreate the Tesla Truck with Matplotlib.ipynb
DunderData/Pandas-Challenges
953dce88ffbcc7d408d3b114090b390698b8b968
[ "BSD-3-Clause" ]
15
2019-02-10T09:20:11.000Z
2019-09-09T20:20:33.000Z
challenges/Dunder Data Challenge 006 - Recreate the Tesla Truck with Matplotlib.ipynb
DunderData/Pandas-Challenges
953dce88ffbcc7d408d3b114090b390698b8b968
[ "BSD-3-Clause" ]
1
2019-09-22T04:57:58.000Z
2019-09-22T11:01:48.000Z
challenges/Dunder Data Challenge 006 - Recreate the Tesla Truck with Matplotlib.ipynb
DunderData/Pandas-Challenges
953dce88ffbcc7d408d3b114090b390698b8b968
[ "BSD-3-Clause" ]
7
2019-03-01T01:19:44.000Z
2019-09-15T18:28:52.000Z
23.051948
276
0.568451
[ [ [ "# Recreate the Tesla Cybertruck with Matplotlib\n\nIn this challenge, you will recreate the new [Tesla Cybertruck][0] unveiled last week using matplotlib.\n\n![](../images/tesla.png)\n\n[0]: https://www.tesla.com/cybertruck", "_____no_output_____" ], [ "### Challenge\n\nUse matplotlib to recreate the image directly above.\n\n### Extra Challenge\n\nAdd animation so that it drives off the screen.", "_____no_output_____" ], [ "# Become a pandas expert\n\nIf you are looking to completely master the pandas library and become a trusted expert for doing data science work, check out my book [Master Data Analysis with Python][1]. It comes with over 300 exercises with detailed solutions covering the pandas library in-depth.\n\n[1]: https://www.dunderdata.com/master-data-analysis-with-python", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
4a6e08a278efabcd6731c6ceb2defb990732ff27
12,992
ipynb
Jupyter Notebook
tutorials/robot-marbles-part-3/robot-marbles-part-3.ipynb
jzaki/demos
87953bc4c7abe6ee8fd22d4af8a147952650d7c4
[ "MIT" ]
1
2021-11-22T23:10:54.000Z
2021-11-22T23:10:54.000Z
tutorials/robot-marbles-part-3/robot-marbles-part-3.ipynb
jzaki/demos
87953bc4c7abe6ee8fd22d4af8a147952650d7c4
[ "MIT" ]
null
null
null
tutorials/robot-marbles-part-3/robot-marbles-part-3.ipynb
jzaki/demos
87953bc4c7abe6ee8fd22d4af8a147952650d7c4
[ "MIT" ]
1
2021-11-22T23:10:45.000Z
2021-11-22T23:10:45.000Z
47.764706
291
0.541256
[ [ [ "# cadCAD Tutorials: The Robot and the Marbles, part 3\nIn parts [1](../robot-marbles-part-1/robot-marbles-part-1.ipynb) and [2](../robot-marbles-part-2/robot-marbles-part-2.ipynb) we introduced the 'language' in which a system must be described in order for it to be interpretable by cadCAD and some of the basic concepts of the library:\n* State Variables\n* Timestep\n* State Update Functions\n* Partial State Update Blocks\n* Simulation Configuration Parameters\n* Policies\n\nIn this notebook we'll look at how subsystems within a system can operate in different frequencies. But first let's copy the base configuration with which we ended Part 2. Here's the description of that system:\n\n__The robot and the marbles__ \n* Picture a box (`box_A`) with ten marbles in it; an empty box (`box_B`) next to the first one; and __two__ robot arms capable of taking a marble from any one of the boxes and dropping it into the other one. \n* The robots are programmed to take one marble at a time from the box containing the largest number of marbles and drop it in the other box. They repeat that process until the boxes contain an equal number of marbles.\n* The robots act simultaneously; in other words, they assess the state of the system at the exact same time, and decide what their action will be based on that information.", "_____no_output_____" ] ], [ [ "%%capture\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# List of all the state variables in the system and their initial values\ngenesis_states = {\n 'box_A': 10, # as per the description of the example, box_A starts out with 10 marbles in it\n 'box_B': 0 # as per the description of the example, box_B starts out empty\n}\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Settings of general simulation parameters, unrelated to the system itself\n# `T` is a range with the number of discrete units of time the simulation will run for;\n# `N` is the number of times the simulation will be run (Monte Carlo runs)\n# In this example, we'll run the simulation once (N=1) and its duration will be of 10 timesteps\n# We'll cover the `M` key in a future article. For now, let's omit it\nsim_config_dict = {\n 'T': range(10),\n 'N': 1,\n #'M': {}\n}\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# We specify the robot arm's logic in a Policy Function\ndef robot_arm(params, step, sH, s):\n add_to_A = 0\n if (s['box_A'] > s['box_B']):\n add_to_A = -1\n elif (s['box_A'] < s['box_B']):\n add_to_A = 1\n return({'add_to_A': add_to_A, 'add_to_B': -add_to_A})\n \n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# We make the state update functions less \"intelligent\",\n# ie. 
they simply add the number of marbles specified in _input \n# (which, per the policy function definition, may be negative)\ndef increment_A(params, step, sH, s, _input):\n y = 'box_A'\n x = s['box_A'] + _input['add_to_A']\n return (y, x)\n\ndef increment_B(params, step, sH, s, _input):\n y = 'box_B'\n x = s['box_B'] + _input['add_to_B']\n return (y, x)\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# In the Partial State Update Blocks, \n# the user specifies if state update functions will be run in series or in parallel\n# and the policy functions that will be evaluated in that block\npartial_state_update_blocks = [\n { \n 'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions\n 'robot_arm_1': robot_arm,\n 'robot_arm_2': robot_arm\n },\n 'variables': { # The following state variables will be updated simultaneously\n 'box_A': increment_A,\n 'box_B': increment_B\n }\n }\n]\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n\n\n\n\n#imported some addition utilities to help with configuration set-up\nfrom cadCAD.configuration.utils import config_sim\nfrom cadCAD.configuration import Experiment\nfrom cadCAD import configs\n\ndel configs[:] # Clear any prior configs\n\nexp = Experiment()\nc = config_sim(sim_config_dict)\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# The configurations above are then packaged into a `Configuration` object\nexp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values\n partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions\n sim_configs=c #preprocessed dictionaries containing simulation parameters\n )\n\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\nexec_mode = ExecutionMode()\nlocal_mode_ctx = ExecutionContext(exec_mode.local_mode)\nsimulation = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array\nraw_result, tensor, sessions = simulation.execute() # The `execute()` method returns a tuple; its first elements contains the raw results\n\n%matplotlib inline\nimport pandas as pd\ndf = pd.DataFrame(raw_result)", "_____no_output_____" ], [ "df.plot('timestep', ['box_A', 'box_B'], grid=True, \n xticks=list(df['timestep'].drop_duplicates()), \n colormap = 'RdYlGn',\n yticks=list(range(1+(df['box_A']+df['box_B']).max())));", "_____no_output_____" ] ], [ [ "# Asynchronous Subsystems\nWe have defined that the robots operate simultaneously on the boxes of marbles. 
But it is often the case that agents within a system operate asynchronously, each having their own operation frequencies or conditions.\n\nSuppose that instead of acting simultaneously, the robots in our examples operated in the following manner:\n* Robot 1: acts once every 2 timesteps\n* Robot 2: acts once every 3 timesteps\n\nOne way to simulate the system with this change is to introduce a check of the current timestep before the robots act, with the definition of separate policy functions for each robot arm.", "_____no_output_____" ] ], [ [ "%%capture\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# We specify each of the robots logic in a Policy Function\nrobots_periods = [2,3] # Robot 1 acts once every 2 timesteps; Robot 2 acts once every 3 timesteps\n\ndef get_current_timestep(cur_substep, s):\n if cur_substep == 1:\n return s['timestep']+1\n return s['timestep']\n\ndef robot_arm_1(params, step, sH, s):\n _robotId = 1\n if get_current_timestep(step, s)%robots_periods[_robotId-1]==0: # on timesteps that are multiple of 2, Robot 1 acts\n return robot_arm(params, step, sH, s)\n else:\n return({'add_to_A': 0, 'add_to_B': 0}) # for all other timesteps, Robot 1 doesn't interfere with the system\n\ndef robot_arm_2(params, step, sH, s):\n _robotId = 2\n if get_current_timestep(step, s)%robots_periods[_robotId-1]==0: # on timesteps that are multiple of 3, Robot 2 acts\n return robot_arm(params, step, sH, s)\n else:\n return({'add_to_A': 0, 'add_to_B': 0}) # for all other timesteps, Robot 2 doesn't interfere with the system\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# In the Partial State Update Blocks, \n# the user specifies if state update functions will be run in series or in parallel\n# and the policy functions that will be evaluated in that block\npartial_state_update_blocks = [\n { \n 'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions\n 'robot_arm_1': robot_arm_1,\n 'robot_arm_2': robot_arm_2\n },\n 'variables': { # The following state variables will be updated simultaneously\n 'box_A': increment_A,\n 'box_B': increment_B\n }\n }\n]\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\ndel configs[:] # Clear any prior configs\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# The configurations above are then packaged into a `Configuration` object\nexp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values\n partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions\n sim_configs=c #preprocessed dictionaries containing simulation parameters\n )\n\nexecutor = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array\nraw_result, tensor, sessions = executor.execute() # The `execute()` method returns a tuple; its first elements contains the raw results\n\nsimulation_result = pd.DataFrame(raw_result)", "_____no_output_____" ], [ "simulation_result.plot('timestep', ['box_A', 'box_B'], \n grid=True, \n xticks=list(simulation_result['timestep'].drop_duplicates()), \n yticks=list(range(1+max(simulation_result['box_A'].max(),simulation_result['box_B'].max()))),\n colormap = 'RdYlGn'\n )", "_____no_output_____" ] ], [ [ "Let's take a step-by-step look at what the simulation tells us:\n* Timestep 1: the number of 
marbles in the boxes does not change, as none of the robots act\n* Timestep 2: Robot 1 acts, Robot 2 doesn't; resulting in one marble being moved from box A to box B\n* Timestep 3: Robot 2 acts, Robot 1 doesn't; resulting in one marble being moved from box A to box B\n* Timestep 4: Robot 1 acts, Robot 2 doesn't; resulting in one marble being moved from box A to box B\n* Timestep 5: the number of marbles in the boxes does not change, as none of the robots act\n* Timestep 6: Robots 1 __and__ 2 act, as 6 is a multiple of 2 __and__ 3; resulting in two marbles being moved from box A to box B and an equilibrium being reached.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a6e1a9bad481b8db6443c0484950f93eaad1e3b
149,089
ipynb
Jupyter Notebook
model/data/Data_preparation.ipynb
XDamianX-coder/seq_to_seq_and_dock_AMU
568c1abf6a4affc89e919239c400da1880832853
[ "MIT" ]
null
null
null
model/data/Data_preparation.ipynb
XDamianX-coder/seq_to_seq_and_dock_AMU
568c1abf6a4affc89e919239c400da1880832853
[ "MIT" ]
null
null
null
model/data/Data_preparation.ipynb
XDamianX-coder/seq_to_seq_and_dock_AMU
568c1abf6a4affc89e919239c400da1880832853
[ "MIT" ]
null
null
null
57.078484
13,468
0.68657
[ [ [ "import numpy as np\nimport pandas as pd\nimport os\nfrom pathlib import Path\nimport selfies as sf\nfrom rdkit import Chem\nimport pandas as pd\n", "_____no_output_____" ] ], [ [ "# Molecule retrieval from Zinc20 smi files", "_____no_output_____" ] ], [ [ "Is_data_prepared = True\nif not Is_data_prepared:\n \n tranche_dirs = ['FK', 'DC', 'BB', 'JA', 'HE', 'GA', 'KG', 'IC', 'CB', 'HJ']", "_____no_output_____" ] ], [ [ "### location of zinc20 files and resulting h5 store", "_____no_output_____" ], [ "### The files containing SMILES from ZINC 20 db can be download by running ```ZINC-downloader-2D-smi.wget``` - these requires a lot of free space", "_____no_output_____" ] ], [ [ "if not Is_data_prepared:\n zinc20_path = Path(\"/storage/hdd1/smiles/zinc20/tranche_2.5_375/\") #Path to place where datasets are downloaded\n tranches_name = \"_\".join(tranche_dirs)\n store_path = Path(\"../data/zinc20_\"+tranches_name+\".h5\")", "_____no_output_____" ] ], [ [ "### main retrieval loop", "_____no_output_____" ], [ "#pip install tables", "_____no_output_____" ] ], [ [ "if not Is_data_prepared:\n from_raw_files = True\n if from_raw_files:\n store = pd.HDFStore(store_path.absolute().as_posix(), \"w\")\n for subdir in tranche_dirs:\n smiles_df = pd.DataFrame(columns=[\"smiles\"])\n dir = (zinc20_path/subdir).absolute().as_posix()\n smiles_files = os.listdir(dir)\n for smiles_file in smiles_files:\n path = (zinc20_path/subdir/smiles_file).absolute().as_posix()\n try:\n df = pd.read_csv(path, sep=\" \").set_index(\"zinc_id\")\n smiles_df = pd.concat([smiles_df, df], axis=0)\n except pd.errors.EmptyDataError:\n pass\n store[subdir] = smiles_df\n store.close()", "_____no_output_____" ] ], [ [ "### data sampling, from each subdirectory certain number of compounds is randomly sampled", "_____no_output_____" ] ], [ [ "if not Is_data_prepared:\n store = pd.HDFStore(store_path.absolute().as_posix(), \"r\")\n smiles_df = pd.DataFrame(columns=[\"smiles\"])\n n_sample = 100000\n for tranche in store.keys():\n df = store[tranche]\n n_sample_ = min(df.shape[0], n_sample)\n df = df.sample(n_sample_)\n smiles_df = pd.concat([smiles_df, df], axis=0)\n store.close()\n del df", "_____no_output_____" ], [ "if not Is_data_prepared:\n resulting_file_name = \"zinc20_\"+tranches_name+\"_processed.parquet\"\n resulting_df_path = Path(\"../data\")/resulting_file_name\n smiles_df.to_parquet(resulting_df_path.absolute().as_posix())", "_____no_output_____" ], [ "if not Is_data_prepared:\n os.remove(\"zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ.h5\")", "_____no_output_____" ] ], [ [ "### Check length of translated SELFIES", "_____no_output_____" ] ], [ [ "data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed.parquet')", "_____no_output_____" ], [ "data_to_read = data_to_read.reset_index()", "_____no_output_____" ], [ "data_to_read.shape", "_____no_output_____" ], [ "data_to_read = data_to_read.drop_duplicates(subset=['smiles'])\ndata_to_read = data_to_read.reset_index()\ndel data_to_read['level_0']", "_____no_output_____" ], [ "data_to_read.shape", "_____no_output_____" ], [ "data_to_read.head()", "_____no_output_____" ] ], [ [ "## OLD code\nFile_with_cannonical_SMILES_exist = True #change to false if file is not present\nif File_with_cannonical_SMILES_exist:\n data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed_neutralized.parquet')\n data_to_read['mol'] = [Chem.MolFromSmiles(smi) for smi in data_to_read['smiles']]\n data_to_read['canonical_SMILES'] = data_to_read['mol']\n for i in 
range(len(data_to_read['mol'])):\n try:\n \n data_to_read['canonical_SMILES'][i] = Chem.MolToSmiles(data_to_read['mol'][i], isomericSmiles=False)\n except:\n data_to_read['canonical_SMILES'][i] = None\n data_to_read = data_to_read[data_to_read.canonical_SMILES != None]\n data_to_read = data_to_read.reset_index() #drop=True\n print(data_to_read.shape)\n data_to_read = data_to_read.drop_duplicates(subset=['canonical_SMILES'])\n data_to_read = data_to_read.reset_index() #drop=True\n print(data_to_read.shape)\n del data_to_read['mol']\n data_to_read.to_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed_canonical_.parquet')\nelse:\n data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed.parquet')\n data_to_read = data_to_read.reset_index()\n data_to_read['smiles'] = [Chem.MolToSmiles(get_parent_mol(Chem.MolFromSmiles(smiles), neutralize=True, check_exclusion=True, verbose=False)[0]) for smiles in data_to_read['smiles']]\n print(data_to_read.shape)\n data_to_read = data_to_read.drop_duplicates(subset=['smiles'])\n print(data_to_read.shape)\n data_to_read['mol'] = [Chem.MolFromSmiles(smi) for smi in data_to_read['smiles']]\n data_to_read['canonical_SMILES'] = data_to_read['mol']\n for i in range(len(data_to_read['mol'])):\n try:\n \n data_to_read['canonical_SMILES'][i] = Chem.MolToSmiles(data_to_read['mol'][i], isomericSmiles=False)\n except:\n data_to_read['canonical_SMILES'][i] = None\n data_to_read = data_to_read[data_to_read.canonical_SMILES != None]\n data_to_read = data_to_read.reset_index() #drop=True\n print(data_to_read.shape)\n data_to_read = data_to_read.drop_duplicates(subset=['canonical_SMILES'])\n data_to_read = data_to_read.reset_index() #drop=True\n print(data_to_read.shape)\n del data_to_read['mol']\n data_to_read.to_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed_canonical_.parquet')\n ", "_____no_output_____" ] ], [ [ "data_to_read['SELFIES'] = [sf.encoder(smiles) for smiles in data_to_read['smiles']]", "_____no_output_____" ], [ "data_to_read.shape", "_____no_output_____" ], [ "def SELFIES_length(SELFIES_mol):\n length_selfies = []\n try:\n length_selfies.append(SELFIES_mol.count('['))\n except:\n print('Something went wrong, check source code...')\n return max(length_selfies)", "_____no_output_____" ], [ "data_to_read['SELFIES_length'] = [SELFIES_length(SELFIES) for SELFIES in data_to_read['SELFIES']]", "_____no_output_____" ], [ "data_to_read['SELFIES_length'].hist()", "_____no_output_____" ], [ "data_to_read['SELFIES_length'].max()", "_____no_output_____" ], [ "data_to_read['SELFIES_length'][:250000].hist()", "_____no_output_____" ], [ "data_to_read['SELFIES_length'][:250000].max()", "_____no_output_____" ], [ "dataa = data_to_read[(data_to_read['SELFIES_length'] <= 50) & (data_to_read['SELFIES_length'] >= 30)]", "_____no_output_____" ], [ "dataa", "_____no_output_____" ], [ "dataa['SELFIES_length'].hist()", "_____no_output_____" ], [ "data = dataa.sort_values(by=['SELFIES_length'], ascending=True)", "_____no_output_____" ], [ "data = data.reset_index()", "_____no_output_____" ], [ "del data['level_0']", "_____no_output_____" ], [ "data = data.drop_duplicates(subset=['smiles'])", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "## Gaussian distribution of tranining data", "_____no_output_____" ] ], [ [ "bins = []\nfor i in range(11):\n bins.append(i)", "_____no_output_____" ], [ "bins[1:]", "_____no_output_____" ], [ "one_part_size = []\nfor element in bins[1:11]:\n 
one_part_size.append(element*2)\none_part_size.append(11)", "_____no_output_____" ], [ "one_part_size", "_____no_output_____" ], [ "sum(one_part_size)", "_____no_output_____" ], [ "one_size = (2*121000)/sum(one_part_size)\none_size", "_____no_output_____" ], [ "bins_SELFIES_length_half = []\nfor i in range(30,40):\n bins_SELFIES_length_half.append(i)", "_____no_output_____" ], [ "bins_SELFIES_length_half", "_____no_output_____" ], [ "bins_SELFIES_length_half_2 = []\nfor i in range(41,51):\n bins_SELFIES_length_half_2.append(i)", "_____no_output_____" ], [ "bins_SELFIES_length_half_2.reverse()", "_____no_output_____" ], [ "bins_SELFIES_length_half_2", "_____no_output_____" ], [ "center = 11 #size of median value", "_____no_output_____" ], [ "first_bin = data[data['SELFIES_length'] == 30][:int(one_size)]", "_____no_output_____" ], [ "first_bin['SELFIES_length'].hist()", "_____no_output_____" ] ], [ [ "# Dataset to training", "_____no_output_____" ] ], [ [ "bins_SELFIES_length_to_be_used_half = bins_SELFIES_length_half.copy()\nbins_SELFIES_length_to_be_used_half_2 = bins_SELFIES_length_half_2.copy()\n#bins_SELFIES_length_to_be_used_half_2.reverse()\ncenter_h = center", "_____no_output_____" ], [ "for i, element in enumerate(bins[1:11]):\n bins_SELFIES_length_to_be_used_half[i] = data[data['SELFIES_length'] == bins_SELFIES_length_half[i]][:int(one_size*element)]\n bins_SELFIES_length_to_be_used_half_2[i] = data[data['SELFIES_length'] == bins_SELFIES_length_half_2[i]][:int(one_size*element)]\n ", "_____no_output_____" ], [ "bins_SELFIES_length_to_be_used_half[0].shape", "_____no_output_____" ], [ "bins_SELFIES_length_to_be_used_half[2].shape", "_____no_output_____" ], [ "bins_SELFIES_length_to_be_used_half_2[0].shape", "_____no_output_____" ], [ "center_h = data[data['SELFIES_length'] == 40][:int(one_size*center)]", "_____no_output_____" ], [ "center_h.shape", "_____no_output_____" ], [ "frames = (bins_SELFIES_length_to_be_used_half[0],\n bins_SELFIES_length_to_be_used_half[1],\n bins_SELFIES_length_to_be_used_half[2],\n bins_SELFIES_length_to_be_used_half[3],\n bins_SELFIES_length_to_be_used_half[4],\n bins_SELFIES_length_to_be_used_half[5],\n bins_SELFIES_length_to_be_used_half[6],\n bins_SELFIES_length_to_be_used_half[7],\n bins_SELFIES_length_to_be_used_half[8],\n bins_SELFIES_length_to_be_used_half[9],\n bins_SELFIES_length_to_be_used_half_2[0],\n bins_SELFIES_length_to_be_used_half_2[1],\n bins_SELFIES_length_to_be_used_half_2[2],\n bins_SELFIES_length_to_be_used_half_2[3],\n bins_SELFIES_length_to_be_used_half_2[4],\n bins_SELFIES_length_to_be_used_half_2[5],\n bins_SELFIES_length_to_be_used_half_2[6],\n bins_SELFIES_length_to_be_used_half_2[7],\n bins_SELFIES_length_to_be_used_half_2[8],\n bins_SELFIES_length_to_be_used_half_2[9],\n center_h)", "_____no_output_____" ], [ "data_to_training_and_validation = pd.concat(frames)", "_____no_output_____" ], [ "data_to_training_and_validation.shape", "_____no_output_____" ], [ "#data = data_to_training_and_validation.sort_values(by=['SELFIES_length'], ascending=True)", "_____no_output_____" ], [ "data_to_training_and_validation = data_to_training_and_validation.reset_index()", "_____no_output_____" ], [ "del data_to_training_and_validation['level_0']", "_____no_output_____" ], [ "data_to_training_and_validation.shape", "_____no_output_____" ], [ "data_to_training_and_validation['SELFIES_length'].hist(bins=21)", "_____no_output_____" ] ], [ [ "## Make use of canonical form of SMILES", "_____no_output_____" ] ], [ [ "#Prepare molecule", 
"_____no_output_____" ], [ "mols = [Chem.MolFromSmiles(smi) for smi in data_to_training_and_validation['smiles']]", "_____no_output_____" ], [ "data_to_training_and_validation['SMILES_canonical'] = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in mols]", "_____no_output_____" ], [ "data_to_training_and_validation.head()", "_____no_output_____" ], [ "data_to_training_and_validation['SELFIES_canonical'] = [sf.encoder(smiles) for smiles in data_to_training_and_validation['SMILES_canonical']]", "_____no_output_____" ], [ "data_to_training_and_validation['SELFIES_length_canonical'] = [SELFIES_length(SELFIES) for SELFIES in data_to_training_and_validation['SELFIES_canonical']]", "_____no_output_____" ], [ "data_to_training_and_validation['SELFIES_length_canonical'].hist(bins=21)", "_____no_output_____" ], [ "data_to_training_and_validation.head()", "_____no_output_____" ], [ "#drop duplicates\ndata_to_training_and_validation = data_to_training_and_validation.drop_duplicates(subset=['SMILES_canonical'])\n", "_____no_output_____" ], [ "data_to_training_and_validation = data_to_training_and_validation.reset_index()", "_____no_output_____" ], [ "del data_to_training_and_validation['level_0']", "_____no_output_____" ] ], [ [ "### to be removed\n-> Sn\n\n-> Se\n\n-> B\n\n-> =P\n\n-> P\n", "_____no_output_____" ] ], [ [ "data_to_training_and_validation = data_to_training_and_validation.reset_index()", "_____no_output_____" ], [ "del data_to_training_and_validation['level_0']", "_____no_output_____" ], [ "data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation[\"SMILES_canonical\"].str.contains(\"Sn\") == False]\nprint(data_to_training_and_validation.shape)\n\ndata_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation[\"SMILES_canonical\"].str.contains(\"Si\") == False]\nprint(data_to_training_and_validation.shape)\n\ndata_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation[\"SELFIES_canonical\"].str.contains(\"P\") == False]\nprint(data_to_training_and_validation.shape)\n\ndata_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation[\"SELFIES_canonical\"].str.contains(\"=P\") == False]\nprint(data_to_training_and_validation.shape)\n\ndata_to_training_and_validation_bromium = data_to_training_and_validation[data_to_training_and_validation[\"SMILES_canonical\"].str.contains(\"Br\") == True]\nprint(data_to_training_and_validation_bromium.shape)\n\ndata_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation[\"SMILES_canonical\"].str.contains(\"B\") == False]\nprint(data_to_training_and_validation.shape)\n\nframes = (data_to_training_and_validation, data_to_training_and_validation_bromium)\ndata_to_training_and_validation_to_be_used = pd.concat(frames)\nprint(data_to_training_and_validation_to_be_used.shape)\n\n\n\ndata_to_training_and_validation_to_be_used = data_to_training_and_validation_to_be_used.reset_index()\ndel data_to_training_and_validation_to_be_used['level_0']\nprint(data_to_training_and_validation_to_be_used.shape)\n\ndata_to_training_and_validation_to_be_used = data_to_training_and_validation_to_be_used.sort_values(by=['SELFIES_length'], ascending=True)", "(233694, 7)\n(233688, 7)\n(208691, 7)\n(208691, 7)\n(15513, 7)\n(193163, 7)\n(208676, 7)\n(208676, 7)\n" ], [ "one_size = 121000/sum(one_part_size)\none_size", "_____no_output_____" ], [ "bins_SELFIES_length_to_be_used_half = 
bins_SELFIES_length_half.copy()\nbins_SELFIES_length_to_be_used_half_2 = bins_SELFIES_length_half_2.copy()\n#bins_SELFIES_length_to_be_used_half_2.reverse()\ncenter_h = center", "_____no_output_____" ], [ "for i, element in enumerate(bins[1:11]):\n bins_SELFIES_length_to_be_used_half[i] = data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == bins_SELFIES_length_half[i]][:int(one_size*element)]\n bins_SELFIES_length_to_be_used_half_2[i] = data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == bins_SELFIES_length_half_2[i]][:int(one_size*element)]\n ", "_____no_output_____" ], [ "center_h = data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == 40][:int(one_size*center)]", "_____no_output_____" ], [ "frames = (bins_SELFIES_length_to_be_used_half[0],\n bins_SELFIES_length_to_be_used_half[1],\n bins_SELFIES_length_to_be_used_half[2],\n bins_SELFIES_length_to_be_used_half[3],\n bins_SELFIES_length_to_be_used_half[4],\n bins_SELFIES_length_to_be_used_half[5],\n bins_SELFIES_length_to_be_used_half[6],\n bins_SELFIES_length_to_be_used_half[7],\n bins_SELFIES_length_to_be_used_half[8],\n bins_SELFIES_length_to_be_used_half[9],\n bins_SELFIES_length_to_be_used_half_2[0],\n bins_SELFIES_length_to_be_used_half_2[1],\n bins_SELFIES_length_to_be_used_half_2[2],\n bins_SELFIES_length_to_be_used_half_2[3],\n bins_SELFIES_length_to_be_used_half_2[4],\n bins_SELFIES_length_to_be_used_half_2[5],\n bins_SELFIES_length_to_be_used_half_2[6],\n bins_SELFIES_length_to_be_used_half_2[7],\n bins_SELFIES_length_to_be_used_half_2[8],\n bins_SELFIES_length_to_be_used_half_2[9],\n center_h)", "_____no_output_____" ], [ "data_to_training_and_validation = pd.concat(frames)", "_____no_output_____" ], [ "data_to_training_and_validation.head()", "_____no_output_____" ], [ "data_to_training_and_validation['SELFIES_length_canonical'].hist(bins=21)", "_____no_output_____" ], [ "data_to_training_and_validation = data_to_training_and_validation.reset_index()\ndel data_to_training_and_validation['level_0']", "_____no_output_____" ], [ "resulting_file_name = \"zinc20_\"+'selected_to_create_model'+\"_processed.parquet\"\nresulting_df_path = Path(\"../data\")/resulting_file_name ##Important bug\ndata_to_training_and_validation.to_parquet(resulting_df_path.absolute().as_posix())", "_____no_output_____" ] ], [ [ "### RORgamma active compounds : doi: 10.1038/aps.2014.120", "_____no_output_____" ] ], [ [ "list_of_compounds_names = ['20-Hydroxycholesterol', '22(R)-Hydroxy cholesterol', '25-Hydroxycholesterol','Ursolic acid','Digoxin','T0901317', \n 'SR1001', 'SR1078', 'SR-1555', 'SR2211', 'ML209', 'N-(1-(4-(1,1,1,3,3,3-hexafluoro-2-hydroxypropan-2-yl)benzyl)-1,2,3,4-tetrahydroquinolin-6-yl)acetamide', '2,4-difluoro-N-(1-((4-fluorophenyl)sulfonyl)-1,2,3,4-tetrahydroquinolin-7-yl)benzenesulfonamide', \n '2-Chloro-6-fluoro-N-(1-((4-fluorophenyl)sulfonyl)-1,2,3,4-tetrahydroquinolin-7-yl)benzamide', '(S)-2-fluoro-N-(3-methyl-1-(m-tolylsulfonyl)-2,3-dihydro-1H-pyrido[2,3-b][1,4]oxazin-7-yl)-6-(trifluoromethyl)benzamide', \n '(S)-2-fluoro-N-(3-methyl-1-(m-tolylsulfonyl)-2,3-dihydro-1H-pyrido[2,3-b][1,4]oxazin-7-yl)-6-(trifluoromethyl)benzamide',\n '4-(1-(2-Chloro-6-cyclopropylbenzoyl)-7-fluoro-1H-indazol-3-yl)-3-fluorobenzoicacid', '4-(1-(2-Chloro-6-(trifluoromethyl)benzoyl)-7-fluoro-1H-indazol-3-yl)-2-hydroxycyclohex-3-enecarboxylic acid', 'GSK-1a', 'GSK-1b', 
'GSK-1c', 'GSK-6a', 'GSK-8h', 'GSK-9g',\n 'GSK-2', 'GSK-13', 'GSK-21', '2-(4-(Ethylsulfonyl)phenyl)-N-(6-(3-fluorophenoxy)-[1,1′-biphenyl]-3-yl)acetamide', 'N-(6-(3,5-difluorophenoxy)-3′-fluoro-[1,1′-biphenyl]-3-yl)-2-(4-(N-methylsulfamoyl)phenyl)acetamide',\n 'N-(4-Ethylphenyl)-3-(hydroxymethyl)-Nisobutyl-4-((tetrahydro-2H-pyran-4-yl)methoxy)benzenesulfonamide', 'N-(4-chlorophenyl)-4-((3,5-dimethylisoxazol-4-yl)methoxy)-N-isobutylbenzenesulfonamide',\n 'N-(2,4-dimethylphenyl)-4-(2-hydroxy2-(pyridin-4-yl)ethoxy)-N-isobutylbenzenesulfonamide', 'N-isobutyl-N-((5-(4-(methylsulfonyl)phenyl)thiophen-2-yl)methyl)-1-phenylmethanesulfonamide',\n 'N-(4-(4-acetylpiperazin-1-yl)benzyl)-Nisobutyl-1-phenylmethanesulfonamide', 'N-(3,4-dimethoxyphenyl)-1-ethyl-2-oxo-1,2-dihydrobenzo[cd]indole-6-sulfonamide', 'JTE-151']", "_____no_output_____" ], [ "list_of_compounds_smiles = ['CC(C)CCC[C@@](C)([C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C)O',\n 'C[C@@H]([C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C)[C@@H](CCC(C)C)O',\n 'C[C@H](CCCC(C)(C)O)[C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C',\n 'C[C@@H]1CC[C@@]2(CC[C@@]3(C(=CC[C@H]4[C@]3(CC[C@@H]5[C@@]4(CC[C@@H](C5(C)C)O)C)C)[C@@H]2[C@H]1C)C)C(=O)O',\n 'C[C@@H]1[C@H]([C@H](C[C@@H](O1)O[C@@H]2[C@H](O[C@H](C[C@@H]2O)O[C@@H]3[C@H](O[C@H](C[C@@H]3O)O[C@H]4CC[C@]5([C@@H](C4)CC[C@@H]6[C@@H]5C[C@H]([C@]7([C@@]6(CC[C@@H]7C8=CC(=O)OC8)O)C)O)C)C)C)O)O',\n 'C1=CC=C(C=C1)S(=O)(=O)N(CC(F)(F)F)C2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O',\n 'CC1=C(SC(=N1)NC(=O)C)S(=O)(=O)NC2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O',\n 'C1=CC(=CC=C1C(=O)NC2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O)C(F)(F)F',\n 'CC(=O)N1CCN(CC1)CC2=CC=C(C=C2)C3=CC=C(C=C3)C(C(F)(F)F)(C(F)(F)F)O',\n 'C1CN(CCN1CC2=CC=C(C=C2)C3=C(C=C(C=C3)C(C(F)(F)F)(C(F)(F)F)O)F)CC4=CC=NC=C4',\n 'C[C@@H]1C[C@@H](CN(C1)C(=O)CC(C2=CC3=C(C=C2)OCO3)C4=C(C=C(C=C4OC)OC)O)C',\n 'CC(=O)NC1=CC2=C(C=C1)N(CC1=CC=C(C=C1)C(O)(C(F)(F)F)C(F)(F)F)CCC2',\n 'C1CC2=C(C=C(C=C2)NS(=O)(=O)C3=C(C=C(C=C3)F)F)N(C1)S(=O)(=O)C4=CC=C(C=C4)F',\n 'C1CC2=C(C=C(C=C2)NC(=O)C3=C(C=CC=C3Cl)F)N(C1)S(=O)(=O)C4=CC=C(C=C4)F',\n 'CC1CN(C2=C(O1)N=CC(NC(=O)C1=C(F)C=CC=C1C(F)(F)F)=C2)S(=O)(=O)C1=CC=CC(C)=C1',\n 'OC(=O)C1=CC(F)=C(C=C1)C1=NN(C(=O)C2=C(C=CC=C2Cl)C2CC2)C2=C1C=CC=C2F',\n 'OC1C=C(CCC1C(O)=O)C1=NN(C(=O)C2=C(Cl)C=CC=C2C(F)(F)F)C2=C1C=CC=C2F',\n 'OC1C=C(CCC1C(O)=O)C1=NN(C(=O)C2=C(Cl)C=CC=C2C(F)(F)F)C2=C(F)C=CC=C12',\n 'CC1=CC=CC(=C1)C(=O)NC1=CC(=NO1)C1=CC=CC=C1',\n 'CC1=C(NC(=O)C2=NC3=C(C)C=C(C)C=C3S2)SC=C1',\n 'CCC(=O)NC1=CC2=NN(N=C2C=C1)C1=CC=C(CC)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=NC(=CS2)C2=CC(Cl)=CC=C2Cl)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=NC(=C(S2)C(=O)C2=C(Cl)C=CC=C2)C2=CC=CC(Cl)=C2)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(S2)C(=O)C2=CC(F)=CC=C2)C2=CC(Cl)=CC=C2)C=C1',\n 'CCCN(CC1=CC=CC=C1)C1=CC=C(NC(=O)CC2=CC=C(C=C2)S(=O)(=O)CC)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC=C3N(CCC4=CC=C(C=C4)C(F)(F)F)C=CC3=C2)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC=C3N(CCC4=C(Cl)C=C(Cl)C=C4)C=CC3=C2)C=C1',\n 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(OC3=CC=CC(F)=C3)C=C2)C2=CC=CC=C2)C=C1',\n 'CNS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(OC3=CC(F)=CC(F)=C3)C=C2)C2=CC=CC(F)=C2)C=C1',\n 'CCC1=CC=C(C=C1)N(CC(C)C)S(=O)(=O)C2=CC(=C(C=C2)OCC3CCOCC3)CO',\n 'CC(C)CN(C1=CC=C(Cl)C=C1)S(=O)(=O)C1=CC=C(OCC2=C(C)ON=C2C)C=C1',\n 'CC(C)CN(C1=CC=C(C)C=C1C)S(=O)(=O)C1=CC=C(OCC(O)C2=CC=NC=C2)C=C1',\n 'CC(C)CN(CC1=CC=C(S1)C1=CC=C(C=C1)S(C)(=O)=O)S(=O)(=O)CC1=CC=CC=C1',\n 'CC(C)CN(CC1=CC=C(C=C1)N1CCN(CC1)C(C)=O)S(=O)(=O)CC1=CC=CC=C1',\n 
'CCN1C(=O)C2=CC=CC3=C(C=CC1=C23)S(=O)(=O)NC1=CC(OC)=C(OC)C=C1',\n 'CC(C)CC1=CC(=NO1)C1=C(C2CC2)C(=NO1)C(CCC(O)=O)CC(=O)NC1=CC=C(C)C=C1Cl']", "_____no_output_____" ], [ "compounds_activity = ['RORγ agonist', 'RORγ agonist', 'RORγ agonist','RORγ inverse agonist','RORγ inverse agonist','RORα/γ inverse agonist', \n 'RORα/γ inverse agonist', 'RORα/γ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', \n 'RORγ inverse agonist', 'RORγ inverse agonist', \n 'RORγ inverse agonist',\n 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ agonist', 'RORγ agonist', 'RORγ agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist',\n 'RORγ agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ inverse agonist',\n 'RORγ inverse agonist', 'RORγ inverse agonist',\n 'RORγ inverse agonist', 'RORγ inverse agonist',\n 'RORγ inverse agonist', 'RORγ inverse agonist', 'RORγ antagonist']", "_____no_output_____" ], [ "len(list_of_compounds_names)", "_____no_output_____" ], [ "len(list_of_compounds_smiles)", "_____no_output_____" ], [ "len(compounds_activity)", "_____no_output_____" ], [ "list_of_compounds_names", "_____no_output_____" ], [ "list_of_compounds_smiles", "_____no_output_____" ], [ "def print_name_and_structure(name, structure):\n print(list_of_compounds_names[name])\n mol = Chem.MolFromSmiles(list_of_compounds_smiles[structure])\n return mol", "_____no_output_____" ], [ "print_name_and_structure(int(input(\"Number of name\")), int(input(\"Number of structure\")))", "Number of name12\nNumber of structure12\n2,4-difluoro-N-(1-((4-fluorophenyl)sulfonyl)-1,2,3,4-tetrahydroquinolin-7-yl)benzenesulfonamide\n" ], [ "def SELFIES_length(SELFIES_mol):\n length_selfies = []\n try:\n length_selfies.append(SELFIES_mol.count('['))\n except:\n print('Something went wrong, check source code...')\n return max(length_selfies)", "_____no_output_____" ] ], [ [ "## Create dataframe from RORgamma drugs", "_____no_output_____" ] ], [ [ "dataframe = pd.DataFrame(data=list_of_compounds_names, columns=['Compound name'])", "_____no_output_____" ], [ "mols = [Chem.MolFromSmiles(smi) for smi in list_of_compounds_smiles]", "_____no_output_____" ], [ "dataframe['SMILES_canonical'] = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in mols]", "_____no_output_____" ], [ "dataframe['SELFIES'] = [sf.encoder(smiles) for smiles in dataframe['SMILES_canonical']]", "_____no_output_____" ], [ "dataframe['Activity type'] = compounds_activity", "_____no_output_____" ], [ "dataframe['SELFIES_length'] = [SELFIES_length(SELFIES) for SELFIES in dataframe['SELFIES']]", "_____no_output_____" ], [ "dataframe['SELFIES_length']", "_____no_output_____" ], [ "dataframe.to_excel('RORgamma_active_compounds.xlsx')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e39396ea1c60a50ba73b1b16113cd5c6efed3
81,267
ipynb
Jupyter Notebook
python_basics.ipynb
ipfn-hpl/python-basics
2fbc46d69e9dad6e0fce56afb134c8a31e5a2d81
[ "CC0-1.0" ]
null
null
null
python_basics.ipynb
ipfn-hpl/python-basics
2fbc46d69e9dad6e0fce56afb134c8a31e5a2d81
[ "CC0-1.0" ]
null
null
null
python_basics.ipynb
ipfn-hpl/python-basics
2fbc46d69e9dad6e0fce56afb134c8a31e5a2d81
[ "CC0-1.0" ]
null
null
null
93.841801
41,560
0.874045
[ [ [ "# Python: \n## basic features\nhttps://www.python.org/", "_____no_output_____" ] ], [ [ "print(\"Hello, World!\")", "_____no_output_____" ], [ "a = 5\nb = 2", "_____no_output_____" ], [ "a + b", "_____no_output_____" ], [ "1 + a * b", "_____no_output_____" ], [ "a ** b", "_____no_output_____" ], [ "# different in python 3: a//b\n# for same behaviour run: from __future__ import division\na / b", "_____no_output_____" ], [ "a / float(b)", "_____no_output_____" ], [ "a % b", "_____no_output_____" ], [ "min(a, b)", "_____no_output_____" ], [ "a == b", "_____no_output_____" ], [ "a != b", "_____no_output_____" ], [ "a += 3\na", "_____no_output_____" ], [ "# Python Lists\na = [1, \"hello\", 5.5]\na", "_____no_output_____" ], [ "len(a)", "_____no_output_____" ], [ "a[2]", "_____no_output_____" ], [ "a.append(\"how are you?\")\na", "_____no_output_____" ], [ "for x in a:\n print(x)", "_____no_output_____" ], [ "for i, x in enumerate(a):\n print(\"element {}: {}\".format(i, x))", "_____no_output_____" ], [ "a[0] = 10\na", "_____no_output_____" ], [ "# Python Tuples:\nb = (-1, \"bye\", 'c')\nb", "_____no_output_____" ], [ "b[-1]", "_____no_output_____" ], [ "b[0] = 10\nb", "_____no_output_____" ], [ "x, y = b", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "# Python Dictionaries (Keys, values)\na = {\"name\":\"Mary\", \"age\":23, \"sign\":\"capricorn\"}\na", "_____no_output_____" ], [ "a[1]", "_____no_output_____" ], [ "a[\"job\"] = \"student\"\na", "_____no_output_____" ], [ "# Python Funtions\ndef f(a, b=4, c=5):\n if a > 2 and b < 10:\n return a\n elif c == 5:\n return b\n else:\n return a + b + c", "_____no_output_____" ], [ "f(4)", "_____no_output_____" ], [ "f(4, 11)", "_____no_output_____" ], [ "f(4, c=6, b=11)", "_____no_output_____" ] ], [ [ "# NumPy: multi-dimensional arrays and scientific computing\nhttps://www.numpy.org/", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "a = np.array([0, 2, 4, 6, 8, 10, 12, 14, 16])\na", "_____no_output_____" ], [ "a.ndim", "_____no_output_____" ], [ "a.shape", "_____no_output_____" ], [ "a[2]", "_____no_output_____" ], [ "a[2:]", "_____no_output_____" ], [ "a[:4]", "_____no_output_____" ], [ "a[2:7]", "_____no_output_____" ], [ "a[2:7:2]", "_____no_output_____" ], [ "a[-1]", "_____no_output_____" ], [ "a[::-1]", "_____no_output_____" ], [ "a[[0, 4, 5]]", "_____no_output_____" ], [ "b = a > 3\nb", "_____no_output_____" ], [ "a[b]", "_____no_output_____" ], [ "a = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])\na", "_____no_output_____" ], [ "a.ndim", "_____no_output_____" ], [ "a.shape", "_____no_output_____" ], [ "a[1, 2]", "_____no_output_____" ], [ "a[0]", "_____no_output_____" ], [ "a[:, 1:3]", "_____no_output_____" ], [ "a.T", "_____no_output_____" ], [ "a + 10", "_____no_output_____" ], [ "a ** 2", "_____no_output_____" ], [ "a * [10, 20, 30, 40]", "_____no_output_____" ], [ "np.sin(a)", "_____no_output_____" ], [ "np.mean(a)", "_____no_output_____" ], [ "a.mean(axis=1)", "_____no_output_____" ], [ "np.max(a)", "_____no_output_____" ], [ "np.max(a, axis=1)", "_____no_output_____" ], [ "np.arange(10)", "_____no_output_____" ], [ "np.linspace(2, 4, 5)", "_____no_output_____" ], [ "np.zeros((2, 3))", "_____no_output_____" ], [ "np.full((2, 3), 2.5)", "_____no_output_____" ] ], [ [ "# matplotlib: plotting\nhttps://matplotlib.org/", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "#%matplotlib notebook\n%matplotlib 
inline", "_____no_output_____" ], [ "x = np.linspace(-5, 5, 50)\ny = np.sin(x)\ny2 = y ** 2\ny3 = -x / 5", "_____no_output_____" ], [ "plt.figure()\nplt.plot(x, y, label='sin')\nplt.plot(x, y2, '.', label='$\\sin^{2}$')\nplt.plot(x, y3, linewidth=3)\nplt.annotate('example text', xy=(0.5, -0.75))\nplt.xlabel(\"X axis\")\nplt.ylabel(\"Y axis\")\nplt.title(\"Example plot\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "fig, ax = plt.subplots(2, sharex=True)\nax[0].plot(x, y)\nax[1].plot(x, y2)\nax[1].set_ylabel('y axis')\nplt.show()", "_____no_output_____" ], [ "y, x = np.mgrid[0:20, 0:30]\nz = (x - 4)**2+ y**2\nplt.figure()\nplt.pcolormesh(x, y, z, shading='auto')\nplt.show()", "_____no_output_____" ] ], [ [ "# SciPy: extra modules for scientific computation\nhttps://www.scipy.org/", "_____no_output_____" ] ], [ [ "from scipy.optimize import curve_fit\nimport numpy as np\nimport matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "def f(x, a, b, c):\n return a * np.exp(-b * x) + c", "_____no_output_____" ], [ "n = 60\nx = np.linspace(0, 5, n)\ny = f(x, 5, 2, 0.5) + 2 * np.random.rand(n)", "_____no_output_____" ], [ "popt, pcov = curve_fit(f, x, y)\nperr = np.sqrt(np.diag(pcov))\ny_fit = f(x, *popt)\nmsd = np.sum((y - y_fit) ** 2) / n", "_____no_output_____" ], [ "pnames = ['a', 'b', 'c']\nresults = ''\nfor name, value, error in zip(pnames, popt, perr):\n results += '{} = {:.2f}$\\pm${:.2f}\\n'.format(name, value, error)\nresults += 'MSD = {:.2f}'.format(msd)\n \nplt.plot(x, y, '.', label='data')\nplt.plot(x, y_fit, label='fit: $ae^{-bx} + c$')\nplt.annotate(results, xy=(0.7, 0.55), xycoords='axes fraction')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "%run langmuir_fit.py\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e5169a4dc82798dcb59e30a2d972e83a026bd
616
ipynb
Jupyter Notebook
Python-Week 1/10 Aug(Day 2).ipynb
ParadoxPD/Intro-to-machine-learning
3dc4e662e2f7a3ca2099114eb3eb2d80cd5c3f4b
[ "MIT" ]
null
null
null
Python-Week 1/10 Aug(Day 2).ipynb
ParadoxPD/Intro-to-machine-learning
3dc4e662e2f7a3ca2099114eb3eb2d80cd5c3f4b
[ "MIT" ]
null
null
null
Python-Week 1/10 Aug(Day 2).ipynb
ParadoxPD/Intro-to-machine-learning
3dc4e662e2f7a3ca2099114eb3eb2d80cd5c3f4b
[ "MIT" ]
null
null
null
18.117647
48
0.543831
[]
[]
[]
4a6e600713640bf858e832ef5d8318b76524fb0e
203,911
ipynb
Jupyter Notebook
code/Por_Tiempo_Transcurrido.ipynb
SebsPER/EB-2021-1-CC51
c4b8a8d1c42484a9b295432864d2d3e8c593cf7f
[ "MIT" ]
null
null
null
code/Por_Tiempo_Transcurrido.ipynb
SebsPER/EB-2021-1-CC51
c4b8a8d1c42484a9b295432864d2d3e8c593cf7f
[ "MIT" ]
null
null
null
code/Por_Tiempo_Transcurrido.ipynb
SebsPER/EB-2021-1-CC51
c4b8a8d1c42484a9b295432864d2d3e8c593cf7f
[ "MIT" ]
null
null
null
416.144898
81,404
0.928116
[ [ [ "# ¿Cómo ha cambiado el volumen de los videos en tendencia a lo largo del tiempo?", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport json\nimport datetime\n%matplotlib inline", "_____no_output_____" ], [ "Frvids = pd.read_csv('FRvideos_Clean.csv')\nFrvids = Frvids.drop(columns=['Unnamed: 0'])", "_____no_output_____" ], [ "Frvids['trending_date'] = pd.to_datetime(Frvids['trending_date'])\nFrvids['publish_time'] = pd.to_datetime(Frvids['publish_time'])\n#Frvids = Frvids.set_index(Frvids['trending_date'])", "_____no_output_____" ], [ "Frvids.head()", "_____no_output_____" ], [ "Frvids['trending_date'].value_counts()", "_____no_output_____" ] ], [ [ "## Videos tendencia con el tiempo", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(20, 7))\nFrvids.groupby(Frvids['trending_date'].rename('Fechas')).size().plot()\nplt.ylabel(\"Cantidad de videos\")\nplt.title(\"Distribucion de videos tendencia con el tiempo\")", "_____no_output_____" ] ], [ [ "## Solo 2018", "_____no_output_____" ] ], [ [ "d1 = datetime.datetime(2017, 12, 31)\nFplot = Frvids[Frvids['trending_date']>d]\nFplot['trending_date'] = pd.to_datetime(Fplot['trending_date'])\nplt.figure(figsize=(20, 7))\nFplot.groupby(Fplot['trending_date'].rename('Fechas')).size().plot(marker='o', linestyle='--')", "C:\\Users\\Sebastian\\anaconda3\\envs\\rstudio\\lib\\site-packages\\ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ] ], [ [ "## Videos tendencia por mes", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 7))\nFrvids.groupby(Frvids['trending_date'].rename('Meses').dt.month).size().plot(marker='o', linestyle='--')\nplt.ylabel(\"Cantidad de videos\")\nplt.title(\"Distribucion de videos tendencia por mes\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6e6121b0bd57d9abcdf1e99d0d3ece11a84564
21,806
ipynb
Jupyter Notebook
examples/DC2_examples/DC2_gt_profiles.ipynb
96RadhikaJadhav/CLMM
cd0508f82f9a6a4692fe785277ac25c73e89d0d7
[ "BSD-3-Clause" ]
null
null
null
examples/DC2_examples/DC2_gt_profiles.ipynb
96RadhikaJadhav/CLMM
cd0508f82f9a6a4692fe785277ac25c73e89d0d7
[ "BSD-3-Clause" ]
null
null
null
examples/DC2_examples/DC2_gt_profiles.ipynb
96RadhikaJadhav/CLMM
cd0508f82f9a6a4692fe785277ac25c73e89d0d7
[ "BSD-3-Clause" ]
null
null
null
39.647273
589
0.606393
[ [ [ "# Compare tangential shear profiles from the extragalactic and object catalogs for DC2 Run 2.1i\n\nThis notebook can be run at NERSC or CC-IN2P3 where the DESC DC2 products are stored. You need to be a DESC member to be able to access those. The DC2 catalog-related imports below (`FoFCatalogMatching`, `GCR` and `GCRCatalogs`) are readily available from the `desc` conda environement at NERC or CC-IN2P3. If working outside such environment, these packagea first need to be installed. \n\nThis was put together using:\n- the DC2 analysis tutorials (in particular [matching_fof.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/matching_fof.ipynb) and [object_gcr_2_lensing_cuts.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb))\n- the CLMM usage examples\n", "_____no_output_____" ] ], [ [ "# General imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom astropy.table import Table\n\n# DC2 catalog-related imports\nimport FoFCatalogMatching\nimport GCRCatalogs\nfrom GCR import GCRQuery\n\n#CLMM imports\ntry: import clmm\nexcept:\n import notebook_install\n notebook_install.install_clmm_pipeline(upgrade=False)\n import clmm", "_____no_output_____" ] ], [ [ "### 1. Load the catalogs\n- DC2 object catalog\n- DC2 extragalactic catalog (cosmoDC2)", "_____no_output_____" ] ], [ [ "object_cat = GCRCatalogs.load_catalog('dc2_object_run2.1i_dr1')", "_____no_output_____" ], [ "extragalactic_cat = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_small')", "_____no_output_____" ] ], [ [ "### 2. Identify one halo in the extragalactic catalog\nChoosing the most massive one below z = 0.4. The `halo_mass` field of the cosmoDC2 catalog gives the mass in units of M$_{\\odot}$.", "_____no_output_____" ] ], [ [ "# get list of massive halos in a given redshift and mass range\nmmin = 5.e14 #Msun\nzmax = 0.4\n\nmassive_halos = extragalactic_cat.get_quantities(['halo_mass','hostHaloMass','redshift','ra', 'dec', 'halo_id'],\\\n filters=[f'halo_mass > {mmin}','is_central==True',\n f'redshift<{zmax}'])\n\nN_cl = len(massive_halos['halo_mass'])\nprint(f'There are {N_cl} clusters in that mass and redshift ranges')", "_____no_output_____" ], [ "# Selecting the most massive one\nselect = massive_halos['halo_mass'] == np.max(massive_halos['halo_mass'])\nra_cl = massive_halos['ra'][select][0]\ndec_cl = massive_halos['dec'][select][0]\nz_cl = massive_halos['redshift'][select][0]\nmass_cl =massive_halos['halo_mass'][select][0]\nid_cl = massive_halos['halo_id'][select][0]\n\nprint (f'The most massive cluster is halo {id_cl}, in ra = {ra_cl:.2f} deg, dec = {dec_cl:.2f} deg, z = {z_cl:.2f}, with mass = {mass_cl:.2e} Msun')", "_____no_output_____" ] ], [ [ "### 3. Selection of background galaxies around the cluster\n- Define cuts on the cosmoDC2 and object catalogs. 
\n  - Box of 0.7 deg around the cluster center\n  - Galaxies with z > z_cluster + 0.1\n  - Galaxies with mag_i < 24.5\n- We also add some WL quality cuts for the object catalog.\n- The two catalogs will then be matched to end up with the same selection of galaxies.", "_____no_output_____" ], [ "#### 3.1 Cut definition\n\nNB: the object catalog quality cuts follow those given in the [object_gcr_2_lensing_cuts.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb) notebook.", "_____no_output_____" ] ], [ [ "# Coordinate filter to be applied to both extragalactic and object catalog\nra_min, ra_max = ra_cl-0.35, ra_cl+0.35\ndec_min, dec_max = dec_cl-0.35, dec_cl+0.35\n\ncoord_filters = [\n    f'ra >= {ra_min}',\n    f'ra < {ra_max}',\n    f'dec >= {dec_min}',\n    f'dec < {dec_max}',\n]\n\n# Redshift cut to be applied to the extragalactic catalog. The object catalog does not have redshift information.\nz_min = z_cl + 0.1\nredshift_filters = [\n    (np.isfinite, 'redshift'),\n    f'redshift > {z_min}',\n]\n\n# Magnitude cut to be applied to both catalogs\nmag_filters = [\n    (np.isfinite, 'mag_i'),\n    'mag_i < 24.5',\n]\n\n# Following DC2 tutorials, basic cuts to be applied to the object catalog\nobject_basic_cuts = [\n    GCRQuery('extendedness > 0'),     # Extended objects\n    GCRQuery((np.isfinite, 'mag_i')), # Select objects that have i-band magnitudes\n    GCRQuery('clean'), # The source has no flagged pixels (interpolated, saturated, edge, clipped...) \n                       # and was not skipped by the deblender\n    GCRQuery('xy_flag == 0'),                                      # Flag for centroid measurement (0 if OK)\n    GCRQuery('ext_shapeHSM_HsmShapeRegauss_flag == 0'),            # Flag returned by shape measurement code (0 if OK)\n    GCRQuery((np.isfinite, 'ext_shapeHSM_HsmShapeRegauss_sigma')), # Shape measurement uncertainty should not be NaN\n]\n\n# Adding the total ellipticity quantity to the object catalog\nobject_cat.add_quantity_modifier('shape_hsm_regauss_etot', \n                                 (np.hypot, 'ext_shapeHSM_HsmShapeRegauss_e1', 'ext_shapeHSM_HsmShapeRegauss_e2'), \n                                 overwrite=True)\n\n# Following DC2 tutorials, additional WL quality cuts to be applied to the object catalog\nobject_properties_cuts = [\n    GCRQuery('snr_i_cModel > 10'),                              # SNR > 10\n    GCRQuery('mag_i_cModel < 24.5'),                            # cModel imag brighter than 24.5\n    GCRQuery('ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3'), # Sufficiently resolved galaxies compared to PSF\n    GCRQuery('shape_hsm_regauss_etot < 2'),                     # Total distortion in reasonable range\n    GCRQuery('ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4'),      # Shape measurement errors reasonable\n    GCRQuery('blendedness < 10**(-0.375)')                      # Avoid spurious detections and those contaminated by blends\n]", "_____no_output_____" ] ], [ [ "#### 3.2 Load quantities from both catalogs, given the cuts defined above", "_____no_output_____" ] ], [ [ "extragal_data = extragalactic_cat.get_quantities(['ra', 'dec', 'shear_1', 'shear_2', \n                                                  'ellipticity_1_true', 'ellipticity_2_true',\n                                                  'redshift', 'convergence', 'galaxy_id'], \n                                                 filters=(coord_filters + mag_filters + redshift_filters))", "_____no_output_____" ] ], [ [ "For the object catalog below, the field under scrutiny falls in tract 3448. A DM-stack installation is required to identify a tract given a set of coordinates (this was done separately from this notebook). 
In any case, specifying that tract using `native_filters` speeds up the process but is not required.\n", "_____no_output_____" ] ], [ [ "object_data = object_cat.get_quantities(['ra', 'dec',\n                                          'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2', \n                                          'id'],\n                                         native_filters=['tract == 3448'],\n                                         filters=(coord_filters + object_basic_cuts + object_properties_cuts))", "_____no_output_____" ] ], [ [ "### 4. Match the 2 catalogs\n\nUsing the `FoFCatalogMatching` method; this was exemplified in the [DC2 analysis tutorial](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/matching_fof.ipynb) and adapted to our purpose here. As mentioned in the tutorial, *`FoFCatalogMatching.match` takes a dictionary of catalogs to match and a friends-of-friends linking length. Because the \"catalog\" is not an astropy table or pandas dataframe, `len(truth_coord)` won't give the actual length of the table so we need to specify `catalog_len_getter` so that the code knows how to get the length of the catalog.*\n\nNB: `linking_lengths` is in arcsec. Here, we ask `FoFCatalogMatching` to use a linking length of 1 arcsec.\n", "_____no_output_____" ], [ "#### 4.1 Perform the matching", "_____no_output_____" ] ], [ [ "results = FoFCatalogMatching.match(\n    catalog_dict={'extragal': extragal_data, 'object': object_data},\n    linking_lengths=1.,\n    catalog_len_getter=lambda x: len(x['ra']),\n)\n\n# identify which rows are from the extragalactic catalog and which are from the object\nextragal_mask = results['catalog_key'] == 'extragal'\nobject_mask = ~extragal_mask\n\n# np.bincount will give us the number of id occurrences (like histogram but with integer input)\nn_groups = results['group_id'].max() + 1\nn_extragal = np.bincount(results['group_id'][extragal_mask], minlength=n_groups)\nn_object = np.bincount(results['group_id'][object_mask], minlength=n_groups)", "_____no_output_____" ] ], [ [ "#### 4.2 Identify one-to-one extragal/object matches", "_____no_output_____" ] ], [ [ "one_to_one_group_mask = np.in1d(results['group_id'], np.flatnonzero((n_extragal == 1) & (n_object == 1)))\n\n# Row indices in the *original* extragal/object catalogs for those 1-to-1 groups\nextragal_idx = results['row_index'][one_to_one_group_mask & extragal_mask]\nobject_idx = results['row_index'][one_to_one_group_mask & object_mask]\nprint(f'Number of 1-to-1 matched objects: {len(extragal_idx)}, {len(object_idx)}')", "_____no_output_____" ] ], [ [ "### 5. Compute the reduced tangential shear profiles from both datasets, using CLMM", "_____no_output_____" ], [ "#### 5.1 First, dealing with the cosmoDC2 data.\nTo measure a reduced tangential shear profile, the shape measurements must be made according to the $\\epsilon$ or reduced shear definition $g$. So first, we convert cosmoDC2 `shear1` and `shear2` quantities to reduced shear using the `convergence`. 
These become the `e1` and `e2` fields of the CLMM cluster galaxy catalog.", "_____no_output_____" ] ], [ [ "e1, e2 = clmm.utils.convert_shapes_to_epsilon(extragal_data['shear_1'][extragal_idx],extragal_data['shear_2'][extragal_idx],\n                                              shape_definition='shear',kappa=extragal_data['convergence'][extragal_idx])\n\n# Create the background galaxy catalog as a CLMM GCData (= astropy table)\ndat = clmm.GCData([extragal_data['ra'][extragal_idx],extragal_data['dec'][extragal_idx],e1,\n                   e2,extragal_data['redshift'][extragal_idx],extragal_data['galaxy_id'][extragal_idx]], \n                  names=('ra','dec', 'e1', 'e2', 'z','id'))\n\n# Instantiate a CLMM cluster object and save it for later use.\ncl_from_cosmoDC2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) \ncl_from_cosmoDC2.save('cosmoDC2_GC.pkl')", "_____no_output_____" ] ], [ [ "#### 5.2 Second, doing the same for the DC2 object catalog\nIn the object catalog, shapes are measured by `shapeHSM`, which returns ellipticities according to the $\\chi$ definition. We need to convert to the $\\epsilon$ definition, once again using the conversion helper function from CLMM. ", "_____no_output_____" ] ], [ [ "e1, e2 = clmm.utils.convert_shapes_to_epsilon(object_data['ext_shapeHSM_HsmShapeRegauss_e1'][object_idx],\n                                              object_data['ext_shapeHSM_HsmShapeRegauss_e2'][object_idx],\n                                              shape_definition='chi')\n\n# The conversion may create NaN, so avoid these by creating a mask\nmask = np.isfinite(e1)", "_____no_output_____" ] ], [ [ "The object catalog has no redshift information so we'll use the redshift of the matched galaxies in cosmoDC2 to create the GalaxyCluster object.", "_____no_output_____" ] ], [ [ "# Create the background galaxy catalog as a CLMM GCData (= astropy table)\ndat = clmm.GCData([object_data['ra'][object_idx][mask],object_data['dec'][object_idx][mask],\n                   e1[mask],\n                   e2[mask],\n                   extragal_data['redshift'][extragal_idx][mask],\n                   object_data['id'][object_idx][mask]], \n                  names=('ra','dec', 'e1', 'e2', 'z','id'), masked=True)\n\n\n# Create the background galaxy catalog as astropy table and save it for later use\ncl_from_objectDC2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) \ncl_from_objectDC2.save('objectDC2_GC.pkl')", "_____no_output_____" ] ], [ [ "#### 5.3 Build the reduced tangential shear profile from both datasets", "_____no_output_____" ] ], [ [ "cl_from_objectDC2 = clmm.GalaxyCluster.load('objectDC2_GC.pkl')\ncl_from_cosmoDC2 = clmm.GalaxyCluster.load('cosmoDC2_GC.pkl')", "_____no_output_____" ], [ "dc2_cosmo = extragalactic_cat.cosmology\ncosmo = clmm.Cosmology(H0 = dc2_cosmo.H0, Omega_dm0 = dc2_cosmo.Om0-dc2_cosmo.Ob0, Omega_b0 = dc2_cosmo.Ob0)\n\nbin_edges = clmm.dataops.make_bins(0.15, 4, 10, method='evenlog10width')\n\ncl_from_cosmoDC2.compute_tangential_and_cross_components(geometry=\"flat\")\nprofile_from_cosmoDC2 = cl_from_cosmoDC2.make_radial_profile(\"Mpc\", bins=bin_edges,cosmo=cosmo)\n\ncl_from_objectDC2.compute_tangential_and_cross_components(geometry=\"flat\")\nprofile_from_objectDC2 = cl_from_objectDC2.make_radial_profile(\"Mpc\", bins=bin_edges,cosmo=cosmo)\n", "_____no_output_____" ] ], [ [ "#### 5.4 Taking into account intrinsic ellipticities from cosmoDC2\n\nSo far, we've used the `shear1` and `shear2` fields of cosmoDC2, i.e., we neglected the intrinsic ellipticities of the galaxies. 
To account for shape noise from intrinsic ellipticities, we can use the shears and unlensed ellipticities available in the cosmoDC2 catalog to build lensed ellipticities (this is done using the `compute_lensed_ellipticity` function available in CLMM - see the documentation for details). The latter can then be used to build a CLMM cluster object. The resulting tangential shear profile will then include shape noise.", "_____no_output_____" ] ], [ [ "es1 = extragal_data['ellipticity_1_true']\nes2 = extragal_data['ellipticity_2_true']\ngamma1 = extragal_data['shear_1']\ngamma2 = extragal_data['shear_2']\nkappa = extragal_data['convergence']\n\nextragal_data['ellipticity_1'] = clmm.utils.compute_lensed_ellipticity(es1, es2, gamma1, gamma2, kappa)[0]\nextragal_data['ellipticity_2'] = clmm.utils.compute_lensed_ellipticity(es1, es2, gamma1, gamma2, kappa)[1]", "_____no_output_____" ] ], [ [ "Make a new CLMM cluster object", "_____no_output_____" ] ], [ [ "dat = clmm.GCData([extragal_data['ra'][extragal_idx],extragal_data['dec'][extragal_idx],\n                   extragal_data['ellipticity_1'][extragal_idx],\n                   extragal_data['ellipticity_2'][extragal_idx],\n                   extragal_data['redshift'][extragal_idx],\n                   extragal_data['galaxy_id'][extragal_idx]],\n                  names=('ra','dec', 'e1', 'e2', 'z','id'))\n\ncl_from_cosmoDC2_with_e1e2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) ", "_____no_output_____" ] ], [ [ "Compute the reduced shear profile", "_____no_output_____" ] ], [ [ "cl_from_cosmoDC2_with_e1e2.compute_tangential_and_cross_components(geometry=\"flat\")\nprofile_from_cosmoDC2_with_e1e2 = cl_from_cosmoDC2_with_e1e2.make_radial_profile(\"Mpc\", bins=bin_edges,cosmo=cosmo)", "_____no_output_____" ] ], [ [ "### 6. Visualize the results for the three profiles, obtained from the same galaxies in the two catalogs\n- from cosmoDC2, neglecting shape noise (blue points)\n- from cosmoDC2, including shape noise (orange)\n- for the DC2 object catalog (green, where the galaxy redshifts are taken from cosmoDC2)", "_____no_output_____" ] ], [ [ "plt.errorbar(profile_from_cosmoDC2['radius'],profile_from_cosmoDC2['gt'],profile_from_cosmoDC2['gt_err'], \n             marker='o',label='from cosmoDC2 g1g2')\nplt.errorbar(profile_from_cosmoDC2_with_e1e2['radius'],profile_from_cosmoDC2_with_e1e2['gt'],\n             profile_from_cosmoDC2['gt_err'],label='from cosmoDC2 e1e2')\nplt.errorbar(profile_from_objectDC2['radius'],profile_from_objectDC2['gt'],profile_from_objectDC2['gt_err'], \n             label='from DC2 objects e1e2')\nplt.legend()\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('R (Mpc)')\nplt.ylabel(r'$\\langle g_t \\rangle$')\nplt.ylim([2.e-3,0.3])", "_____no_output_____" ] ], [ [ "From cosmoDC2 (orange and blue profiles above), we see the impact of shape noise at low radii (orange/blue = with/without intrinsic ellipticities), where the number of galaxies per bin is small (see below). The error bars on the data computed by `make_shear_profile` simply correspond to the standard error of the mean in the bin ($\\sigma_{\\rm bin}/\\sqrt{N_{\\rm gal\\_in\\_bin}}$). The errors on individual shape measurements on the DC2 object catalog have been neglected.", "_____no_output_____" ] ], [ [ "plt.scatter(profile_from_cosmoDC2['radius'], profile_from_cosmoDC2['n_src'], marker='o')\n[plt.axvline(x=r, ymin=0, ymax=1e3, color='k', linestyle=':') for r in profile_from_cosmoDC2['radius_min']]\nplt.ylabel('Ngal in the bin')\nplt.xlabel('R (Mpc)')\nplt.xscale('log')\nplt.yscale('log')\nplt.title('Number of galaxies in each bin')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6e6491c4e92c58048f0a0bd6761d6d9458468c
159,587
ipynb
Jupyter Notebook
codes/old-experiments/RQ1-BTC-Gender.ipynb
yangzhou6666/BiasFinder
4d83a87e6335f1bf6064da08bb48bfaf108a33ad
[ "Apache-2.0" ]
null
null
null
codes/old-experiments/RQ1-BTC-Gender.ipynb
yangzhou6666/BiasFinder
4d83a87e6335f1bf6064da08bb48bfaf108a33ad
[ "Apache-2.0" ]
null
null
null
codes/old-experiments/RQ1-BTC-Gender.ipynb
yangzhou6666/BiasFinder
4d83a87e6335f1bf6064da08bb48bfaf108a33ad
[ "Apache-2.0" ]
1
2021-12-22T11:02:43.000Z
2021-12-22T11:02:43.000Z
41.700287
2,242
0.401743
[ [ [ "# User Study", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport math\nimport time", "_____no_output_____" ], [ "eval_dir = \"gc_imdb\"\n# df = pd.read_csv(\"../data/\" + eval_dir + \"/test.csv\", header=None, sep=\"\\t\", names=[0, 1, \"mutant\", \"template\", \"gender\", \"label\", \"country\"])\ndf = pd.read_csv(\"../data/\" + eval_dir + \"/test.csv\", header=None, sep=\"\\t\", names=[\"label\", \"mutant\", \"template\", \"original\", \"identifier\", \"type\", \"gender\", \"country\"])\ndf", "_____no_output_____" ], [ "def read_txt(fpath):\n pred = []\n file = open(fpath)\n lines = file.readlines()\n for l in lines :\n pred.append(int(l))\n file.close()\n \n return pred", "_____no_output_____" ], [ "output_dir = \"gc_imdb\"\n\nresult_dir = \"../result/\" + output_dir + \"/\"\n\npath = result_dir + \"results_data.txt\"\n\npred = read_txt(path)\n\nprint(len(pred))", "986752\n" ], [ "df[\"prediction\"] = pred", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df[df[\"label\"] == 0].reset_index(drop=True)[\"original\"][0]", "_____no_output_____" ] ], [ [ "### Use Groupby to Group the text by Template", "_____no_output_____" ] ], [ [ "df[\"template\"] = df[\"template\"].astype(\"category\")\ndf[\"template_id\"] = df[\"template\"].cat.codes", "_____no_output_____" ], [ "gb = df.groupby(\"template_id\")", "_____no_output_____" ], [ "gb.count()", "_____no_output_____" ], [ "len(gb.size())", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "### Get DF template only", "_____no_output_____" ] ], [ [ "dft = df.iloc[:,[2,3,9]]\ndft = dft.drop_duplicates()\ndft", "_____no_output_____" ], [ "## template\ndft = dft.sort_values(by=[\"template_id\"])\ndft = dft.reset_index(drop=True)\n\n## mutant\ndf = df.reset_index(drop=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "dft", "_____no_output_____" ], [ "dft.to_csv(\"gender-template.csv\")", "_____no_output_____" ] ], [ [ "## Get Number of Discordant Pairs for Each Template\n\nThere is a memory limitation that make us can't directly produce +- 240M pairs. Fortunately, the number of discordant pairs for each template can be calculate theoritically without crossing th data to get +- 240M pairs. 
This will solve the memory issue.\n\nFor each template, we will give an example of the male mutant and female mutant for user study", "_____no_output_____" ] ], [ [ "gb = df.groupby(\"template_id\")\ngb.count()", "_____no_output_____" ] ], [ [ "### Data crossing", "_____no_output_____" ] ], [ [ "import time\n\nstart = time.time()\n\nidentifier = \"gender\"\n\nmutant_example = []\nmutant_prediction_stat = []\nkey = []\nfor i in range(len(gb.size())) :\n# for i in range(10) :\n    data = gb.get_group(i)\n    dc = data.groupby(identifier)\n    me = {} # mutant example\n    mp = {} # mutant prediction\n    key = []\n    for k, v in dict(iter(dc)).items() :\n        key.append(k)\n        is_first_instance = True\n        pos_counter = 0 # positive counter\n        neg_counter = 0 # negative counter\n        for m, p in zip(v[\"mutant\"].values, v[\"prediction\"].values) :\n            if is_first_instance :\n                me[k] = m\n                is_first_instance = False\n            if p == 1 :\n                pos_counter += 1\n            else :\n                neg_counter += 1\n        mp[k] = {\"pos\": pos_counter, \"neg\" : neg_counter}\n    \n    mutant_example.append(me)\n    mutant_prediction_stat.append(mp)\n    \nend = time.time()\nprint(\"Execution time: \", end-start)", "Execution time:  6.637021064758301\n" ], [ "dft[\"mutant_example\"] = mutant_example\ndft[\"mutant_prediction_stat\"] = mutant_prediction_stat\ndft", "_____no_output_____" ], [ "key", "_____no_output_____" ], [ "btcs = []\npairs = []\nfor mp in dft[\"mutant_prediction_stat\"].values :\n    if len(mp) > 0 :\n        btc = 0\n        pair = 0\n        already_processed = []\n        for k1 in key :\n            for k2 in key :\n                if k1 != k2 :\n                    k = k1 + \"-\" + k2\n                    if k1 > k2 :\n                        k = k2 + \"-\" + k1\n                    if k not in already_processed :\n                        already_processed.append(k)\n\n                        btc += ((mp[k1][\"pos\"] * mp[k2][\"neg\"]) + (mp[k1][\"neg\"] * mp[k2][\"pos\"]))\n                        pair += (mp[k1][\"pos\"] + mp[k1][\"neg\"]) * (mp[k2][\"pos\"] + mp[k2][\"neg\"])\n\n#         double_counting_divider = len(key) * (len(key)-1)\n#         dp.append(int(_dp/double_counting_divider)) # we must divide the number with the number of key to reduce the double counting\n        btcs.append(btc)\n        pairs.append(pair)\n    else :\n        btcs.append(0)\n        pairs.append(0)", "_____no_output_____" ], [ "dft[\"btc\"] = btcs\ndft[\"possible_pair\"] = pairs\ndft", "_____no_output_____" ] ], [ [ "### Number of Bias-uncovering Test Cases", "_____no_output_____" ] ], [ [ "int(dft[\"btc\"].sum())", "_____no_output_____" ] ], [ [ "### BTC Rate", "_____no_output_____" ] ], [ [ "dft[\"btc\"].sum() / dft[\"possible_pair\"].sum()", "_____no_output_____" ] ], [ [ "### Get Data that Have a Number of BTC Greater than Zero", "_____no_output_____" ] ], [ [ "d = dft[dft[\"btc\"] > 0]\nd.head()", "_____no_output_____" ] ], [ [ "### Sort Data based on the number of BTC", "_____no_output_____" ] ], [ [ "d = d.sort_values([\"btc\", \"template\"], ascending=False)\nd = d.reset_index(drop=True)\nd", "_____no_output_____" ], [ "d.to_csv(\"occ-age/gender-btc.csv\")", "_____no_output_____" ], [ "d.iloc[0][\"mutant_prediction_stat\"]", "_____no_output_____" ], [ "d.groupby(\"template_id\").get_group(2760).iloc[0][\"mutant_prediction_stat\"]", "_____no_output_____" ], [ "# d.groupby(\"template_id\").get_group(2760).iloc[0][\"mutant_example\"]", "_____no_output_____" ], [ "# d.groupby(\"template_id\").get_group(2760).iloc[0][\"template\"]", "_____no_output_____" ] ], [ [ "### Get BTC Data for train and test", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "template_that_produce_btc = d[\"template_id\"].tolist()\n# template_that_produce_btc", "_____no_output_____" ], [ "start = time.time()\n\nmutant_text_1 = 
[]\nmutant_text_2 = []\nprediction_1 = []\nprediction_2 = []\nidentifier_1 = []\nidentifier_2 = []\ntemplate = []\nlabel = []\nfor i in template_that_produce_btc: # only processing from template that produce BTC\n data = gb.get_group(i)\n dc = data.groupby(identifier)\n already_processed = []\n for k1, v1 in dict(iter(dc)).items() :\n for k2, v2 in dict(iter(dc)).items() :\n if k1 != k2 :\n key = k1 + \"-\" + k2\n if k1 > k2 :\n key = k2 + \"-\" + k1\n if key not in already_processed :\n already_processed.append(key)\n for m_1, p_1, i_1, t, l in zip(v1[\"mutant\"].values, v1[\"prediction\"].values, v1[identifier].values, v1[\"template\"].values, v1[\"label\"].values) :\n for m_2, p_2, i_2 in zip(v2[\"mutant\"].values, v2[\"prediction\"].values, v2[identifier].values) :\n if p_1 != p_2 : # only add discordant pairs\n mutant_text_1.append(m_1)\n prediction_1.append(p_1)\n identifier_1.append(i_1)\n mutant_text_2.append(m_2)\n prediction_2.append(p_2)\n identifier_2.append(i_2)\n template.append(t)\n label.append(l)\n\nend = time.time()\nprint(\"Execution time: \", end-start)", "Execution time: 0.4049961566925049\n" ], [ "btc = pd.DataFrame(data={\"mutant_1\" : mutant_text_1, \"mutant_2\" : mutant_text_2, \"prediction_1\": prediction_1, \"prediction_2\" : prediction_2, \"identifier_1\": identifier_1, \"identifier_2\" : identifier_2, \"template\": template, \"label\": label})\n\nbtc", "_____no_output_____" ], [ "import os\ndata_dir = \"../data/rq2/biasfinder_btc/\"\n\nif not os.path.exists(data_dir) :\n os.makedirs(data_dir)\n\n\ntrain = btc.sample(frac=1, random_state=123)\ntrain.to_csv(data_dir + \"train.csv\", index=None, header=None, sep=\"\\t\")", "_____no_output_____" ], [ "data_dir = \"../data/rq2/biasfinder_btc/\"\n\nif not os.path.exists(data_dir) :\n os.makedirs(data_dir)\n\ntrain.to_csv(data_dir + \"test.csv\", index=None, header=None, sep=\"\\t\")", "_____no_output_____" ], [ " ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e7232dfc4fab4efb795044808485714d39208
186,497
ipynb
Jupyter Notebook
00_quickstart/generated_profiler_report/profiler-report.ipynb
cyourth-cognonic/aws-workshop
9758252e53b527d546f430b24171d5e4b0a051bf
[ "Apache-2.0" ]
null
null
null
00_quickstart/generated_profiler_report/profiler-report.ipynb
cyourth-cognonic/aws-workshop
9758252e53b527d546f430b24171d5e4b0a051bf
[ "Apache-2.0" ]
null
null
null
00_quickstart/generated_profiler_report/profiler-report.ipynb
cyourth-cognonic/aws-workshop
9758252e53b527d546f430b24171d5e4b0a051bf
[ "Apache-2.0" ]
null
null
null
54.771512
6,560
0.508448
[ [ [ "# SageMaker Debugger Profiling Report\n\nSageMaker Debugger auto generated this report. You can generate similar reports on all supported training jobs. The report provides summary of training job, system resource usage statistics, framework metrics, rules summary, and detailed analysis from each rule. The graphs and tables are interactive. \n\n**Legal disclaimer:** This report and any recommendations are provided for informational purposes only and are not definitive. You are responsible for making your own independent assessment of the information.\n", "_____no_output_____" ] ], [ [ "import json\nimport pandas as pd\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport datetime\nfrom smdebug.profiler.utils import us_since_epoch_to_human_readable_time, ns_since_epoch_to_human_readable_time\n", "[2021-04-16 21:39:09.260 ip-10-0-95-252.us-east-2.compute.internal:172 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: /opt/ml/processing/input/profiler/signals/ProfilerReport\n" ], [ "import bokeh\nfrom bokeh.io import output_notebook, show\nfrom bokeh.layouts import column, row\nfrom bokeh.plotting import figure\nfrom bokeh.models.widgets import DataTable, DateFormatter, TableColumn\nfrom bokeh.models import ColumnDataSource, PreText\nfrom math import pi\nfrom bokeh.transform import cumsum\nimport warnings\nfrom bokeh.models.widgets import Paragraph\nfrom bokeh.models import Legend\nfrom bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning\nwarnings.simplefilter('ignore', BokehDeprecationWarning)\nwarnings.simplefilter('ignore', BokehUserWarning)\n\noutput_notebook(hide_banner=True)", "_____no_output_____" ], [ "def create_piechart(data_dict, title=None, height=400, width=400, x1=0, x2=0.1, radius=0.4, toolbar_location='right'):\n \n plot = figure(plot_height=height, \n plot_width=width,\n toolbar_location=toolbar_location,\n tools=\"hover,wheel_zoom,reset,pan\", \n tooltips=\"@phase:@value\", \n title=title,\n x_range=(-radius-x1, radius+x2))\n\n data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index':'phase'})\n data['angle'] = data['value']/data['value'].sum() * 2*pi\n data['color'] = bokeh.palettes.viridis(len(data_dict))\n\n plot.wedge(x=0, y=0., radius=radius,\n start_angle=cumsum('angle', include_zero=True), \n end_angle=cumsum('angle'),\n line_color=\"white\", \n source=data, \n fill_color='color', \n legend='phase'\n )\n plot.legend.label_text_font_size = \"8pt\"\n plot.legend.location = 'center_right'\n plot.axis.axis_label=None\n plot.axis.visible=False\n plot.grid.grid_line_color = None\n plot.outline_line_color = \"white\"\n \n return plot", "_____no_output_____" ], [ "from IPython.display import display, HTML, Markdown, Image\ndef pretty_print(df):\n raw_html = df.to_html().replace(\"\\\\n\",\"<br>\").replace('<tr>','<tr style=\"text-align: left;\">')\n return display(HTML(raw_html))", "_____no_output_____" ] ], [ [ "## Training job summary", "_____no_output_____" ] ], [ [ "def load_report(rule_name):\n try:\n report = json.load(open('/opt/ml/processing/output/rule/profiler-output/profiler-reports/'+rule_name+'.json'))\n return report\n except FileNotFoundError:\n print (rule_name + ' not triggered')", "_____no_output_____" ], [ "\njob_statistics = {}\nreport = load_report('MaxInitializationTime')\nif report:\n if \"first\" in report['Details'][\"step_num\"] and \"last\" in report['Details'][\"step_num\"]:\n first_step = report['Details'][\"step_num\"][\"first\"]\n last_step = 
report['Details'][\"step_num\"][\"last\"]\n tmp = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000)\n date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n job_statistics[\"Start time\"] = f\"{hour} {day}\"\n tmp = us_since_epoch_to_human_readable_time(report['Details']['job_end'] * 1000000)\n date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n job_statistics[\"End time\"] = f\"{hour} {day}\"\n job_duration_in_seconds = int(report['Details']['job_end'] - report['Details']['job_start']) \n job_statistics[\"Job duration\"] = f\"{job_duration_in_seconds} seconds\"\n if \"first\" in report['Details'][\"step_num\"] and \"last\" in report['Details'][\"step_num\"]:\n tmp = us_since_epoch_to_human_readable_time(first_step)\n date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n job_statistics[\"Training loop start\"] = f\"{hour} {day}\"\n tmp = us_since_epoch_to_human_readable_time(last_step)\n date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n job_statistics[\"Training loop end\"] = f\"{hour} {day}\"\n training_loop_duration_in_seconds = int((last_step - first_step) / 1000000)\n job_statistics[\"Training loop duration\"] = f\"{training_loop_duration_in_seconds} seconds\"\n initialization_in_seconds = int(first_step/1000000 - report['Details']['job_start'])\n job_statistics[\"Initialization time\"] = f\"{initialization_in_seconds} seconds\"\n finalization_in_seconds = int(np.abs(report['Details']['job_end'] - last_step/1000000))\n job_statistics[\"Finalization time\"] = f\"{finalization_in_seconds} seconds\"\n initialization_perc = int(initialization_in_seconds / job_duration_in_seconds * 100)\n job_statistics[\"Initialization\"] = f\"{initialization_perc} %\"\n training_loop_perc = int(training_loop_duration_in_seconds / job_duration_in_seconds * 100)\n job_statistics[\"Training loop\"] = f\"{training_loop_perc} %\"\n finalization_perc = int(finalization_in_seconds / job_duration_in_seconds * 100)\n job_statistics[\"Finalization\"] = f\"{finalization_perc} %\"", "_____no_output_____" ], [ "if report:\n text = \"\"\"The following table gives a summary about the training job. 
The table includes information about when the training job started and ended, and how much time the initialization, training loop, and finalization took.\"\"\"\n if len(job_statistics) > 0:\n df = pd.DataFrame.from_dict(job_statistics, orient='index')\n start_time = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000)\n date = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n duration = job_duration_in_seconds\n text = f\"\"\"{text} \\n Your training job started on {day} at {hour} and ran for {duration} seconds.\"\"\"\n\n #pretty_print(df)\n if \"first\" in report['Details'][\"step_num\"] and \"last\" in report['Details'][\"step_num\"]:\n if finalization_perc < 0:\n job_statistics[\"Finalization\"] = 0\n if training_loop_perc < 0:\n job_statistics[\"Training loop\"] = 0\n if initialization_perc < 0:\n job_statistics[\"Initialization\"] = 0\n else:\n text = f\"\"\"{text} \\n Your training job started on {day} at {hour} and ran for {duration} seconds.\"\"\"\n \n if len(job_statistics) > 0:\n df2 = df.reset_index()\n df2.columns = [\"0\", \"1\"]\n source = ColumnDataSource(data=df2)\n columns = [TableColumn(field='0', title=\"\"),\n TableColumn(field='1', title=\"Job Statistics\"),]\n table = DataTable(source=source, columns=columns, width=450, height=380)\n\n plot = None\n\n if \"Initialization\" in job_statistics:\n piechart_data = {}\n piechart_data[\"Initialization\"] = initialization_perc \n piechart_data[\"Training loop\"] = training_loop_perc\n piechart_data[\"Finalization\"] = finalization_perc \n\n plot = create_piechart(piechart_data, \n height=350,\n width=500,\n x1=0.15,\n x2=0.15,\n radius=0.15, \n toolbar_location=None)\n\n if plot != None:\n paragraph = Paragraph(text=f\"\"\"{text}\"\"\", width = 800)\n show(column(paragraph, row(table, plot)))\n else:\n paragraph = Paragraph(text=f\"\"\"{text} No step information was profiled from your training job. The time spent on initialization and finalization cannot be computed.\"\"\" , width = 800)\n show(column(paragraph, row(table)))", "_____no_output_____" ] ], [ [ "## System usage statistics", "_____no_output_____" ] ], [ [ "report = load_report('OverallSystemUsage')", "_____no_output_____" ], [ "text1 = ''\nif report:\n if \"GPU\" in report[\"Details\"]:\n for node_id in report[\"Details\"][\"GPU\"]:\n gpu_p95 = report[\"Details\"][\"GPU\"][node_id][\"p95\"]\n gpu_p50 = report[\"Details\"][\"GPU\"][node_id][\"p50\"]\n cpu_p95 = report[\"Details\"][\"CPU\"][node_id][\"p95\"]\n cpu_p50 = report[\"Details\"][\"CPU\"][node_id][\"p50\"]\n \n if gpu_p95 < 70 and cpu_p95 < 70:\n text1 = f\"\"\"{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. \n The 95th percentile of the total CPU utilization is only {int(cpu_p95)}%. Node {node_id} is underutilized. \n You may want to consider switching to a smaller instance type.\"\"\"\n elif gpu_p95 < 70 and cpu_p95 > 70:\n text1 = f\"\"\"{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. \n However, the 95th percentile of the total CPU utilization is {int(cpu_p95)}%. GPUs on node {node_id} are underutilized, \n likely because of CPU bottlenecks.\"\"\"\n elif gpu_p50 > 70:\n text1 = f\"\"\"{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. 
\n GPUs on node {node_id} are well utilized.\"\"\"\n else:\n text1 = f\"\"\"{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. \n The median total CPU utilization is {int(cpu_p50)}%.\"\"\"\n else:\n for node_id in report[\"Details\"][\"CPU\"]:\n cpu_p95 = report[\"Details\"][\"CPU\"][node_id][\"p95\"]\n if cpu_p95 > 70:\n text1 = f\"\"\"{text1}The 95th percentile of the total CPU utilization on node {node_id} is {int(cpu_p95)}%. CPUs on node {node_id} are well utilized.\"\"\"\n text1 = Paragraph(text=f\"\"\"{text1}\"\"\", width=1100)\n text2 = Paragraph(text=f\"\"\"The following table shows statistics of resource utilization per worker (node), \n such as the total CPU and GPU utilization, and the memory utilization on CPU and GPU. \n The table also includes the total I/O wait time and the total amount of data sent or received in bytes.\n The table shows min and max values as well as p99, p95 and p50 percentiles.\"\"\", width=900)\n", "_____no_output_____" ], [ "pd.set_option('display.float_format', lambda x: '%.2f' % x)\nrows = [] \nunits = {\"CPU\": \"percentage\", \"CPU memory\": \"percentage\", \"GPU\": \"percentage\", \"Network\": \"bytes\", \"GPU memory\": \"percentage\", \"I/O\": \"percentage\"}\nif report:\n for metric in report['Details']:\n for node_id in report['Details'][metric]:\n values = report['Details'][metric][node_id]\n rows.append([node_id, metric, units[metric], values['max'], values['p99'], values['p95'], values['p50'], values['min']])\n\n df = pd.DataFrame(rows) \n df.columns = ['Node', 'metric', 'unit', 'max', 'p99', 'p95', 'p50', 'min']\n df2 = df.reset_index()\n source = ColumnDataSource(data=df2)\n columns = [TableColumn(field='Node', title=\"node\"),\n TableColumn(field='metric', title=\"metric\"),\n TableColumn(field='unit', title=\"unit\"),\n TableColumn(field='max', title=\"max\"),\n TableColumn(field='p99', title=\"p99\"),\n TableColumn(field='p95', title=\"p95\"),\n TableColumn(field='p50', title=\"p50\"),\n TableColumn(field='min', title=\"min\"),]\n table = DataTable(source=source, columns=columns, width=800, height=df2.shape[0]*30)\n\n show(column( text1, text2, row(table)))", "_____no_output_____" ], [ "report = load_report('OverallFrameworkMetrics')\nif report:\n if 'Details' in report:\n\n display(Markdown(f\"\"\"## Framework metrics summary\"\"\"))\n plots = []\n text = ''\n if 'phase' in report['Details']:\n text = f\"\"\"The following two pie charts show the time spent on the TRAIN phase, the EVAL phase, \n and others. The 'others' includes the time spent between steps (after one step has finished and before\n the next step has started). Ideally, most of the training time should be spent on the \n TRAIN and EVAL phases. 
If TRAIN/EVAL were not specified in the training script, steps will be recorded as \n GLOBAL.\"\"\"\n\n if 'others' in report['Details']['phase']:\n others = float(report['Details']['phase']['others'])\n\n if others > 25:\n text = f\"\"\"{text} Your training job spent quite a significant amount of time ({round(others,2)}%) in phase \"others\".\n You should check what is happening in between the steps.\"\"\"\n\n plot = create_piechart(report['Details']['phase'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between the time spent on the TRAIN/EVAL phase and others\")\n plots.append(plot)\n\n if 'forward_backward' in report['Details']:\n\n event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)\n perc = report['Details']['forward_backward'][event]\n\n text = f\"\"\"{text} The pie chart on the right shows a more detailed breakdown. \n It shows that {int(perc)}% of the training time was spent on event \"{event}\".\"\"\"\n\n if perc > 70:\n text = f\"\"\"{text} There is quite a significant difference between the time spent on the forward and backward\n pass.\"\"\"\n\n plot = create_piechart(report['Details']['forward_backward'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between forward and backward pass\") \n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=1100)\n show(column(paragraph, row(plots)))\n\n plots = []\n text=''\n if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:\n\n key = list(report['Details']['ratio'].keys())[0]\n ratio = report['Details']['ratio'][key]\n\n text = f\"\"\"The following pie chart shows a breakdown of the CPU/GPU operators. \n It shows that {int(ratio)}% of the training time was spent on executing the \"{key}\" operator.\"\"\"\n\n plot = create_piechart(report['Details']['ratio'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between the time spent on CPU/GPU operators\")\n plots.append(plot)\n\n\n if 'general' in report['Details']:\n event = max(report['Details']['general'], key=report['Details']['general'].get)\n perc = report['Details']['general'][event]\n\n plot = create_piechart(report['Details']['general'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"General framework operations\")\n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=1100)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = ''\n if 'horovod' in report['Details']:\n display(Markdown(f\"\"\"#### Overview: Horovod metrics\"\"\"))\n event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)\n perc = report['Details']['horovod'][event]\n text = f\"\"\"{text} The following pie chart shows a detailed breakdown of the Horovod metrics profiled\n from your training job. 
The most expensive function was \"{event}\" with {int(perc)}%.\"\"\"\n\n plot = create_piechart(report['Details']['horovod'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Horovod metrics \")\n\n paragraph = Paragraph(text=text, width=1100)\n show(column(paragraph, row(plot)))\n", "_____no_output_____" ], [ "pd.set_option('display.float_format', lambda x: '%.2f' % x)\nrows = [] \nvalues = []\nif report:\n if 'CPU_total' in report['Details']:\n display(Markdown(f\"\"\"#### Overview: CPU operators\"\"\"))\n event = max(report['Details']['CPU'], key=report['Details']['CPU'].get)\n perc = report['Details']['CPU'][event]\n\n for function in report['Details']['CPU']:\n percentage = round(report['Details']['CPU'][function],2)\n time = report['Details']['CPU_total'][function] \n rows.append([percentage, time, function])\n\n df = pd.DataFrame(rows) \n df.columns = ['percentage', 'time', 'operator']\n\n df = df.sort_values(by=['percentage'], ascending=False)\n source = ColumnDataSource(data=df)\n columns = [TableColumn(field='percentage', title=\"Percentage\"),\n TableColumn(field='time', title=\"Cumulative time in microseconds\"),\n TableColumn(field='operator', title=\"CPU operator\"),]\n\n table = DataTable(source=source, columns=columns, width=550, height=350)\n\n text = Paragraph(text=f\"\"\"The following table shows a list of operators that ran on the CPUs.\n The most expensive operator on the CPUs was \"{event}\" with {int(perc)} %.\"\"\")\n\n plot = create_piechart(report['Details']['CPU'],\n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n )\n\n show(column(text, row(table, plot)))\n", "_____no_output_____" ], [ "pd.set_option('display.float_format', lambda x: '%.2f' % x)\nrows = [] \nvalues = []\nif report:\n if 'GPU_total' in report['Details']:\n display(Markdown(f\"\"\"#### Overview: GPU operators\"\"\"))\n event = max(report['Details']['GPU'], key=report['Details']['GPU'].get)\n perc = report['Details']['GPU'][event]\n\n for function in report['Details']['GPU']:\n percentage = round(report['Details']['GPU'][function],2)\n time = report['Details']['GPU_total'][function] \n rows.append([percentage, time, function])\n\n df = pd.DataFrame(rows) \n df.columns = ['percentage', 'time', 'operator']\n\n df = df.sort_values(by=['percentage'], ascending=False)\n source = ColumnDataSource(data=df)\n columns = [TableColumn(field='percentage', title=\"Percentage\"),\n TableColumn(field='time', title=\"Cumulative time in microseconds\"),\n TableColumn(field='operator', title=\"GPU operator\"),]\n table = DataTable(source=source, columns=columns, width=450, height=350)\n\n text = Paragraph(text=f\"\"\"The following table shows a list of operators that your training job ran on GPU.\n The most expensive operator on GPU was \"{event}\" with {int(perc)} %\"\"\")\n\n plot = create_piechart(report['Details']['GPU'],\n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n )\n\n show(column(text, row(table, plot)))", "_____no_output_____" ] ], [ [ "## Rules summary", "_____no_output_____" ] ], [ [ "description = {}\ndescription['CPUBottleneck'] = 'Checks if the CPU utilization is high and the GPU utilization is low. \\\nIt might indicate CPU bottlenecks, where the GPUs are waiting for data to arrive \\\nfrom the CPUs. The rule evaluates the CPU and GPU utilization rates, and triggers the issue \\\nif the time spent on the CPU bottlenecks exceeds a threshold percent of the total training time. 
The default threshold is 50 percent.'\ndescription['IOBottleneck'] = 'Checks if the data I/O wait time is high and the GPU utilization is low. \\\nIt might indicate I/O bottlenecks where the GPU is waiting for data to arrive from storage. \\\nThe rule evaluates the I/O and GPU utilization rates and triggers the issue \\\nif the time spent on the I/O bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.'\ndescription['Dataloader'] = 'Checks how many data loaders are running in parallel and whether the total number is equal to the number \\\nof available CPU cores. The rule triggers if the number is much smaller or larger than the number of available cores. \\\nIf too small, it might lead to low GPU utilization. If too large, it might impact other compute intensive operations on CPU.'\ndescription['GPUMemoryIncrease'] = 'Measures the average GPU memory footprint and triggers if there is a large increase.'\ndescription['BatchSize'] = 'Checks if GPUs are underutilized because the batch size is too small. \\\nTo detect this problem, the rule analyzes the average GPU memory footprint, \\\nthe CPU and the GPU utilization. '\ndescription['LowGPUUtilization'] = 'Checks if the GPU utilization is low or fluctuating. \\\nThis can happen due to bottlenecks, blocking calls for synchronizations, \\\nor a small batch size.'\ndescription['MaxInitializationTime'] = 'Checks if the time spent on initialization exceeds a threshold percent of the total training time. \\\nThe rule waits until the first step of the training loop starts. The initialization can take longer \\\nif downloading the entire dataset from Amazon S3 in File mode. The default threshold is 20 minutes.'\ndescription['LoadBalancing'] = 'Detects workload balancing issues across GPUs. \\\nWorkload imbalance can occur in training jobs with data parallelism. \\\nThe gradients are accumulated on a primary GPU, and this GPU might be overused \\\nwith regard to other GPUs, reducing the efficiency of data parallelization.'\ndescription['StepOutlier'] = 'Detects outliers in step duration. The step duration for the forward and backward pass should be \\\nroughly the same throughout the training. If there are significant outliers, \\\nit may indicate a system stall or bottleneck issues.'", "_____no_output_____" ], [ "recommendation = {}\nrecommendation['CPUBottleneck'] = 'Consider increasing the number of data loaders \\\nor applying data pre-fetching.'\nrecommendation['IOBottleneck'] = 'Pre-fetch data or choose different file formats, such as binary formats that \\\nimprove I/O performance.'\nrecommendation['Dataloader'] = 'Change the number of data loader processes.'\nrecommendation['GPUMemoryIncrease'] = 'Choose a larger instance type with more memory if the footprint is close to the maximum available memory.'\nrecommendation['BatchSize'] = 'The batch size is too small, and GPUs are underutilized. Consider running on a smaller instance type or increasing the batch size.'\nrecommendation['LowGPUUtilization'] = 'Check if there are bottlenecks, minimize blocking calls, \\\nchange the distributed training strategy, or increase the batch size.'\nrecommendation['MaxInitializationTime'] = 'Initialization takes too long. 
\\\nIf using File mode, consider switching to Pipe mode in case you are using the TensorFlow framework.'\nrecommendation['LoadBalancing'] = 'Choose a different distributed training strategy or \\\na different distributed training framework.'\nrecommendation['StepOutlier'] = 'Check if there are any bottlenecks (CPU, I/O) correlated to the step outliers.'", "_____no_output_____" ], [ "files = glob.glob('/opt/ml/processing/output/rule/profiler-output/profiler-reports/*json')\nsummary = {}\nfor i in files:\n rule_name = i.split('/')[-1].replace('.json','')\n if rule_name == \"OverallSystemUsage\" or rule_name == \"OverallFrameworkMetrics\":\n continue\n rule_report = json.load(open(i))\n summary[rule_name] = {}\n summary[rule_name]['Description'] = description[rule_name]\n summary[rule_name]['Recommendation'] = recommendation[rule_name]\n summary[rule_name]['Number of times rule triggered'] = rule_report['RuleTriggered'] \n #summary[rule_name]['Number of violations'] = rule_report['Violations'] \n summary[rule_name]['Number of datapoints'] = rule_report['Datapoints']\n summary[rule_name]['Rule parameters'] = rule_report['RuleParameters']\n\ndf = pd.DataFrame.from_dict(summary, orient='index')\ndf = df.sort_values(by=['Number of times rule triggered'], ascending=False)\n\n\ndisplay(Markdown(f\"\"\"The following table shows a profiling summary of the Debugger built-in rules. \nThe table is sorted by the rules that triggered most frequently. During your training job, the {df.index[0]} rule\nwas the most frequently triggered. It processed {df.values[0,3]} datapoints and was triggered {df.values[0,2]} times.\"\"\"))\n\nwith pd.option_context('display.colheader_justify','left'): \n pretty_print(df)", "_____no_output_____" ], [ "analyse_phase = \"training\"\nif job_statistics and \"Initialization time\" in job_statistics:\n if initialization_in_seconds > training_loop_duration_in_seconds:\n analyse_phase = \"initialization\"\n display(Markdown(f\"\"\"The initialization phase took {int(initialization_in_seconds)} seconds, which is {int(initialization_perc)}%\n of the total training time. Since initialization has taken the most time, \n we dive deep into the events occurring during this phase.\"\"\"))\n display(Markdown(\"\"\"## Analyzing initialization\\n\\n\"\"\"))\n else:\n display(Markdown(f\"\"\"The training loop lasted for {int(training_loop_duration_in_seconds)} seconds, which is {int(training_loop_perc)}% of the training job time.\n Since the training loop has taken the most time, we dive deep into the events that occurred during this phase.\"\"\"))\nif analyse_phase == 'training':\n display(Markdown(\"\"\"## Analyzing the training loop\\n\\n\"\"\"))", "_____no_output_____" ], [ "if analyse_phase == \"initialization\":\n display(Markdown(\"\"\"### MaxInitializationTime\\n\\nThis rule helps to detect if the training initialization is taking too much time. \\nThe rule waits until the first step is available. The rule takes the parameter `threshold` that defines how many minutes to wait for the first step to become available. 
Default is 20 minutes.\\nYou can run the rule locally in the following way:\n \"\"\"))\n \n _ = load_report(\"MaxInitializationTime\")", "_____no_output_____" ], [ "if analyse_phase == \"training\":\n display(Markdown(\"\"\"### Step duration analysis\"\"\"))\n report = load_report('StepOutlier')\n if report:\n parameters = report['RuleParameters']\n params = report['RuleParameters'].split('\\n')\n stddev = params[3].split(':')[1]\n mode = params[1].split(':')[1]\n n_outlier = params[2].split(':')[1]\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n\n text = f\"\"\"The StepOutlier rule measures step durations and checks for outliers. The rule \n returns True if duration is larger than {stddev} times the standard deviation. The rule \n also takes the parameter mode, that specifies whether steps from training or validation phase \n should be checked. In your processing job mode was specified as {mode}. \n Typically the first step is taking significantly more time and to avoid the \n rule triggering immediately, one can use n_outliers to specify the number of outliers to ignore. \n n_outliers was set to {n_outlier}.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\n \"\"\"\n\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph))\n\n if report and len(report['Details']['step_details']) > 0:\n for node_id in report['Details']['step_details']:\n tmp = report['RuleParameters'].split('threshold:')\n threshold = tmp[1].split('\\n')[0]\n n_outliers = report['Details']['step_details'][node_id]['number_of_outliers']\n mean = report['Details']['step_details'][node_id]['step_stats']['mean']\n stddev = report['Details']['step_details'][node_id]['stddev']\n phase = report['Details']['step_details'][node_id]['phase']\n display(Markdown(f\"\"\"**Step durations on node {node_id}:**\"\"\"))\n display(Markdown(f\"\"\"The following table is a summary of the statistics of step durations measured on node {node_id}.\n The rule has analyzed the step duration from {phase} phase.\n The average step duration on node {node_id} was {round(mean, 2)}s. \n The rule detected {n_outliers} outliers, where step duration was larger than {threshold} times the standard deviation of {stddev}s\n \\n\"\"\"))\n step_stats_df = pd.DataFrame.from_dict(report['Details']['step_details'][node_id]['step_stats'], orient='index').T\n step_stats_df.index = ['Step Durations in [s]']\n pretty_print(step_stats_df)\n\n display(Markdown(f\"\"\"The following histogram shows the step durations measured on the different nodes. 
\n You can turn on or turn off the visualization of histograms by selecting or unselecting the labels in the legend.\"\"\"))\n\n plot = figure(plot_height=450, \n plot_width=850, \n title=f\"\"\"Step durations\"\"\") \n\n colors = bokeh.palettes.viridis(len(report['Details']['step_details']))\n\n for index, node_id in enumerate(report['Details']['step_details']):\n probs = report['Details']['step_details'][node_id]['probs']\n binedges = report['Details']['step_details'][node_id]['binedges']\n\n plot.quad( top=probs,\n bottom=0,\n left=binedges[:-1],\n right=binedges[1:],\n line_color=\"white\",\n fill_color=colors[index],\n fill_alpha=0.7,\n legend=node_id)\n\n plot.add_layout(Legend(), 'right') \n plot.y_range.start = 0\n plot.xaxis.axis_label = f\"\"\"Step durations in [s]\"\"\"\n plot.yaxis.axis_label = \"Occurrences\"\n plot.grid.grid_line_color = \"white\"\n plot.legend.click_policy=\"hide\"\n plot.legend.location = 'center_right'\n show(plot)\n\n if report['RuleTriggered'] > 0:\n\n text=f\"\"\"To get a better understanding of what may have caused those outliers,\n we correlate the timestamps of the step outliers with other framework metrics that happened at the same time.\n The following charts show how much time was spent in the different \n framework metrics when the step outliers occurred. In these charts the framework metrics are not aggregated by phase.\"\"\"\n plots = []\n if 'phase' in report['Details']:\n text = f\"\"\"{text} The chart (in the middle) shows whether step outliers mainly happened during the TRAIN or EVAL phase.\n \"\"\"\n\n plot = create_piechart(report['Details']['phase'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between the time spent on the TRAIN/EVAL phase\")\n plots.append(plot)\n\n if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:\n\n event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)\n perc = report['Details']['forward_backward'][event]\n\n text = f\"\"\"{text} The pie chart on the right shows a detailed breakdown. \n It shows that {int(perc)}% of the training time was spent on event \"{event}\".\"\"\"\n\n plot = create_piechart(report['Details']['forward_backward'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between forward and backward pass\") \n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:\n\n key = list(report['Details']['ratio'].keys())[0]\n ratio = report['Details']['ratio'][key]\n\n text = f\"\"\"The following pie chart shows a breakdown of the CPU/GPU operators executed during the step outliers. 
\n It shows that {int(ratio)}% of the training time was spent on executing operators in \"{key}\".\"\"\"\n\n plot = create_piechart(report['Details']['ratio'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between CPU/GPU operators\")\n plots.append(plot)\n\n\n if 'general' in report['Details'] and len(report['Details']['general']) > 0:\n\n event = max(report['Details']['general'], key=report['Details']['general'].get)\n perc = report['Details']['general'][event]\n\n plot = create_piechart(report['Details']['general'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"General metrics recorded in framework \")\n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:\n\n event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)\n perc = report['Details']['horovod'][event]\n text = f\"\"\"The following pie chart shows a detailed breakdown of the Horovod metrics that have been\n recorded when step outliers happened. The most expensive function was {event} with {int(perc)}%.\"\"\"\n\n plot = create_piechart(report['Details']['horovod'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Horovod metrics \")\n\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plot))) ", "_____no_output_____" ], [ "if analyse_phase == \"training\":\n display(Markdown(\"\"\"### GPU utilization analysis\\n\\n\"\"\"))\n display(Markdown(\"\"\"**Usage per GPU** \\n\\n\"\"\"))\n report = load_report('LowGPUUtilization')\n if report:\n params = report['RuleParameters'].split('\\n')\n threshold_p95 = params[0].split(':')[1]\n threshold_p5 = params[1].split(':')[1]\n window = params[2].split(':')[1]\n patience = params[3].split(':')[1]\n violations = report['Violations']\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n text=Paragraph(text=f\"\"\"The LowGPUUtilization rule checks for a low and fluctuating GPU usage. If the GPU usage is \n consistently low, it might be caused by bottlenecks or a small batch size. If usage is heavily \n fluctuating, it can be due to bottlenecks or blocking calls. The rule computed the 95th and 5th \n percentile of GPU utilization on {window} continuous datapoints and found {violations} cases where \n p95 was above {threshold_p95}% and p5 was below {threshold_p5}%. If p95 is high and p5 is low,\n it might indicate that the GPU usage is highly fluctuating. If both values are very low, \n it would mean that the machine is underutilized. During initialization, the GPU usage is likely zero, \n so the rule skipped the first {patience} data points.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\"\"\", width=800)\n show(text)\n\n \n if len(report['Details']) > 0:\n \n timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])\n date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n text = Paragraph(text=f\"\"\"Your training job is underutilizing the instance. You may want to consider\n either switching to a smaller instance type or increasing the batch size. 
\n The last time that the LowGPUUtilization rule was triggered in your training job was on {day} at {hour}.\n The following boxplots are a snapshot from the timestamps. \n They show the utilization per GPU (without outliers).\n To get a better understanding of the workloads throughout the whole training,\n you can check the workload histogram in the next section.\"\"\", width=800)\n show(text)\n \n del report['Details']['last_timestamp']\n \n for node_id in report['Details']:\n \n plot = figure(plot_height=350, \n plot_width=1000,\n toolbar_location='right',\n tools=\"hover,wheel_zoom,reset,pan\", \n title=f\"Node {node_id}\",\n x_range=(0,17),\n )\n \n for index, key in enumerate(report['Details'][node_id]):\n display(Markdown(f\"\"\"**GPU utilization of {key} on node {node_id}:**\"\"\"))\n text = \"\"\n gpu_max = report['Details'][node_id][key]['gpu_max']\n p_95 = report['Details'][node_id][key]['gpu_95']\n p_5 = report['Details'][node_id][key]['gpu_5']\n text = f\"\"\"{text} The max utilization of {key} on node {node_id} was {gpu_max}%.\"\"\"\n if p_95 < int(threshold_p95): \n text = f\"\"\"{text} The 95th percentile was only {p_95}%. \n {key} on node {node_id} is underutilized.\"\"\"\n if p_5 < int(threshold_p5): \n text = f\"\"\"{text} The 5th percentile was only {p_5}%.\"\"\"\n if p_95 - p_5 > 50:\n text = f\"\"\"{text} The difference between the 5th percentile {p_5}% and the 95th percentile {p_95}% is quite \n significant, which means that utilization on {key} is fluctuating quite a lot.\\n\"\"\"\n \n upper = report['Details'][node_id][key]['upper']\n lower = report['Details'][node_id][key]['lower']\n p75 = report['Details'][node_id][key]['p75']\n p25 = report['Details'][node_id][key]['p25']\n p50 = report['Details'][node_id][key]['p50']\n\n plot.segment(index+1, upper, index+1, p75, line_color=\"black\")\n plot.segment(index+1, lower, index+1, p25, line_color=\"black\")\n\n plot.vbar(index+1, 0.7, p50, p75, fill_color=\"#FDE725\", line_color=\"black\")\n plot.vbar(index+1, 0.7, p25, p50, fill_color=\"#440154\", line_color=\"black\")\n\n plot.rect(index+1, lower, 0.2, 0.01, line_color=\"black\")\n plot.rect(index+1, upper, 0.2, 0.01, line_color=\"black\")\n\n plot.xaxis.major_label_overrides[index+1] = key\n plot.xgrid.grid_line_color = None\n plot.ygrid.grid_line_color = \"white\"\n plot.grid.grid_line_width = 0\n\n plot.xaxis.major_label_text_font_size=\"10px\"\n text=Paragraph(text=f\"\"\"{text}\"\"\", width=900)\n show(text)\n plot.yaxis.axis_label = \"Utilization in %\"\n plot.xaxis.ticker = np.arange(index+2)\n \n show(plot)", "_____no_output_____" ], [ " \nif analyse_phase == \"training\": \n display(Markdown(\"\"\"**Workload balancing**\\n\\n\"\"\")) \n report = load_report('LoadBalancing')\n if report:\n params = report['RuleParameters'].split('\\n')\n threshold = params[0].split(':')[1]\n patience = params[1].split(':')[1]\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n paragraph = Paragraph(text=f\"\"\"The LoadBalancing rule helps to detect issues in workload balancing \n between multiple GPUs. \n It computes a histogram of GPU utilization values for each GPU and then compares the \n similarity between the histograms. 
The rule checked if the distance between the histograms is larger than the \n threshold of {threshold}.\n During initialization utilization is likely zero, so the rule skipped the first {patience} data points.\n \"\"\", width=900)\n show(paragraph)\n \n if len(report['Details']) > 0:\n for node_id in report['Details']: \n \n \n text = f\"\"\"The following histogram shows the workload per GPU on node {node_id}. \n You can enable/disable the visualization of a workload by clicking on the label in the legend.\n \"\"\"\n if len(report['Details']) == 1 and len(report['Details'][node_id]['workloads']) == 1:\n text = f\"\"\"{text} Your training job only used one GPU, so there is no workload balancing issue.\"\"\"\n \n plot = figure(plot_height=450, \n plot_width=850, \n x_range=(-1,100),\n title=f\"\"\"Workloads on node {node_id}\"\"\")\n \n colors = bokeh.palettes.viridis(len(report['Details'][node_id]['workloads']))\n \n for index, gpu_id2 in enumerate(report['Details'][node_id]['workloads']):\n probs = report['Details'][node_id]['workloads'][gpu_id2]\n plot.quad( top=probs,\n bottom=0,\n left=np.arange(0,98,2),\n right=np.arange(2,100,2),\n line_color=\"white\",\n fill_color=colors[index],\n fill_alpha=0.8,\n legend=gpu_id2 )\n\n plot.y_range.start = 0\n plot.xaxis.axis_label = f\"\"\"Utilization\"\"\"\n plot.yaxis.axis_label = \"Occurrences\"\n plot.grid.grid_line_color = \"white\"\n plot.legend.click_policy=\"hide\"\n \n paragraph = Paragraph(text=text)\n show(column(paragraph, plot))\n \n if \"distances\" in report['Details'][node_id]:\n text = f\"\"\"The rule identified workload balancing issues on node {node_id} \n where workloads differed by more than the threshold {threshold}. \n \"\"\"\n for index, gpu_id2 in enumerate(report['Details'][node_id]['distances']):\n for gpu_id1 in report['Details'][node_id]['distances'][gpu_id2]:\n distance = round(report['Details'][node_id]['distances'][gpu_id2][gpu_id1], 2)\n text = f\"\"\"{text} The difference of workload between {gpu_id2} and {gpu_id1} is: {distance}.\"\"\"\n\n paragraph = Paragraph(text=f\"\"\"{text}\"\"\", width=900)\n show(column(paragraph))", "_____no_output_____" ], [ "if analyse_phase == \"training\":\n display(Markdown(\"\"\"### Dataloading analysis\\n\\n\"\"\"))\n report = load_report('Dataloader')\n if report:\n params = report['RuleParameters'].split(\"\\n\")\n min_threshold = params[0].split(':')[1]\n max_threshold = params[1].split(':')[1]\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n text=f\"\"\"The number of dataloader workers can greatly affect the overall performance \n of your training job. The rule analyzed the number of dataloading processes that have been running in \n parallel on the training instance and compared it against the total number of cores. \n The rule checked if the number of processes is smaller than {min_threshold}% or larger than \n {max_threshold}% of the total number of cores. Having too few dataloader workers can slow down data preprocessing and lead to GPU \n underutilization. 
Having too many dataloader workers may hurt the\n overall performance if you are running other compute intensive tasks on the CPU.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\"\"\"\n \n paragraph = Paragraph(text=f\"{text}\", width=900)\n show(paragraph)\n text = \"\"\n if 'cores' in report['Details']:\n cores = int(report['Details']['cores'])\n dataloaders = report['Details']['dataloaders']\n if dataloaders < cores: \n text=f\"\"\"{text} Your training instance provided {cores} CPU cores, however your training job only \n ran on average {dataloaders} dataloader workers in parallel. We recommend increasing the number of\n dataloader workers.\"\"\"\n if dataloaders > cores:\n text=f\"\"\"{text} Your training instance provided {cores} CPU cores, however your training job ran \n on average {dataloaders} dataloader workers. We recommend decreasing the number of dataloader\n workers.\"\"\"\n if 'pin_memory' in report['Details'] and report['Details']['pin_memory'] == False:\n text=f\"\"\"{text} Using pinned memory also improves performance because it enables fast data transfer to CUDA-enabled GPUs.\n The rule detected that your training job was not using pinned memory. \n In case of using PyTorch Dataloader, you can enable this by setting pin_memory=True.\"\"\"\n \n if 'prefetch' in report['Details'] and report['Details']['prefetch'] == False:\n text=f\"\"\"{text} It appears that your training job did not perform any data pre-fetching. Pre-fetching can improve your\n data input pipeline as it produces the data ahead of time.\"\"\"\n paragraph = Paragraph(text=f\"{text}\", width=900)\n show(paragraph)\n \n colors=bokeh.palettes.viridis(10)\n if \"dataloading_time\" in report['Details']:\n median = round(report['Details'][\"dataloading_time\"]['p50'],4)\n p95 = round(report['Details'][\"dataloading_time\"]['p95'],4)\n p25 = round(report['Details'][\"dataloading_time\"]['p25'],4)\n binedges = report['Details'][\"dataloading_time\"]['binedges']\n probs = report['Details'][\"dataloading_time\"]['probs']\n text=f\"\"\"The following histogram shows the distribution of dataloading times that have been measured throughout your training job. The median dataloading time was {median}s. 
\n The 95th percentile was {p95}s and the 25th percentile was {p25}s.\"\"\"\n\n plot = figure(plot_height=450, \n plot_width=850,\n toolbar_location='right',\n tools=\"hover,wheel_zoom,reset,pan\",\n x_range=(binedges[0], binedges[-1])\n )\n \n plot.quad( top=probs,\n bottom=0,\n left=binedges[:-1],\n right=binedges[1:],\n line_color=\"white\",\n fill_color=colors[0],\n fill_alpha=0.8,\n legend=\"Dataloading events\" )\n\n plot.y_range.start = 0\n plot.xaxis.axis_label = f\"\"\"Dataloading in [s]\"\"\"\n plot.yaxis.axis_label = \"Occurrences\"\n plot.grid.grid_line_color = \"white\"\n plot.legend.click_policy=\"hide\"\n\n paragraph = Paragraph(text=f\"{text}\", width=900)\n show(column(paragraph, plot))", "_____no_output_____" ], [ "if analyse_phase == \"training\":\n display(Markdown(\"\"\" ### Batch size\"\"\"))\n report = load_report('BatchSize')\n if report:\n params = report['RuleParameters'].split('\\n')\n cpu_threshold_p95 = int(params[0].split(':')[1])\n gpu_threshold_p95 = int(params[1].split(':')[1])\n gpu_memory_threshold_p95 = int(params[2].split(':')[1])\n patience = int(params[3].split(':')[1])\n window = int(params[4].split(':')[1])\n violations = report['Violations']\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n text = Paragraph(text=f\"\"\"The BatchSize rule helps to detect if the GPU is underutilized because of the batch size being \n too small. To detect this the rule analyzes the GPU memory footprint, and the CPU and GPU utilization. The rule checked if the 95th percentile of CPU utilization is below cpu_threshold_p95 of \n {cpu_threshold_p95}%, the 95th percentile of GPU utilization is below gpu_threshold_p95 of {gpu_threshold_p95}% and the 95th percentile of memory footprint is \\\n below gpu_memory_threshold_p95 of {gpu_memory_threshold_p95}%. In your training job this happened {violations} times. \\\n The rule skipped the first {patience} datapoints. The rule computed the percentiles over a window size of {window} continuous datapoints.\\n\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\n \"\"\", width=800)\n show(text)\n if len(report['Details']) >0: \n timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])\n date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n del report['Details']['last_timestamp']\n text = Paragraph(text=f\"\"\"Your training job is underutilizing the instance. You may want to consider\n either switching to a smaller instance type or increasing the batch size. \n The last time the BatchSize rule triggered in your training job was on {day} at {hour}.\n The following boxplots are a snapshot from the timestamps. 
They show the total \n CPU utilization, the GPU utilization, and the GPU memory usage per GPU (without outliers).\"\"\", \n width=800)\n show(text)\n\n for node_id in report['Details']:\n xmax = max(20, len(report['Details'][node_id]))\n \n plot = figure(plot_height=350, \n plot_width=1000,\n toolbar_location='right',\n tools=\"hover,wheel_zoom,reset,pan\", \n title=f\"Node {node_id}\",\n x_range=(0,xmax)\n )\n \n for index, key in enumerate(report['Details'][node_id]):\n upper = report['Details'][node_id][key]['upper']\n lower = report['Details'][node_id][key]['lower']\n p75 = report['Details'][node_id][key]['p75']\n p25 = report['Details'][node_id][key]['p25']\n p50 = report['Details'][node_id][key]['p50']\n\n plot.segment(index+1, upper, index+1, p75, line_color=\"black\")\n plot.segment(index+1, lower, index+1, p25, line_color=\"black\")\n\n plot.vbar(index+1, 0.7, p50, p75, fill_color=\"#FDE725\", line_color=\"black\")\n plot.vbar(index+1, 0.7, p25, p50, fill_color=\"#440154\", line_color=\"black\")\n\n plot.rect(index+1, lower, 0.2, 0.01, line_color=\"black\")\n plot.rect(index+1, upper, 0.2, 0.01, line_color=\"black\")\n\n plot.xaxis.major_label_overrides[index+1] = key\n plot.xgrid.grid_line_color = None\n plot.ygrid.grid_line_color = \"white\"\n plot.grid.grid_line_width = 0\n\n plot.xaxis.major_label_text_font_size=\"10px\"\n plot.xaxis.ticker = np.arange(index+2)\n plot.yaxis.axis_label = \"Utilization in %\"\n show(plot)", "_____no_output_____" ], [ "if analyse_phase == \"training\": \n display(Markdown(\"\"\"### CPU bottlenecks\\n\\n\"\"\"))\n\n report = load_report('CPUBottleneck')\n if report:\n params = report['RuleParameters'].split('\\n')\n threshold = int(params[0].split(':')[1])\n cpu_threshold = int(params[1].split(':')[1])\n gpu_threshold = int(params[2].split(':')[1])\n patience = int(params[3].split(':')[1])\n violations = report['Violations']\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n if report['Violations'] > 0:\n perc = int(report['Violations']/report['Datapoints']*100)\n else:\n perc = 0\n if perc < threshold:\n string = 'below'\n else:\n string = 'above'\n text = f\"\"\"The CPUBottleneck rule checked when the CPU utilization was above cpu_threshold of {cpu_threshold}% \n and GPU utilization was below gpu_threshold of {gpu_threshold}%. \n During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints.\n With this configuration the rule found {violations} CPU bottlenecks, which is {perc}% of the total time. This is {string} the threshold of {threshold}%.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\"\"\"\n \n paragraph = Paragraph(text=text, width=900)\n show(paragraph)\n if report:\n\n plots = []\n text = \"\"\n if report['RuleTriggered'] > 0:\n\n low_gpu = report['Details']['low_gpu_utilization']\n cpu_bottleneck = {}\n cpu_bottleneck[\"GPU usage above threshold\"] = report[\"Datapoints\"] - report[\"Details\"][\"low_gpu_utilization\"]\n cpu_bottleneck[\"GPU usage below threshold\"] = report[\"Details\"][\"low_gpu_utilization\"] - len(report[\"Details\"])\n cpu_bottleneck[\"Low GPU usage due to CPU bottlenecks\"] = len(report[\"Details\"][\"bottlenecks\"])\n\n n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2)\n text = f\"\"\"The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}%\n and how many of those datapoints were likely caused by a CPU bottleneck. 
The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization \n below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by CPU bottlenecks. \n \"\"\"\n\n plot = create_piechart(cpu_bottleneck, \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Low GPU usage caused by CPU bottlenecks\")\n\n plots.append(plot)\n\n if 'phase' in report['Details']:\n text = f\"\"\"{text} The chart (in the middle) shows whether CPU bottlenecks mainly \n happened during the training or validation phase.\n \"\"\"\n\n plot = create_piechart(report['Details']['phase'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between time spent on TRAIN/EVAL phase\")\n plots.append(plot)\n\n if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:\n\n event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)\n perc = report['Details']['forward_backward'][event]\n\n text = f\"\"\"{text} The pie chart on the right shows a more detailed breakdown. \n It shows that {int(perc)}% of the training time was spent on event \"{event}\".\"\"\"\n\n plot = create_piechart(report['Details']['forward_backward'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between forward and backward pass\") \n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:\n\n key = list(report['Details']['ratio'].keys())[0]\n ratio = report['Details']['ratio'][key]\n\n text = f\"\"\"The following pie chart shows a breakdown of the CPU/GPU operators that happened during CPU bottlenecks. \n It shows that {int(ratio)}% of the training time was spent on executing operators in \"{key}\".\"\"\"\n\n plot = create_piechart(report['Details']['ratio'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between CPU/GPU operators\")\n plots.append(plot)\n\n\n if 'general' in report['Details'] and len(report['Details']['general']) > 0:\n\n event = max(report['Details']['general'], key=report['Details']['general'].get)\n perc = report['Details']['general'][event]\n \n plot = create_piechart(report['Details']['general'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"General metrics recorded in framework \")\n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:\n\n event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)\n perc = report['Details']['horovod'][event]\n text = f\"\"\"The following pie chart shows a detailed breakdown of the Horovod metrics \n that have been recorded when the CPU bottleneck happened. 
The most expensive function was \n {event} with {int(perc)}%.\"\"\"\n\n plot = create_piechart(report['Details']['horovod'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Horovod metrics \")\n\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plot)))", "_____no_output_____" ], [ "if analyse_phase == \"training\": \n display(Markdown(\"\"\"### I/O bottlenecks\\n\\n\"\"\"))\n\n report = load_report('IOBottleneck')\n if report:\n params = report['RuleParameters'].split('\\n')\n threshold = int(params[0].split(':')[1])\n io_threshold = int(params[1].split(':')[1])\n gpu_threshold = int(params[2].split(':')[1])\n patience = int(params[3].split(':')[1])\n violations = report['Violations']\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n if report['Violations'] > 0:\n perc = int(report['Violations']/report['Datapoints']*100)\n else:\n perc = 0\n if perc < threshold:\n string = 'below'\n else:\n string = 'above'\n text = f\"\"\"The IOBottleneck rule checked when the I/O wait time was above io_threshold of {io_threshold}% \n and GPU utilization was below gpu_threshold of {gpu_threshold}%. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints. \n With this configuration the rule found {violations} I/O bottlenecks, which is {perc}% of the total time. This is {string} the threshold of {threshold}%.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\"\"\"\n paragraph = Paragraph(text=text, width=900)\n show(paragraph)\n \n if report:\n\n plots = []\n text = \"\"\n if report['RuleTriggered'] > 0:\n\n low_gpu = report['Details']['low_gpu_utilization']\n cpu_bottleneck = {}\n cpu_bottleneck[\"GPU usage above threshold\"] = report[\"Datapoints\"] - report[\"Details\"][\"low_gpu_utilization\"]\n cpu_bottleneck[\"GPU usage below threshold\"] = report[\"Details\"][\"low_gpu_utilization\"] - len(report[\"Details\"])\n cpu_bottleneck[\"Low GPU usage due to I/O bottlenecks\"] = len(report[\"Details\"][\"bottlenecks\"])\n\n n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2)\n text = f\"\"\"The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}%\n and how many of those datapoints were likely caused by an I/O bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization \n below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by I/O bottlenecks. \n \"\"\"\n\n plot = create_piechart(cpu_bottleneck, \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Low GPU usage caused by I/O bottlenecks\")\n\n plots.append(plot)\n\n if 'phase' in report['Details']:\n text = f\"\"\"{text} The chart (in the middle) shows whether I/O bottlenecks mainly happened during the training or validation phase.\n \"\"\"\n\n plot = create_piechart(report['Details']['phase'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between the time spent on the TRAIN/EVAL phase\")\n plots.append(plot)\n\n if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:\n\n event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)\n perc = report['Details']['forward_backward'][event]\n\n text = f\"\"\"{text} The pie chart on the right shows a more detailed breakdown. 
\n It shows that {int(perc)}% of the training time was spent on event \"{event}\".\"\"\"\n\n plot = create_piechart(report['Details']['forward_backward'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"The ratio between forward and backward pass\") \n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:\n\n key = list(report['Details']['ratio'].keys())[0]\n ratio = report['Details']['ratio'][key]\n\n text = f\"\"\"The following pie chart shows a breakdown of the CPU/GPU operators that happened \n during I/O bottlenecks. It shows that {int(ratio)}% of the training time was spent on executing operators in \"{key}\".\"\"\"\n\n plot = create_piechart(report['Details']['ratio'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Ratio between CPU/GPU operators\")\n plots.append(plot)\n\n\n if 'general' in report['Details'] and len(report['Details']['general']) > 0:\n\n event = max(report['Details']['general'], key=report['Details']['general'].get)\n perc = report['Details']['general'][event]\n\n plot = create_piechart(report['Details']['general'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"General metrics recorded in framework \")\n plots.append(plot)\n\n if len(plots) > 0:\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plots)))\n\n plots = []\n text = \"\"\n if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:\n\n event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)\n perc = report['Details']['horovod'][event]\n text = f\"\"\"The following pie chart shows a detailed breakdown of the Horovod metrics that have been\n recorded when I/O bottlenecks happened. The most expensive function was {event} with {int(perc)}%.\"\"\"\n\n plot = create_piechart(report['Details']['horovod'], \n height=350,\n width=600,\n x1=0.2,\n x2=0.6,\n radius=0.3, \n title=\"Horovod metrics \")\n\n paragraph = Paragraph(text=text, width=900)\n show(column(paragraph, row(plot))) \n", "_____no_output_____" ], [ "if analyse_phase == \"training\":\n display(Markdown(\"\"\"### GPU memory\\n\\n\"\"\"))\n \n report = load_report('GPUMemoryIncrease')\n if report:\n params = report['RuleParameters'].split('\\n')\n increase = float(params[0].split(':')[1])\n patience = params[1].split(':')[1]\n window = params[2].split(':')[1]\n violations = report['Violations']\n triggered = report['RuleTriggered']\n datapoints = report['Datapoints']\n \n text=Paragraph(text=f\"\"\"The GPUMemoryIncrease rule helps to detect a large increase in memory usage on GPUs. \n The rule checked if the moving average of memory increased by more than {increase}%. \n So if the moving average increased for instance from 10% to {10+increase}%, 
The rule detected {violations} violations\n where the moving average between previous and current time window increased by more than {increase}%.\n The rule analysed {datapoints} datapoints and triggered {triggered} times.\"\"\",\n width=900)\n show(text)\n\n if len(report['Details']) > 0:\n \n timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])\n date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')\n day = date.date().strftime(\"%m/%d/%Y\")\n hour = date.time().strftime(\"%H:%M:%S\")\n text = Paragraph(text=f\"\"\"Your training job triggered memory spikes. \n The last time the GPUMemoryIncrease rule triggered in your training job was on {day} at {hour}.\n The following boxplots are a snapshot from the timestamps. They show for each node and GPU the corresponding\n memory utilization (without outliers).\"\"\", width=900)\n show(text)\n \n del report['Details']['last_timestamp']\n \n for node_id in report['Details']:\n \n plot = figure(plot_height=350, \n plot_width=1000,\n toolbar_location='right',\n tools=\"hover,wheel_zoom,reset,pan\", \n title=f\"Node {node_id}\",\n x_range=(0,17),\n )\n\n for index, key in enumerate(report['Details'][node_id]):\n display(Markdown(f\"\"\"**Memory utilization of {key} on node {node_id}:**\"\"\"))\n text = \"\"\n gpu_max = report['Details'][node_id][key]['gpu_max']\n text = f\"\"\"{text} The max memory utilization of {key} on node {node_id} was {gpu_max}%.\"\"\"\n \n p_95 = int(report['Details'][node_id][key]['p95'])\n p_5 = report['Details'][node_id][key]['p05']\n if p_95 < int(50): \n text = f\"\"\"{text} The 95th percentile was only {p_95}%.\"\"\"\n if p_5 < int(5): \n text = f\"\"\"{text} The 5th percentile was only {p_5}%.\"\"\"\n if p_95 - p_5 > 50:\n text = f\"\"\"{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite \n significant, which means that memory utilization on {key} is fluctuating quite a lot.\"\"\"\n \n text = Paragraph(text=f\"\"\"{text}\"\"\", width=900)\n show(text)\n \n upper = report['Details'][node_id][key]['upper']\n lower = report['Details'][node_id][key]['lower']\n p75 = report['Details'][node_id][key]['p75']\n p25 = report['Details'][node_id][key]['p25']\n p50 = report['Details'][node_id][key]['p50']\n\n plot.segment(index+1, upper, index+1, p75, line_color=\"black\")\n plot.segment(index+1, lower, index+1, p25, line_color=\"black\")\n\n plot.vbar(index+1, 0.7, p50, p75, fill_color=\"#FDE725\", line_color=\"black\")\n plot.vbar(index+1, 0.7, p25, p50, fill_color=\"#440154\", line_color=\"black\")\n\n plot.rect(index+1, lower, 0.2, 0.01, line_color=\"black\")\n plot.rect(index+1, upper, 0.2, 0.01, line_color=\"black\")\n\n plot.xaxis.major_label_overrides[index+1] = key\n plot.xgrid.grid_line_color = None\n plot.ygrid.grid_line_color = \"white\"\n plot.grid.grid_line_width = 0\n\n plot.xaxis.major_label_text_font_size=\"10px\"\n plot.xaxis.ticker = np.arange(index+2)\n plot.yaxis.axis_label = \"Utilization in %\"\n show(plot)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e72e0db1d16c1b37ca9dcfbc3c49858bfba54
3,911
ipynb
Jupyter Notebook
notebook/hacking_force.ipynb
LinuxSuRen/open-digger
d7c57fab23cd454c3a2475aadff13a4789f3fcce
[ "Apache-2.0", "CC-BY-4.0" ]
16
2020-08-19T01:45:54.000Z
2020-09-14T02:34:59.000Z
notebook/hacking_force.ipynb
LinuxSuRen/open-digger
d7c57fab23cd454c3a2475aadff13a4789f3fcce
[ "Apache-2.0", "CC-BY-4.0" ]
61
2020-08-19T02:16:46.000Z
2020-09-14T05:19:00.000Z
notebook/hacking_force.ipynb
LinuxSuRen/open-digger
d7c57fab23cd454c3a2475aadff13a4789f3fcce
[ "Apache-2.0", "CC-BY-4.0" ]
6
2020-08-18T11:06:40.000Z
2020-09-12T03:48:42.000Z
30.084615
197
0.468934
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a6e7324a04273199a08af53624422cabe5f243f
74,855
ipynb
Jupyter Notebook
cratchpad-ch8.ipynb
gilmar-lima/pydata-book
a1eb5e08448b57f7afbc9283042a9b3241942e6a
[ "MIT" ]
null
null
null
cratchpad-ch8.ipynb
gilmar-lima/pydata-book
a1eb5e08448b57f7afbc9283042a9b3241942e6a
[ "MIT" ]
null
null
null
cratchpad-ch8.ipynb
gilmar-lima/pydata-book
a1eb5e08448b57f7afbc9283042a9b3241942e6a
[ "MIT" ]
null
null
null
25.539065
252
0.296654
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "data = pd.Series(np.random.randn(9),\n index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'],\n [1, 2, 3, 1, 3, 1, 2, 2, 3]])\ndata", "_____no_output_____" ], [ "data.index", "_____no_output_____" ], [ "data['b']", "_____no_output_____" ], [ "data['b':'c']", "_____no_output_____" ], [ "data.loc[['b','d']]", "_____no_output_____" ], [ "data.loc[:,2]", "_____no_output_____" ], [ "data.unstack()", "_____no_output_____" ], [ "data.unstack().stack()", "_____no_output_____" ], [ "frame = pd.DataFrame(np.arange(12).reshape((4,3)), index=[['a','a','b','b'],[1,2,1,2]], columns=[['Ohio', 'Ohio', 'Colorado'],['Green','Red','Green']])\nframe", "_____no_output_____" ], [ "frame.index.names=['key1', 'key2']\nframe.columns.names = ['state','color']\nframe", "_____no_output_____" ], [ "frame['Ohio']", "_____no_output_____" ], [ "pd.MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'],['Green', 'Red', 'Green']], names = ['state','color'])", "_____no_output_____" ], [ "frame.swaplevel('key1','key2')", "_____no_output_____" ], [ "frame.sort_index(level=1)", "_____no_output_____" ], [ "frame.swaplevel(0,1).sort_index(level=0)", "_____no_output_____" ], [ "frame.sum(level='key2')", "/tmp/ipykernel_169/2004046222.py:1: FutureWarning: Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n frame.sum(level='key2')\n" ], [ "frame.sum(level='color', axis=1)", "/tmp/ipykernel_169/4133796543.py:1: FutureWarning: Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n frame.sum(level='color', axis=1)\n" ], [ "frame = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1),\n 'c': ['one', 'one', 'one', 'two', 'two',\n 'two', 'two'],\n 'd': [0, 1, 2, 0, 1, 2, 3]})\nframe", "_____no_output_____" ], [ "frame2 = frame.set_index(['c','d'])\nframe2", "_____no_output_____" ], [ "frame2 = frame.set_index(['c','d'], drop=False)\nframe2\n", "_____no_output_____" ] ], [ [ "Data Frame joins", "_____no_output_____" ] ], [ [ "df1 = pd.DataFrame({'key':['b','b','a','c','a','a','b'], 'data1':range(7)})\ndf2 = pd.DataFrame({'key':['a','b','d'], 'data2':range(3)})\ndf1\n", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "pd.merge(df1,df2)", "_____no_output_____" ], [ "pd.merge(df1, df2, on = 'key')", "_____no_output_____" ], [ "df3 = pd.DataFrame({'lkey':['b','b','a','c','a','a','b'], 'data1':range(7)})\ndf4 = pd.DataFrame({'rkey':['a','b','d'], 'data2':range(3)})\n\n", "_____no_output_____" ], [ "pd.merge(df3, df4, left_on='lkey', right_on='rkey')", "_____no_output_____" ], [ "pd.merge(df1, df2, on = 'key', how='outer')\n", "_____no_output_____" ], [ "df1 = pd.DataFrame({'key':['b','b','a','c','a','b'], 'data1':range(6)})\ndf2 = pd.DataFrame({'key':['a','b','a','b','d'], 'data2':range(5)})\n\n", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "pd.merge(df1,df2, on='key', how='left')", "_____no_output_____" ], [ "pd.merge(df1, df2, how='inner')", "_____no_output_____" ], [ "left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],\n 'key2': ['one', 'two', 'one'],\n 'lval': [1, 2, 3]})\nright = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'],\n 'key2': ['one', 'one', 'one', 'two'],\n 'rval': [4, 5, 6, 7]})\npd.merge(left, right, on=['key1', 
'key2'], how='outer')", "_____no_output_____" ], [ "pd.merge(left, right, on = 'key1')", "_____no_output_____" ], [ "pd.merge(left, right, on='key1', suffixes=('_left','_right'))", "_____no_output_____" ], [ "left1 = pd.DataFrame({'key': ['a', 'b', 'a', 'a', 'b', 'c'],\n 'value': range(6)})\nright1 = pd.DataFrame({'group_val': [3.5, 7]}, index=['a', 'b'])\n\nleft1", "_____no_output_____" ], [ "right1", "_____no_output_____" ], [ "pd.merge(left1, right1, left_on='key', right_index=True, how='outer')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6e7d78cf450c40ba137812c8bce0805b9c57ca
1,626
ipynb
Jupyter Notebook
dataset_info.ipynb
wissamjur/local-impact
afb843d1908ed831e3169bb41c27deafc4e020a5
[ "MIT" ]
null
null
null
dataset_info.ipynb
wissamjur/local-impact
afb843d1908ed831e3169bb41c27deafc4e020a5
[ "MIT" ]
null
null
null
dataset_info.ipynb
wissamjur/local-impact
afb843d1908ed831e3169bb41c27deafc4e020a5
[ "MIT" ]
null
null
null
25.809524
87
0.606396
[ [ [ "import numpy as np\nimport pandas as pd\nfrom surprise import SVD, KNNWithMeans, NMF, SlopeOne, NMF\nfrom surprise import Dataset, Reader, accuracy\nfrom util.helpers import load_train_test_surpriselib, load_dataset_explicit\nfrom util.knn import get_knn\nfrom neighborhood_eval.neighborhood_accuracy import critical_nbhds_accuracy\n\n# path to the datasets folder\ndataset_name = 'ml-latest'\ndataset_path = '../data/' + dataset_name\n\n# load the data\nratings = load_dataset_explicit(dataset_name, dataset_path, total_users=16000)\nprint('Dataset size:', len(ratings))\nprint('Total no of Users:', len(set(ratings.user_id.to_list())))", "Dataset size: 1571685\nTotal no of Users: 15999\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
4a6eace4a7598e0698418da6e2012e5e1505f91a
108,183
ipynb
Jupyter Notebook
model_training/Roberta.ipynb
anryabrahamyan/Machine_Learning_project
bb274064590a6d61f1b09d09b0547ba9c7fe6b31
[ "MIT" ]
null
null
null
model_training/Roberta.ipynb
anryabrahamyan/Machine_Learning_project
bb274064590a6d61f1b09d09b0547ba9c7fe6b31
[ "MIT" ]
null
null
null
model_training/Roberta.ipynb
anryabrahamyan/Machine_Learning_project
bb274064590a6d61f1b09d09b0547ba9c7fe6b31
[ "MIT" ]
null
null
null
73.146045
37,324
0.703549
[ [ [ "import numpy as np\nimport regex as re\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport statistics\nimport math\nimport os\nimport keras.backend as K\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport tokenizers\nfrom transformers import RobertaTokenizer, TFRobertaModel\nimport tensorflow as tf\nimport pandas as pd\nfrom sklearn.metrics import classification_report\nfrom transformers import RobertaTokenizerFast, TFRobertaForSequenceClassification, pipeline\nfrom collections import Counter\nimport tensorflow_addons as tfa\nfrom collections import Counter\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "# Detect hardware, return appropriate distribution strategy (you can see that it is pretty easy to set up).\ntry:\n # TPU detection. No parameters necessary if TPU_NAME environment variable is set (always set in Kaggle)\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.experimental.TPUStrategy(tpu)\n print('Running on TPU ', tpu.master())\nexcept ValueError:\n # Default distribution strategy in Tensorflow. Works on CPU and single GPU.\n strategy = tf.distribute.get_strategy()\n\nprint('Number of replicas:', strategy.num_replicas_in_sync)", "Number of replicas: 1\n" ], [ "MODEL_NAME = 'roberta-base'\nMAX_LEN = 200\nARTIFACTS_PATH = '../artifacts/'\n\nBATCH_SIZE = 32 * strategy.num_replicas_in_sync\nEPOCHS = 2\n\nif not os.path.exists(ARTIFACTS_PATH):\n os.makedirs(ARTIFACTS_PATH)", "_____no_output_____" ], [ "df = pd.read_csv('../datasets/tweet_emotions.csv')\ndf.head()", "_____no_output_____" ], [ "X_data = df[['content']].to_numpy().reshape(-1)\ny_data = df[['sentiment']].to_numpy().reshape(-1)", "_____no_output_____" ], [ "categories = df[['sentiment']].values.reshape(-1)\n\ncounter_categories = Counter(categories)\ncategory_names = counter_categories.keys()\ncategory_values = counter_categories.values()\n\ny_pos = np.arange(len(category_names))\n\nplt.figure(1, figsize=(10, 5))\nplt.bar(y_pos, category_values, align='center', alpha=0.5)\nplt.xticks(y_pos, category_names)\nplt.ylabel('Number of texts')\nplt.title('Distribution of texts per category')\nplt.gca().yaxis.grid(True)\nplt.show()\n\nprint(counter_categories)\n", "_____no_output_____" ], [ "def calculate_stats(df, split_char=' '):\n categories = df['sentiment'].unique()\n \n all_lengths = []\n per_category = {\n 'lengths': {c:[] for c in categories},\n 'mean': {c:0 for c in categories},\n 'stdev': {c:0 for c in categories}\n }\n\n for index, row in df.iterrows():\n text = row['content']\n text = re.sub(r\"\\s+\", ' ', text) # Normalize\n text = text.split(split_char)\n l = len(text)\n \n category = row['sentiment']\n \n all_lengths.append(l)\n per_category['lengths'][category].append(l)\n \n for c in categories:\n per_category['mean'][c] = statistics.mean(per_category['lengths'][c])\n per_category['stdev'][c] = statistics.stdev(per_category['lengths'][c])\n \n global_stats = {\n 'mean': statistics.mean(all_lengths),\n 'stdev': statistics.stdev(all_lengths),\n 'lengths': all_lengths\n }\n \n return {\n 'global': global_stats,\n 'per_category': pd.DataFrame(per_category)\n }\n\n\ndef display_lengths_histograms(df_stats, n_cols=3):\n categories = 
df['sentiment'].unique()\n n_rows = math.ceil(len(categories) / n_cols)\n \n plt.figure(figsize=(15, 8))\n plt.suptitle('Distribution of lengths')\n \n # Subplot of all lengths\n plt.subplot(n_rows, n_cols, 1)\n plt.title('All categories')\n lengths = df_stats['global']['lengths']\n plt.hist(lengths, color='r')\n\n # Subplot of each category\n index_subplot = 2\n for c in categories:\n plt.subplot(n_rows, n_cols, index_subplot)\n plt.title('Category: %s' % c)\n \n lengths = df_stats['per_category']['lengths'][c]\n plt.hist(lengths, color='b')\n\n index_subplot += 1\n\n plt.show()", "_____no_output_____" ], [ "df_stats = calculate_stats(df)\ndf_stats['per_category']", "_____no_output_____" ], [ "display_lengths_histograms(df_stats)", "_____no_output_____" ], [ "n_texts = len(X_data)\nprint('Texts in dataset: %d' % n_texts)\n\ncategories = df['sentiment'].unique()\nn_categories = len(categories)\nprint('Number of categories: %d' % n_categories)\n\nprint('Done!')", "Texts in dataset: 40000\nNumber of categories: 13\nDone!\n" ], [ "def roberta_encode(texts, tokenizer):\n ct = len(texts)\n input_ids = np.ones((ct, MAX_LEN), dtype='int32')\n attention_mask = np.zeros((ct, MAX_LEN), dtype='int32')\n token_type_ids = np.zeros((ct, MAX_LEN), dtype='int32') # Not used in text classification\n\n for k, text in enumerate(texts):\n # Tokenize\n tok_text = tokenizer.tokenize(text)\n \n # Truncate and convert tokens to numerical IDs\n enc_text = tokenizer.convert_tokens_to_ids(tok_text[:(MAX_LEN-2)])\n \n input_length = len(enc_text) + 2\n input_length = input_length if input_length < MAX_LEN else MAX_LEN\n \n # Add tokens [CLS] and [SEP] at the beginning and the end\n input_ids[k,:input_length] = np.asarray([0] + enc_text + [2], dtype='int32')\n \n # Set to 1s in the attention input\n attention_mask[k,:input_length] = 1\n\n return {\n 'input_word_ids': input_ids,\n 'input_mask': attention_mask,\n 'input_type_ids': token_type_ids\n }", "_____no_output_____" ], [ "# Transform categories into numbers\ncategory_to_id = {}\ncategory_to_name = {}\n\nfor index, c in enumerate(y_data):\n if c in category_to_id:\n category_id = category_to_id[c]\n else:\n category_id = len(category_to_id)\n category_to_id[c] = category_id\n category_to_name[category_id] = c\n \n y_data[index] = category_id\n\n# Display dictionary\ncategory_to_name", "_____no_output_____" ], [ "datasets = pd.read_csv('../datasets/train_preprocessed.csv').dropna()\nX_train = datasets[\"content\"].astype(\"string\").to_numpy()\ny_train = datasets[\"sentiment\"].astype(\"category\").cat.codes.to_numpy()", "_____no_output_____" ], [ "datasets_t = pd.read_csv('../datasets/test_preprocessed.csv').dropna()\nX_test = datasets_t[\"content\"].astype(\"string\").to_numpy()\ny_test = datasets_t[\"sentiment\"].astype(\"category\").cat.codes.to_numpy()", "_____no_output_____" ], [ "tokenizer = RobertaTokenizer.from_pretrained(MODEL_NAME)", "_____no_output_____" ], [ "X_train = roberta_encode(X_train, tokenizer)\nX_test = roberta_encode(X_test, tokenizer)\n\ny_train = np.asarray(y_train, dtype='int32')\ny_test = np.asarray(y_test, dtype='int32')", "_____no_output_____" ], [ "def build_model(n_categories):\n with strategy.scope():\n input_word_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_word_ids')\n input_mask = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_mask')\n input_type_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_type_ids')\n\n # Import RoBERTa model from HuggingFace\n roberta_model = 
TFRobertaModel.from_pretrained(MODEL_NAME)\n x = roberta_model(input_word_ids, attention_mask=input_mask, token_type_ids=input_type_ids)\n\n # Huggingface transformers have multiple outputs, embeddings are the first one,\n # so let's slice out the first position\n x = x[0]\n\n x = tf.keras.layers.Dropout(0.1)(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(256, activation='relu')(x)\n x = tf.keras.layers.Dense(n_categories, activation='softmax')(x)\n\n model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=x)\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=1e-4),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n", "_____no_output_____" ], [ "with strategy.scope():\n model = build_model(n_categories)\n model.summary()", "Some layers from the model checkpoint at roberta-base were not used when initializing TFRobertaModel: ['lm_head']\n- This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nAll the layers of TFRobertaModel were initialized from the model checkpoint at roberta-base.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training.\n" ], [ "def macro_double_soft_f1(y, y_hat):\n \"\"\"Compute the macro soft F1-score as a cost (average 1 - soft-F1 across all labels).\n Use probability values instead of binary predictions.\n This version uses the computation of soft-F1 for both positive and negative class for each label.\n \n Args:\n y (int32 Tensor): targets array of shape (BATCH_SIZE, N_LABELS)\n y_hat (float32 Tensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS)\n \n Returns:\n cost (scalar Tensor): value of the cost function for the batch\n \"\"\"\n y = tf.cast(y, tf.float32)\n y_hat = tf.cast(y_hat, tf.float32)\n tp = tf.reduce_sum(y_hat * y, axis=0)\n fp = tf.reduce_sum(y_hat * (1 - y), axis=0)\n fn = tf.reduce_sum((1 - y_hat) * y, axis=0)\n tn = tf.reduce_sum((1 - y_hat) * (1 - y), axis=0)\n soft_f1_class1 = 2*tp / (2*tp + fn + fp + 1e-16)\n soft_f1_class0 = 2*tn / (2*tn + fn + fp + 1e-16)\n cost_class1 = 1 - soft_f1_class1 # reduce 1 - soft-f1_class1 in order to increase soft-f1 on class 1\n cost_class0 = 1 - soft_f1_class0 # reduce 1 - soft-f1_class0 in order to increase soft-f1 on class 0\n cost = 0.5 * (cost_class1 + cost_class0) # take into account both class 1 and class 0\n macro_cost = tf.reduce_mean(cost) # average on all labels\n return macro_cost", "_____no_output_____" ], [ "callbacks = [\n tf.keras.callbacks.ModelCheckpoint(filepath=\"./checkpoints/usual/\",save_best_only=True,save_weights_only=False)\n]", "_____no_output_____" ], [ "BATCH_SIZE", "_____no_output_____" ], [ "with strategy.scope():\n print('Training...')\n history = model.fit(X_train,\n y_train,\n epochs=2,\n batch_size=BATCH_SIZE,\n verbose=1,\n callbacks = callbacks,\n validation_split=0.2,\n workers = -1)", "Training...\nEpoch 1/2\nWARNING:tensorflow:Gradients do not exist for variables ['tf_roberta_model/roberta/pooler/dense/kernel:0', 
'tf_roberta_model/roberta/pooler/dense/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss`argument?\nWARNING:tensorflow:Gradients do not exist for variables ['tf_roberta_model/roberta/pooler/dense/kernel:0', 'tf_roberta_model/roberta/pooler/dense/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss`argument?\n669/669 [==============================] - ETA: 0s - loss: 2.1611 - accuracy: 0.2262" ], [ "model.save_weights('./weights/usual/saved_weights.h5')", "_____no_output_____" ], [ "counts = Counter(y_train)\nweights = {i:1/j for i,j in counts.items()}", "_____no_output_____" ], [ "with strategy.scope():\n print('Training...')\n history = model.fit(X_train,\n y_train,\n epochs=2,\n batch_size=BATCH_SIZE,\n verbose=1,\n callbacks = callbacks,\n validation_split=0.2,\n class_weight = weights)", "Training...\nEpoch 1/2\nWARNING:tensorflow:Gradients do not exist for variables ['tf_roberta_model/roberta/pooler/dense/kernel:0', 'tf_roberta_model/roberta/pooler/dense/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss`argument?\n" ], [ "model.save_weights('./weights/weighted/saved_weights.h5')", "_____no_output_____" ], [ "# -augment 1", "_____no_output_____" ], [ "with strategy.scope():\n print('Training...')\n history = model.fit(X_train,\n y_train,\n epochs=2,\n batch_size=BATCH_SIZE,\n verbose=1,\n callbacks = callbacks,\n validation_split=0.2)", "Training...\nEpoch 1/2\n669/669 [==============================] - 847s 1s/step - loss: 2.1632 - accuracy: 0.2177 - val_loss: 2.1482 - val_accuracy: 0.2258\nEpoch 2/2\n669/669 [==============================] - ETA: 0s - loss: 2.1620 - accuracy: 0.2168" ], [ "model.save_weights('./weights/prepr/saved_weights.h5')", "_____no_output_____" ], [ "def build_model(n_categories):\n with strategy.scope():\n input_word_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_word_ids')\n input_mask = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_mask')\n input_type_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_type_ids')\n\n # Import RoBERTa model from HuggingFace\n roberta_model = TFRobertaModel.from_pretrained(MODEL_NAME)\n x = roberta_model(input_word_ids, attention_mask=input_mask, token_type_ids=input_type_ids)\n\n # Huggingface transformers have multiple outputs, embeddings are the first one,\n # so let's slice out the first position\n x = x[0]\n\n x = tf.keras.layers.Dropout(0.1)(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(256, activation='relu')(x)\n x = tf.keras.layers.Dense(n_categories, activation='softmax')(x)\n\n model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=x)\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=1e-5),\n loss=macro_double_soft_f1,\n metrics=['accuracy'])\n\n return model", "_____no_output_____" ], [ "with strategy.scope():\n model = build_model(n_categories)\n model.summary()", "Some layers from the model checkpoint at roberta-base were not used when initializing TFRobertaModel: ['lm_head']\n- This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nAll the layers of TFRobertaModel were initialized from the model checkpoint at roberta-base.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training.\n" ], [ "with strategy.scope():\n print('Training...')\n history = model.fit(X_train,\n y_train,\n epochs=2,\n batch_size=BATCH_SIZE,\n verbose=1,\n callbacks = callbacks,\n validation_split=0.2)", "Training...\nEpoch 1/2\nWARNING:tensorflow:Gradients do not exist for variables ['tf_roberta_model_1/roberta/pooler/dense/kernel:0', 'tf_roberta_model_1/roberta/pooler/dense/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss`argument?\n" ], [ "model.save_weights('./weights/f1_loss/saved_weights.h5')", "_____no_output_____" ], [ "#usual\nmodel.load_weights('weights/usual/saved_weights.h5')\ny_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1)\nprint(classification_report(y_test, y_pred, zero_division = 0))", "412/412 [==============================] - 150s 363ms/step\n precision recall f1-score support\n\n 0 0.00 0.00 0.00 36\n 1 0.00 0.00 0.00 59\n 2 0.00 0.00 0.00 269\n 3 0.00 0.00 0.00 250\n 4 0.00 0.00 0.00 586\n 5 0.00 0.00 0.00 1719\n 6 0.00 0.00 0.00 437\n 7 0.00 0.00 0.00 1268\n 8 0.00 0.00 0.00 2831\n 9 0.00 0.00 0.00 504\n 10 0.00 0.00 0.00 1703\n 11 0.00 0.00 0.00 722\n 12 0.21 1.00 0.35 2790\n\n accuracy 0.21 13174\n macro avg 0.02 0.08 0.03 13174\nweighted avg 0.04 0.21 0.07 13174\n\n" ], [ "# weighted \nmodel.load_weights('weights/weighted/saved_weights.h5')\ny_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1)\nprint(classification_report(y_test, y_pred,zero_division = 0))", "412/412 [==============================] - 149s 362ms/step\n precision recall f1-score support\n\n 0 0.00 0.00 0.00 36\n 1 0.00 0.00 0.00 59\n 2 0.00 0.00 0.00 269\n 3 0.00 0.00 0.00 250\n 4 0.00 0.00 0.00 586\n 5 0.00 0.00 0.00 1719\n 6 0.00 0.00 0.00 437\n 7 0.00 0.00 0.00 1268\n 8 0.21 1.00 0.35 2831\n 9 0.00 0.00 0.00 504\n 10 0.00 0.00 0.00 1703\n 11 0.00 0.00 0.00 722\n 12 0.00 0.00 0.00 2790\n\n accuracy 0.21 13174\n macro avg 0.02 0.08 0.03 13174\nweighted avg 0.05 0.21 0.08 13174\n\n" ], [ "# f1 loss \nmodel.load_weights('weights/f1_loss/saved_weights.h5')\ny_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1)\nprint(classification_report(y_test, y_pred,zero_division = 0))", "412/412 [==============================] - 150s 364ms/step\n precision recall f1-score support\n\n 0 0.00 0.00 0.00 36\n 1 0.00 0.00 0.00 59\n 2 0.00 0.00 0.00 269\n 3 0.00 0.00 0.00 250\n 4 0.00 0.00 0.00 586\n 5 0.00 0.00 0.00 1719\n 6 0.00 0.00 0.00 437\n 7 0.00 0.00 0.00 1268\n 8 0.00 0.00 0.00 2831\n 9 0.00 0.00 0.00 504\n 10 0.00 0.00 0.00 1703\n 11 0.00 0.00 0.00 722\n 12 0.21 1.00 0.35 2790\n\n accuracy 0.21 13174\n macro avg 0.02 0.08 0.03 13174\nweighted avg 0.04 0.21 0.07 13174\n\n" ], [ "# augmented \nmodel.load_weights('weights/prepr/saved_weights.h5')\ny_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1)\nprint(classification_report(y_test, y_pred,zero_division = 0))", "412/412 [==============================] - 150s 
364ms/step\n precision recall f1-score support\n\n 0 0.00 0.00 0.00 36\n 1 0.00 0.00 0.00 59\n 2 0.00 0.00 0.00 269\n 3 0.00 0.00 0.00 250\n 4 0.00 0.00 0.00 586\n 5 0.00 0.00 0.00 1719\n 6 0.00 0.00 0.00 437\n 7 0.00 0.00 0.00 1268\n 8 0.21 1.00 0.35 2831\n 9 0.00 0.00 0.00 504\n 10 0.00 0.00 0.00 1703\n 11 0.00 0.00 0.00 722\n 12 0.00 0.00 0.00 2790\n\n accuracy 0.21 13174\n macro avg 0.02 0.08 0.03 13174\nweighted avg 0.05 0.21 0.08 13174\n\n" ], [ "scores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1] * 100))", "Accuracy: 39.24%\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6eaf2e45a2cd98150dd9b3e3c0b0cf7fcdd449
16,793
ipynb
Jupyter Notebook
docs/algorithms/hubs.ipynb
a1ip/Network-Analysis-Made-Simple
7404c35cab8cdc9c119961ba33baef0398a20adc
[ "MIT" ]
1
2020-08-27T16:40:05.000Z
2020-08-27T16:40:05.000Z
docs/algorithms/hubs.ipynb
a1ip/Network-Analysis-Made-Simple
7404c35cab8cdc9c119961ba33baef0398a20adc
[ "MIT" ]
null
null
null
docs/algorithms/hubs.ipynb
a1ip/Network-Analysis-Made-Simple
7404c35cab8cdc9c119961ba33baef0398a20adc
[ "MIT" ]
null
null
null
29.154514
135
0.597332
[ [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ] ], [ [ "## Introduction\n\nBecause of the relational structure in a graph,\nwe can begin to think about \"importance\" of a node\nthat is induced because of its relationships\nto the rest of the nodes in the graph.\n\nBefore we go on, let's think about\na pertinent and contemporary example.\n\n### An example: contact tracing\n\nAt the time of writing (April 2020),\nfinding important nodes in a graph has actually taken on a measure of importance\nthat we might not have appreciated before.\nWith the COVID-19 virus spreading,\ncontact tracing has become quite important.\nIn an infectious disease contact network,\nwhere individuals are nodes and\ncontact between individuals of some kind are the edges,\nan \"important\" node in this contact network\nwould be an individual who was infected\nwho also was in contact with many people\nduring the time that they were infected.\n\n### Our dataset: \"Sociopatterns\"\n\nThe dataset that we will use in this chapter is the \"[sociopatterns network][sociopatterns]\" dataset.\nIncidentally, it's also about infectious diseases.\n\n[sociopatterns]: http://konect.uni-koblenz.de/networks/sociopatterns-infectious\n\nHere is the description of the dataset.\n\n> This network describes the face-to-face behavior of people\n> during the exhibition INFECTIOUS: STAY AWAY in 2009\n> at the Science Gallery in Dublin.\n> Nodes represent exhibition visitors;\n> edges represent face-to-face contacts that were active for at least 20 seconds.\n> Multiple edges between two nodes are possible and denote multiple contacts.\n> The network contains the data from the day with the most interactions.\n\nTo simplify the network, we have represented only the last contact between individuals.", "_____no_output_____" ] ], [ [ "from nams import load_data as cf\nG = cf.load_sociopatterns_network()", "_____no_output_____" ] ], [ [ "It is loaded as an undirected graph object:", "_____no_output_____" ] ], [ [ "type(G)", "_____no_output_____" ] ], [ [ "As usual, before proceeding with any analysis,\nwe should know basic graph statistics.", "_____no_output_____" ] ], [ [ "len(G.nodes()), len(G.edges())", "_____no_output_____" ] ], [ [ "## A Measure of Importance: \"Number of Neighbors\"\n\nOne measure of importance of a node is\nthe number of **neighbors** that the node has.\nWhat is a **neighbor**?\nWe will work with the following definition:\n\n> The neighbor of a node is connected to that node by an edge.\n\nLet's explore this concept, using the NetworkX API.\n\nEvery NetworkX graph provides a `G.neighbors(node)` class method,\nwhich lets us query a graph for the number of neighbors\nof a given node:", "_____no_output_____" ] ], [ [ "G.neighbors(7)", "_____no_output_____" ] ], [ [ "It returns a generator that doesn't immediately return\nthe exact neighbors list.\nThis means we cannot know its exact length,\nas it is a generator.\nIf you tried to do:\n\n```python\nlen(G.neighbors(7))\n```\n\nyou would get the following error:\n\n```python\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-13-72c56971d077> in <module>\n----> 1 len(G.neighbors(7))\n\nTypeError: object of type 'dict_keyiterator' has no len()\n```\n\nHence, we will need to cast it as a list in order to know\nboth its length\nand its members:", "_____no_output_____" ] ], [ [ "list(G.neighbors(7))", 
"_____no_output_____" ] ], [ [ "In the event that some nodes have an extensive list of neighbors,\nthen using the `dict_keyiterator` is potentially a good memory-saving technique,\nas it lazily yields the neighbors.", "_____no_output_____" ], [ "### Exercise: Rank-ordering the number of neighbors a node has\n\nSince we know how to get the list of nodes that are neighbors of a given node,\ntry this following exercise:\n\n> Can you create a ranked list of the importance of each individual, based on the number of neighbors they have?\n\nHere are a few hints to help:\n\n- You could consider using a `pandas Series`. This would be a modern and idiomatic way of approaching the problem.\n- You could also consider using Python's `sorted` function.", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import rank_ordered_neighbors\n\n#### REPLACE THE NEXT FEW LINES WITH YOUR ANSWER\n# answer = rank_ordered_neighbors(G)\n# answer", "_____no_output_____" ] ], [ [ "The original implementation looked like the following", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import rank_ordered_neighbors_original\n# rank_ordered_neighbors_original??", "_____no_output_____" ] ], [ [ "And another implementation that uses generators:", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import rank_ordered_neighbors_generator\n# rank_ordered_neighbors_generator??", "_____no_output_____" ] ], [ [ "## Generalizing \"neighbors\" to arbitrarily-sized graphs\n\nThe concept of neighbors is simple and appealing,\nbut it leaves us with a slight point of dissatisfaction:\nit is difficult to compare graphs of different sizes.\nIs a node more important solely because it has more neighbors?\nWhat if it were situated in an extremely large graph?\nWould we not expect it to have more neighbors?\n\nAs such, we need a normalization factor.\nOne reasonable one, in fact, is\n_the number of nodes that a given node could **possibly** be connected to._\nBy taking the ratio of the number of neighbors a node has\nto the number of neighbors it could possibly have,\nwe get the **degree centrality** metric.\n\nFormally defined, the degree centrality of a node (let's call it $d$)\nis the number of neighbors that a node has (let's call it $n$)\ndivided by the number of neighbors it could _possibly_ have (let's call it $N$):\n\n$$d = \\frac{n}{N}$$\n\nNetworkX provides a function for us to calculate degree centrality conveniently:", "_____no_output_____" ] ], [ [ "import networkx as nx\nimport pandas as pd\ndcs = pd.Series(nx.degree_centrality(G))\ndcs", "_____no_output_____" ] ], [ [ "`nx.degree_centrality(G)` returns to us a dictionary of key-value pairs,\nwhere the keys are node IDs\nand values are the degree centrality score.\nTo save on output length, I took the liberty of casting it as a pandas Series\nto make it easier to display.\n\nIncidentally, we can also sort the series\nto find the nodes with the highest degree centralities:", "_____no_output_____" ] ], [ [ "dcs.sort_values(ascending=False)", "_____no_output_____" ] ], [ [ "Does the list order look familiar?\nIt should, since the numerator of the degree centrality metric\nis identical to the number of neighbors,\nand the denominator is a constant.\n\n## Distribution of graph metrics\n\nOne important concept that you should come to know\nis that the distribution of node-centric values\ncan characterize classes of graphs.\n\nWhat do we mean by \"distribution of node-centric values\"?\nOne would be the degree distribution,\nthat is, the collection of node degree 
values in a graph.\n\nGenerally, you might be familiar with plotting a histogram\nto visualize distributions of values,\nbut in this book, we are going to avoid histograms like the plague.\nI detail a lot of reasons in a [blog post][ecdf] I wrote in 2018,\nbut the main points are that:\n\n1. It's easier to lie with histograms.\n1. You get informative statistical information (median, IQR, extremes/outliers)\nmore easily.\n\n[ecdf]: https://ericmjl.github.io/blog/2018/7/14/ecdfs/\n\n### Exercise: Degree distribution\n\nIn this next exercise, we are going to get practice visualizing these values\nusing empirical cumulative distribution function plots.\n\nI have written for you an ECDF function that you can use already.\nIts API looks like the following:\n\n```python\nx, y = ecdf(list_of_values)\n```\n\ngiving you `x` and `y` values that you can directly plot.\n\nThe exercise prompt is this:\n\n> Plot the ECDF of the degree centrality and degree distributions.\n\nFirst do it for **degree centrality**:", "_____no_output_____" ] ], [ [ "from nams.functions import ecdf\nfrom nams.solutions.hubs import ecdf_degree_centrality\n\n#### REPLACE THE FUNCTION CALL WITH YOUR ANSWER\necdf_degree_centrality(G)", "_____no_output_____" ] ], [ [ "Now do it for **degree**:", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import ecdf_degree\n\n#### REPLACE THE FUNCTION CALL WITH YOUR ANSWER\necdf_degree(G)", "_____no_output_____" ] ], [ [ "The fact that they are identically-shaped\nshould not surprise you!", "_____no_output_____" ], [ "### Exercise: What about that denominator?\n\nThe denominator $N$ in the degree centrality definition\nis \"the number of nodes that a node could _possibly_ be connected to\".\nCan you think of two ways $N$ be defined?", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import num_possible_neighbors\n\n#### UNCOMMENT TO SEE MY ANSWER\n# print(num_possible_neighbors())", "_____no_output_____" ] ], [ [ "### Exercise: Circos Plotting\n\nLet's get some practice with the `nxviz` API.\n\n> Visualize the graph `G`, while ordering and colouring them by the 'order' node attribute.", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import circos_plot\n\n#### REPLACE THE NEXT LINE WITH YOUR ANSWER\ncircos_plot(G)", "_____no_output_____" ] ], [ [ "### Exercise: Visual insights\n\nSince we know that node colour and order\nare by the \"order\" in which the person entered into the exhibit,\nwhat does this visualization tell you?", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import visual_insights\n\n#### UNCOMMENT THE NEXT LINE TO SEE MY ANSWER\n# print(visual_insights())", "_____no_output_____" ] ], [ [ "### Exercise: Investigating degree centrality and node order\n\nOne of the insights that we might have gleaned from visualizing the graph\nis that the nodes that have a high degree centrality\nmight also be responsible for the edges that criss-cross the Circos plot.\nTo test this, plot the following:\n\n- x-axis: node degree centrality\n- y-axis: maximum difference between the neighbors' `order`s (a node attribute) and the node's `order`.", "_____no_output_____" ] ], [ [ "from nams.solutions.hubs import dc_node_order\n\ndc_node_order(G)", "_____no_output_____" ] ], [ [ "The somewhat positive correlation between the degree centrality might tell us that this trend holds true.\nA further applied question would be to ask what behaviour of these nodes would give rise to this pattern.\nAre these nodes actually exhibit staff?\nOr is there some other reason why 
they are staying so long?\nThis, of course, would require joining in further information\nthat we would overlay on top of the graph\n(by adding them as node or edge attributes)\nbefore we might make further statements.", "_____no_output_____" ], [ "## Reflections\n\nIn this chapter, we defined a metric of node importance: the degree centrality metric.\nIn the example we looked at, it could help us identify\npotential infectious agent superspreaders in a disease contact network.\nIn other settings, it might help us spot:\n\n- message amplifiers/influencers in a social network, and \n- potentially crowded airports that have lots of connections into and out of it (still relevant to infectious disease spread!)\n- and many more!\n\nWhat other settings can you think of in which the number of neighbors that a node has can become\na metric of importance for the node?", "_____no_output_____" ], [ "## Solutions\n\nHere are the solutions to the exercises above.", "_____no_output_____" ] ], [ [ "from nams.solutions import hubs\nimport inspect\n\nprint(inspect.getsource(hubs))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
4a6eb036b6ce8d9cfb00a1030547f2a978bf5c7d
24,473
ipynb
Jupyter Notebook
tutorials/tutorial08_environments.ipynb
2855239858/CenLight-Traffic-Grid-Signal-Optimization-viaAction-and-State-Decomposition
c9d51f583c1d90daf9c8afa1796e65d9f7ba6736
[ "MIT" ]
1
2021-05-16T12:15:38.000Z
2021-05-16T12:15:38.000Z
tutorials/tutorial08_environments.ipynb
2855239858/CenLight-Traffic-Grid-Signal-Optimization-viaAction-and-State-Decomposition
c9d51f583c1d90daf9c8afa1796e65d9f7ba6736
[ "MIT" ]
null
null
null
tutorials/tutorial08_environments.ipynb
2855239858/CenLight-Traffic-Grid-Signal-Optimization-viaAction-and-State-Decomposition
c9d51f583c1d90daf9c8afa1796e65d9f7ba6736
[ "MIT" ]
null
null
null
42.784965
608
0.632003
[ [ [ "# Tutorial 08: Creating Custom Environments 创建自定义环境\n\nThis tutorial walks you through the process of creating custom environments in Flow. Custom environments contain specific methods that define the problem space of a task, such as the state and action spaces of the RL agent and the signal (or reward) that the RL algorithm will optimize over. By specifying a few methods within a custom environment, individuals can use Flow to design traffic control tasks of various types, such as optimal traffic light signal timing and flow regulation via mixed autonomy traffic (see the figures below). Finally, these environments are compatible with OpenAI Gym.\n\n本教程将带您完成在Flow中创建自定义环境的过程。自定义环境包含定义任务的问题空间的特定方法,例如RL代理的状态和操作空间,以及RL算法将优化的信号(或奖励)。通过在自定义环境中指定一些方法,个人可以使用流来设计各种类型的交通控制任务,例如最优的交通灯信号定时和混合自主交通的流量调节(见下图)。最后,这些环境与OpenAI健身房是兼容的。\n\nThe rest of the tutorial is organized as follows: in section 1 walks through the process of creating an environment for mixed autonomy vehicle control where the autonomous vehicles perceive all vehicles in the network, and section two implements the environment in simulation.\n\n本教程的其余部分组织如下:第1节介绍了创建混合自主车辆控制环境的过程,其中自主车辆感知网络中的所有车辆,第2节在仿真中实现了该环境。\n\n<img src=\"img/sample_envs.png\">\n\n\n## 1. Creating an Environment Class 创建一个环境类\n\nIn this tutorial we will create an environment in which the accelerations of a handful of vehicles in the network are specified by a single centralized agent, with the objective of the agent being to improve the average speed of all vehicle in the network. In order to create this environment, we begin by inheriting the base environment class located in *flow.envs*:\n在本教程中,我们将创建一个环境,其中网络中少数车辆的加速由一个集中的代理指定,代理的目标是提高网络中所有车辆的平均速度。为了创建这样的环境,我们从继承位于*flow.envs*中的基本环境类开始:", "_____no_output_____" ] ], [ [ "# import the base environment class\nfrom flow.envs import Env\n\n# define the environment class, and inherit properties from the base environment class\nclass myEnv(Env):\n pass", "_____no_output_____" ] ], [ [ "`Env` provides the interface for running and modifying a SUMO simulation. Using this class, we are able to start sumo, provide a network to specify a configuration and controllers, perform simulation steps, and reset the simulation to an initial configuration.\n“Env”提供了运行和修改sumo模拟的接口。使用这个类,我们可以启动sumo,提供指定配置和控制器的网络,执行模拟步骤,并将模拟重置为初始配置。\n\nBy inheriting Flow's base environment, a custom environment for varying control tasks can be created by adding the following functions to the child class: \n\n通过继承Flow的基环境,可以通过在子类中添加以下函数来创建用于变化控制任务的自定义环境:\n\n* **action_space**动作空间\n* **observation_space**观测空间\n* **apply_rl_actions**RL应用空间\n* **get_state**获取状态\n* **compute_reward**计算奖励值\n\nEach of these components are covered in the next few subsections.\n\n### 1.1 ADDITIONAL_ENV_PARAMS\n\nThe features used to parametrize components of the state/action space as well as the reward function are specified within the `EnvParams` input, as discussed in tutorial 1. Specifically, for the sake of our environment, the `additional_params` attribute within `EnvParams` will be responsible for storing information on the maximum possible accelerations and decelerations by the autonomous vehicles in the network. 
Accordingly, for this problem, we define an `ADDITIONAL_ENV_PARAMS` variable of the form:\n用于参数化状态/动作空间组件的特性以及奖励功能在“EnvParams”输入中指定,如教程1中所述。具体来说,为了保护我们的环境,‘EnvParams’中的‘additional_params’属性将负责存储网络中自动驾驶车辆最大可能的加速和减速信息。因此,对于这个问题,我们定义了表单的‘ADDITIONAL_ENV_PARAMS’变量:", "_____no_output_____" ] ], [ [ "ADDITIONAL_ENV_PARAMS = {\n \"max_accel\": 1,\n \"max_decel\": 1,\n}", "_____no_output_____" ] ], [ [ "All environments presented in Flow provide a unique `ADDITIONAL_ENV_PARAMS` component containing the information needed to properly define some environment-specific parameters. We assume that these values are always provided by the user, and accordingly can be called from `env_params`. For example, if we would like to call the \"max_accel\" parameter, we simply type:\nFlow中提供的所有环境都提供了一个惟一的‘ADDITIONAL_ENV_PARAMS’组件,其中包含正确定义某些特定于环境的参数所需的信息。我们假设这些值总是由用户提供的,因此可以从' env_params '中调用。例如,如果我们想调用“max_accel”参数,我们只需输入:\n\n max_accel = env_params.additional_params[\"max_accel\"]\n\n### 1.2 action_space 动作空间\n\nThe `action_space` method defines the number and bounds of the actions provided by the RL agent. In order to define these bounds with an OpenAI gym setting, we use several objects located within *gym.spaces*. For instance, the `Box` object is used to define a bounded array of values in $\\mathbb{R}^n$.\n“action_space”方法定义了RL代理提供的操作的数量和界限。为了定义OpenAI健身房设置的这些边界,我们使用了位于*gym.spaces*内的几个对象。例如,“Box”对象用于定义$\\mathbb{R}^n$中的有界值数组。", "_____no_output_____" ] ], [ [ "from gym.spaces.box import Box", "_____no_output_____" ] ], [ [ "In addition, `Tuple` objects (not used by this tutorial) allow users to combine multiple `Box` elements together.\n此外,“Tuple”对象(本教程中没有使用)允许用户将多个“Box”元素组合在一起。", "_____no_output_____" ] ], [ [ "from gym.spaces import Tuple", "_____no_output_____" ] ], [ [ "Once we have imported the above objects, we are ready to define the bounds of our action space. Given that our actions consist of a list of n real numbers (where n is the number of autonomous vehicles) bounded from above and below by \"max_accel\" and \"max_decel\" respectively (see section 1.1), we can define our action space as follows:\n\n一旦导入了上述对象,就可以定义操作空间的边界了。假设我们的动作是由n个实数组成的列表(其中n是自动驾驶车辆的数量),从上到下分别由“max_accel”和“max_decel”约束(参见1.1节),我们可以这样定义我们的动作空间:", "_____no_output_____" ] ], [ [ "class myEnv(myEnv):\n\n @property\n def action_space(self):\n num_actions = self.initial_vehicles.num_rl_vehicles\n accel_ub = self.env_params.additional_params[\"max_accel\"]\n accel_lb = - abs(self.env_params.additional_params[\"max_decel\"])\n\n return Box(low=accel_lb,\n high=accel_ub,\n shape=(num_actions,))", "_____no_output_____" ] ], [ [ "### 1.3 observation_space 观察空间\nThe observation space of an environment represents the number and types of observations that are provided to the reinforcement learning agent. For this example, we will be observe two values for each vehicle: its position and speed. Accordingly, we need a observation space that is twice the size of the number of vehicles in the network.\n环境的观察空间表示提供给强化学习代理的观察的数量和类型。对于本例,我们将观察每个车辆的两个值:位置和速度。因此,我们需要的观测空间是网络中车辆数量的两倍。", "_____no_output_____" ] ], [ [ "class myEnv(myEnv): # update my environment class\n\n @property\n def observation_space(self):\n return Box(\n low=0,\n high=float(\"inf\"),\n shape=(2*self.initial_vehicles.num_vehicles,),\n )", "_____no_output_____" ] ], [ [ "### 1.4 apply_rl_actions 应用Rl动作\nThe function `apply_rl_actions` is responsible for transforming commands specified by the RL agent into actual actions performed within the simulator. 
The vehicle kernel within the environment class contains several helper methods that may be used to facilitate this process. These functions include:\n\n函数' apply_rl_actions '负责将RL代理指定的命令转换为在模拟器中执行的实际操作。environment类中的vehicle内核包含几个辅助方法,可以用来促进这个过程。这些功能包括:\n\n* **apply_acceleration** (list of str, list of float) -> None: converts an action, or a list of actions, into accelerations to the specified vehicles (in simulation)\n* **apply_lane_change** (list of str, list of {-1, 0, 1}) -> None: converts an action, or a list of actions, into lane change directions for the specified vehicles (in simulation)\n* **choose_route** (list of str, list of list of str) -> None: converts an action, or a list of actions, into rerouting commands for the specified vehicles (in simulation)\n\nFor our example we consider a situation where the RL agent can only specify accelerations for the RL vehicles; accordingly, the actuation method for the RL agent is defined as follows:\n\n在我们的例子中,我们考虑这样一种情况:RL代理只能为RL车辆指定加速;因此,RL agent的驱动方法定义如下:", "_____no_output_____" ] ], [ [ "class myEnv(myEnv): # update my environment class\n\n    def _apply_rl_actions(self, rl_actions):\n        # the names of all autonomous (RL) vehicles in the network\n        rl_ids = self.k.vehicle.get_rl_ids()\n\n        # use the base environment method to convert actions into accelerations for the rl vehicles\n        self.k.vehicle.apply_acceleration(rl_ids, rl_actions)", "_____no_output_____" ] ], [ [ "### 1.5 get_state 获取状态\n\nThe `get_state` method extracts features from within the environment and provides them as inputs to the policy provided by the RL agent. Several helper methods exist within flow to help facilitate this process. Some useful helper methods can be accessed from the following objects:\n\n“get_state”方法从环境中提取特性,然后作为RL代理提供的策略的输入。flow中存在几个帮助方法来帮助简化这个过程。一些有用的帮助方法可以从以下对象访问:\n\n* **self.k.vehicle**: provides current state information for all vehicles within the network为网络中的所有车辆提供当前状态信息\n* **self.k.traffic_light**: provides state information on the traffic lights提供交通信号灯的状态信息\n* **self.k.network**: information on the network, which unlike the vehicles and traffic lights is static网络上的信息,这与车辆和红绿灯是静态的\n* More accessor objects and methods can be found within the Flow documentation at: http://berkeleyflow.readthedocs.io/en/latest/\n\nIn order to model global observability within the network, our state space consists of the speeds and positions of all vehicles (as mentioned in section 1.3). This is implemented as follows:\n为了在网络中建立全局可观测性模型,我们的状态空间由所有车辆的速度和位置组成(如第1.3节所述)。实施办法如下:", "_____no_output_____" ] ], [ [ "import numpy as np\n\nclass myEnv(myEnv): # update my environment class\n\n    def get_state(self, **kwargs):\n        # the get_ids() method is used to get the names of all vehicles in the network\n        ids = self.k.vehicle.get_ids()\n\n        # we use the get_x_by_id method to get the positions of all vehicles\n        pos = [self.k.vehicle.get_x_by_id(veh_id) for veh_id in ids]\n\n        # we use the get_speed method to get the velocities of all vehicles\n        vel = [self.k.vehicle.get_speed(veh_id) for veh_id in ids]\n\n        # the speeds and positions are concatenated to produce the state\n        return np.concatenate((pos, vel))", "_____no_output_____" ] ], [ [ "### 1.6 compute_reward 计算奖励值\n\nThe `compute_reward` method returns the reward associated with any given state. 
These values may encompass returns from values within the state space (defined in section 1.5) or may contain information provided by the environment but not immediately available within the state, as is the case in partially observable tasks (or POMDPs).\n\n' compute_reward '方法返回与任何给定状态相关联的奖励。这些值可能包含状态空间(在第1.5节中定义)中的值的返回,或者可能包含环境提供的信息,但是不能立即在状态中使用,就像部分可观察任务(或POMDPs)中的情况一样。\n\nFor this tutorial, we choose the reward function to be the average speed of all vehicles currently in the network. In order to extract this information from the environment, we use the `get_speed` method within the Vehicle kernel class to collect the current speed of all vehicles in the network, and return the average of these speeds as the reward. This is done as follows:\n在本教程中,我们选择奖励函数作为当前网络中所有车辆的平均速度。为了从环境中提取这些信息,我们在车辆内核类中使用' get_speed '方法来收集网络中所有车辆的当前速度,并返回这些速度的平均值作为奖励。具体做法如下:", "_____no_output_____" ] ], [ [ "import numpy as np\n\nclass myEnv(myEnv): # update my environment class\n\n    def compute_reward(self, rl_actions, **kwargs):\n        # the get_ids() method is used to get the names of all vehicles in the network\n        ids = self.k.vehicle.get_ids()\n\n        # we next get a list of the speeds of all vehicles in the network\n        speeds = self.k.vehicle.get_speed(ids)\n\n        # finally, we return the average of all these speeds as the reward\n        return np.mean(speeds)", "_____no_output_____" ] ], [ [ "## 2. Testing the New Environment 测试新环境\n\n\n### 2.1 Testing in Simulation\nNow that we have successfully created our new environment, we are ready to test this environment in simulation. We begin by running this environment in a non-RL based simulation. The return provided at the end of the simulation is indicative of the cumulative expected reward when jam-like behavior exists within the network. 
\n\n现在我们已经成功地创建了新的环境,我们准备在模拟中测试这个环境。我们首先在一个非基于rl的模拟中运行这个环境。在模拟结束时提供的回报指示了在netowrk中存在类似于jam的行为时累积的预期回报。", "_____no_output_____" ] ], [ [ "from flow.controllers import IDMController, ContinuousRouter\nfrom flow.core.experiment import Experiment\nfrom flow.core.params import SumoParams, EnvParams, \\\n InitialConfig, NetParams\nfrom flow.core.params import VehicleParams\nfrom flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n\nsim_params = SumoParams(sim_step=0.1, render=True)\n\nvehicles = VehicleParams()\nvehicles.add(veh_id=\"idm\",\n acceleration_controller=(IDMController, {}),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=22)\n\nenv_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)\n\nadditional_net_params = ADDITIONAL_NET_PARAMS.copy()\nnet_params = NetParams(additional_params=additional_net_params)\n\ninitial_config = InitialConfig(bunching=20)\n\nflow_params = dict(\n exp_tag='ring',\n env_name=myEnv, # using my new environment for the simulation\n network=RingNetwork,\n simulator='traci',\n sim=sim_params,\n env=env_params,\n net=net_params,\n veh=vehicles,\n initial=initial_config,\n)\n\n# number of time steps\nflow_params['env'].horizon = 1500\nexp = Experiment(flow_params)\n\n# run the sumo simulation\n_ = exp.run(1)", "_____no_output_____" ] ], [ [ "### 2.2 Training the New Environment 培训新环境\n\nNext, we wish to train this environment in the presence of the autonomous vehicle agent to reduce the formation of waves in the network, thereby pushing the performance of vehicles in the network past the above expected return.\n\n接下来,我们希望在自主车辆代理存在的情况下训练这种环境,以减少网络中波浪的形成,从而使网络中车辆的性能超过上述预期收益。\n\nThe below code block may be used to train the above environment using the Proximal Policy Optimization (PPO) algorithm provided by RLlib. In order to register the environment with OpenAI gym, the environment must first be placed in a separate \".py\" file and then imported via the script below. 
Then, the script immediately below should function regularly.\n\n下面的代码块可以使用RLlib提供的Proximal Policy Optimization (PPO)算法来训练上述环境。为了注册OpenAI健身房的环境,环境必须首先放在一个单独的。py”。然后通过下面的脚本导入。然后,下面的脚本应该正常工作。", "_____no_output_____" ] ], [ [ "#############################################################\n####### Replace this with the environment you created #######\n#############################################################\nfrom flow.envs import AccelEnv as myEnv", "_____no_output_____" ] ], [ [ "**Note**: We do not recommend training this environment to completion within a jupyter notebook setting; however, once training is complete, visualization of the resulting policy should show that the autonomous vehicle learns to dissipate the formation and propagation of waves in the network.\n\n**注**:我们不建议在这种环境下进行的培训是在木星笔记本设置中完成的;然而,一旦训练完成,结果策略的可视化应该表明,自主车辆学会了在网络中消散波的形成和传播。", "_____no_output_____" ] ], [ [ "import json\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\nfrom ray.tune import run_experiments\nfrom ray.tune.registry import register_env\n\nfrom flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\nfrom flow.utils.registry import make_create_env\nfrom flow.utils.rllib import FlowParamsEncoder\nfrom flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\nfrom flow.core.params import VehicleParams, SumoCarFollowingParams\nfrom flow.controllers import RLController, IDMController, ContinuousRouter\n\n\n# time horizon of a single rollout\nHORIZON = 1500\n# number of rollouts per training iteration\nN_ROLLOUTS = 20\n# number of parallel workers\nN_CPUS = 2\n\n\n# We place one autonomous vehicle and 22 human-driven vehicles in the network\nvehicles = VehicleParams()\nvehicles.add(\n veh_id=\"human\",\n acceleration_controller=(IDMController, {\n \"noise\": 0.2\n }),\n car_following_params=SumoCarFollowingParams(\n min_gap=0\n ),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=21)\nvehicles.add(\n veh_id=\"rl\",\n acceleration_controller=(RLController, {}),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=1)\n\nflow_params = dict(\n # name of the experiment\n exp_tag=\"stabilizing_the_ring\",\n\n # name of the flow environment the experiment is running on\n env_name=myEnv, # <------ here we replace the environment with our new environment\n\n # name of the network class the experiment is running on\n network=RingNetwork,\n\n # simulator that is used by the experiment\n simulator='traci',\n\n # sumo-related parameters (see flow.core.params.SumoParams)\n sim=SumoParams(\n sim_step=0.1,\n render=True,\n ),\n\n # environment related parameters (see flow.core.params.EnvParams)\n env=EnvParams(\n horizon=HORIZON,\n warmup_steps=750,\n clip_actions=False,\n additional_params={\n \"target_velocity\": 20,\n \"sort_vehicles\": False,\n \"max_accel\": 1,\n \"max_decel\": 1,\n },\n ),\n\n # network-related parameters (see flow.core.params.NetParams and the\n # network's documentation or ADDITIONAL_NET_PARAMS component)\n net=NetParams(\n additional_params=ADDITIONAL_NET_PARAMS.copy()\n ),\n\n # vehicles to be placed in the network at the start of a rollout (see\n # flow.core.params.VehicleParams)\n veh=vehicles,\n\n # parameters specifying the positioning of vehicles upon initialization/\n # reset (see flow.core.params.InitialConfig)\n initial=InitialConfig(\n bunching=20,\n ),\n)\n\n\ndef setup_exps():\n \"\"\"Return the relevant components of an RLlib experiment.\n\n Returns\n -------\n str\n name of the training algorithm\n str\n name of the gym 
environment to be trained\n dict\n training configuration parameters\n \"\"\"\n alg_run = \"PPO\"\n\n agent_cls = get_agent_class(alg_run)\n config = agent_cls._default_config.copy()\n config[\"num_workers\"] = N_CPUS\n config[\"train_batch_size\"] = HORIZON * N_ROLLOUTS\n config[\"gamma\"] = 0.999 # discount rate\n config[\"model\"].update({\"fcnet_hiddens\": [3, 3]})\n config[\"use_gae\"] = True\n config[\"lambda\"] = 0.97\n config[\"kl_target\"] = 0.02\n config[\"num_sgd_iter\"] = 10\n config['clip_actions'] = False # FIXME(ev) temporary ray bug\n config[\"horizon\"] = HORIZON\n\n # save the flow params for replay\n flow_json = json.dumps(\n flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)\n config['env_config']['flow_params'] = flow_json\n config['env_config']['run'] = alg_run\n\n create_env, gym_name = make_create_env(params=flow_params, version=0)\n\n # Register as rllib env\n register_env(gym_name, create_env)\n return alg_run, gym_name, config\n\n\nalg_run, gym_name, config = setup_exps()\nray.init(num_cpus=N_CPUS + 1)\ntrials = run_experiments({\n flow_params[\"exp_tag\"]: {\n \"run\": alg_run,\n \"env\": gym_name,\n \"config\": {\n **config\n },\n \"checkpoint_freq\": 20,\n \"checkpoint_at_end\": True,\n \"max_failures\": 999,\n \"stop\": {\n \"training_iteration\": 200,\n },\n }\n})", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6eb9717126ea3823a41af7f0d430193a8f809a
15,016
ipynb
Jupyter Notebook
Code/.ipynb_checkpoints/copy-study-subjects-checkpoint.ipynb
Aglinskas/BC-ORG-Data
3ea6419d7f71b1ea0d5882f0c0aa6590f11772ba
[ "MIT" ]
null
null
null
Code/.ipynb_checkpoints/copy-study-subjects-checkpoint.ipynb
Aglinskas/BC-ORG-Data
3ea6419d7f71b1ea0d5882f0c0aa6590f11772ba
[ "MIT" ]
null
null
null
Code/.ipynb_checkpoints/copy-study-subjects-checkpoint.ipynb
Aglinskas/BC-ORG-Data
3ea6419d7f71b1ea0d5882f0c0aa6590f11772ba
[ "MIT" ]
null
null
null
30.833676
116
0.433804
[ [ [ "pwd", "_____no_output_____" ], [ "import pandas as pd\nimport os", "_____no_output_____" ], [ "ls", "copy-study-subjects.ipynb ndar_fmri_to_subfolders.ipynb.save\nempty.md tut-neuroimaging-basic-IO.ipynb\nndar_fmri_to_subfolders.ipynb xx-bids-validator.ipynb\n" ], [ "df = pd.read_csv('../Data/general_csv.csv') #grab master list\ndf.head(5) #show first 5", "_____no_output_____" ], [ "df_trim = df.copy()\ndf_trim = df_trim.iloc[df_trim['collection_title'].values=='Multimodal Treatment Study of Children With ADHD']", "_____no_output_____" ], [ "df_trim['local_paths'] ## the files that we'll need", "_____no_output_____" ], [ "# copy those files into ds-2155 folder\nimport shutil\nsrc = '/mmfs1/data/pijarj/ndar_fmri/image03/TNAD-x-212988-x-212988-x-901-d0203.nii.gz'\ndst = '/mmfs1/data/pijarj/ds-2155/image0.nii.gz'\nshutil.copyfile(src=src,dst=dst)", "_____no_output_____" ], [ "import shutil\nsrc = '/mmfs1/data/pijarj/ndar_fmri/image03/TNAD-x-214064-x-214064-x-501-d0203.nii.gz'\ndst = '/mmfs1/data/pijarj/ds-2155/image1.nii.gz'\nshutil.copyfile(src=src,dst=dst)", "_____no_output_____" ], [ "import shutil\nsrc = '/mmfs1/data/pijarj/ndar_fmri/image03/TNAD-x-212797-x-212797-x-501-d0203.nii.gz'\ndst = '/mmfs1/data/pijarj/ds-2155/image2.nii.gz'\nshutil.copyfile(src=src,dst=dst)", "_____no_output_____" ], [ "import shutil\nsrc = '/mmfs1/data/pijarj/ndar_fmri/image03/TNAD-x-215614-x-215614-x-501-d0203.nii.gz'\ndst = '/mmfs1/data/pijarj/ds-2155/image3.nii.gz'\nshutil.copyfile(src=src,dst=dst)", "_____no_output_____" ], [ "import shutil\nsrc = '/mmfs1/data/pijarj/ndar_fmri/image03/TNAD-x-212467-x-212467-x-501-d0203.nii.gz'\ndst = '/mmfs1/data/pijarj/ds-2155/image4.nii.gz'\nshutil.copyfile(src=src,dst=dst)", "_____no_output_____" ], [ "# Make sure https://bids.neuroimaging.io/", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6eca2b5f3745e18e3a2daf9bd98cb27ae92216
218,342
ipynb
Jupyter Notebook
Twitter_Emotions.ipynb
l33pif/Sentiment_Analysis_Twitter
8622813c04ea385be675d88d60dede1852cf42f8
[ "MIT" ]
null
null
null
Twitter_Emotions.ipynb
l33pif/Sentiment_Analysis_Twitter
8622813c04ea385be675d88d60dede1852cf42f8
[ "MIT" ]
null
null
null
Twitter_Emotions.ipynb
l33pif/Sentiment_Analysis_Twitter
8622813c04ea385be675d88d60dede1852cf42f8
[ "MIT" ]
null
null
null
199.399087
133,138
0.862358
[ [ [ "# Description : This is a emotion analysis program that parses the tweets fetched from Twitter using Python", "_____no_output_____" ] ], [ [ "# import libraries\n\nimport tweepy\nfrom textblob import TextBlob\nfrom wordcloud import WordCloud\nimport pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')", "_____no_output_____" ], [ "# Twitter API credentials\nconsumer_key = 'ycEZMmO2frdqYsFQfYSZlEZzy'\nconsumer_secret = 'r9aJEgLmvmPte9HslJoM37RyP5Gay1ZJ3NfSam67wMTPFmq7IY'\naccess_token = '1197336121107050496-7OTvNKMk5Z1v4nwgjJCpzDaJE5gIdc'\naccess_t_secret = 'Wc9IwyuX48jsKLnF2lsn679sZ03g6yXuy2KZthziMLbgP'", "_____no_output_____" ], [ "#create the authentication object\nauthenticate = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n#Set the access tokens\nauthenticate.set_access_token(access_token, access_t_secret)\n\n#Create API object while passing in the auth info\napi = tweepy.API(authenticate, wait_on_rate_limit = True)", "_____no_output_____" ], [ "#Extract 100 tweets from a hashtag\nhashtag = api.search(q='#messi', lang='en', result_type='recent', count=100)\n\n#print the last 5 tweets\nprint('show 5 tweets')\ni = 1\nfor tweet in hashtag[:5]:\n print(str(i) + ') ' + tweet.text + '\\n')\n i += 1", "show 5 tweets\n1) 🚨New burofax incoming 💣💣 #messi\n\n2) @FCBarcelona sacked their law firm for advising Messi on how to terminate his contract with the club. Lol. \n#Messi #FCBarcelona #football\n\n3) RT @1xbet___Odds: ☑️ #1xbet\n\nRegistration Code 👉5050\n\n*To get 200% Bonus\n\n📎https://t.co/JRuMooC9sr\n\n#MatchDay #football #sports #betting #L…\n\n4) RT @Barca_Buzz: 🗣: \"Adjectives have been exhausted trying to describe Messi but there are plenty to describe this really sad, slow, painful…\n\n5) RT @cavcav_11: Every Picture Has A Story...\n#Messi \n🔟🐐 https://t.co/4aDxJNrom2\n\n" ], [ "#Create a dataframe\ndf = pd.DataFrame([tweet.text for tweet in hashtag], columns=['Tweets'])\n\n#show the first 5 rows of data\ndf.head()", "_____no_output_____" ], [ "#Clean text\n\n#create a function to clean the text\n\ndef clean_text(text):\n text = re.sub(r'@[A-za-z0-9]+', '', text) # removing @metions\n text = re.sub(r'#', '', text) # removing the '#' symbol\n text = re.sub(r'RT[\\s]+', '', text) #removing RT's\n text = re.sub(r'https?:\\/\\/?', '', text) #removing links\n\n return text\n\ndf['Tweets'] = df['Tweets'].apply(clean_text)\n\n#show the clean text\ndf.head()\n", "_____no_output_____" ], [ "# Create a function to get the subjectivity\ndef get_subjectivity(text):\n return TextBlob(text).sentiment.subjectivity\n\n# Create a function to get the polarity\ndef get_polarity(text):\n return TextBlob(text).sentiment.polarity\n\n# Create the new colums\ndf['Subjectivity'] = df['Tweets'].apply(get_subjectivity)\ndf['Polarity'] = df['Tweets'].apply(get_polarity)\n\n# Show the new DataFrame\ndf\n", "_____no_output_____" ], [ "# Plot the Word Cloid\nall_words = ' '.join( [twts for twts in df['Tweets']] )\nword_cloud = WordCloud(width=500, height=300, random_state= 21, max_font_size=119).generate(all_words)\n\nplt.imshow(word_cloud, interpolation= 'bilinear')\nplt.axis('off')\nplt.show()", "_____no_output_____" ], [ "# Create a function to compute the negative, neutral and positive analysis\n\ndef analysis(score):\n if score < 0:\n return 'Negative'\n elif score == 0:\n return 'Neutral'\n else:\n return 'Positive'\n\n# Adding new Column\ndf['Analysis'] = df['Polarity'].apply(analysis)\n\n# Show DataFrame\ndf\n", "_____no_output_____" 
], [ "# Print all of positive tweets\nj = 1\nsortedDF = df.sort_values(by=['Polarity'])\nfor i in range(0, sortedDF.shape[0]):\n if(sortedDF['Analysis'][i] == 'Positive'):\n print(str(j) + ') ' + sortedDF['Tweets'][i])\n print()\n j += 1", "1) sacked their law firm for advising Messi on how to terminate his contract with the club. Lol. \nMessi FCBarcelona football\n\n2) 2020 is so unpredictable :\n&gt; Psg finally had their first UCL final\n&gt; Messi Leaving Barcelona\n&gt; Pogba has not change… t.co/0RilC2nUaw\n\n3) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n4) LEAKED! Footage reveals Manchester United's Ed Woodward live in action in the transfer market. \n\nARSLIV… t.co/hcCrN4CWbE\n\n5) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n6) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n7) : Lionel Messi has announced he wants to leave. Here we take a look at some of his best moments for Barcelona. Simply the great…\n\n8) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n9) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n10) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n11) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n12) I grew up supporting Cruyff values in Barcelona and followed Pep who respected them wherever he went. \n\nValues over… t.co/9U2qZWv1WZ\n\n13) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n14) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n15) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n16) CALL IT SNIDENESS: when after the purchase of Pjarnic, something comes up and messi wants to leave and the Media a… t.co/XdkF7aDKW6\n\n17) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n18) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n19) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n20) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n21) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n22) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n23) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n24) : Meet our new recruit at , joining our A&amp;R team! 
Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n25) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n26) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n27) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n28) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n29) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n30) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n31) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n32) : Messi = Barcelona\nThe Best Part ♥️💔\n•Never saw any player making Commentators Emotional out of joy.\n\nThe name Lionel Mes…\n\n33) 2/2....Messi also said \"super eslavo, el Boingy, el Boingy\"(Super Slav, Boing Boing)...\"Smethwick parece un poco a… t.co/jU9Vh26I9q\n\n34) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n35) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n36) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n37) Messi-Bartomeu saga jumps to the high gear again. Bartomeu still believes he can keep Messi or sell him with a high… t.co/IU4aQFGCEJ\n\n38) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n39) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n40) : Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training, as…\n\n41) Lionel Messi is expected to send a new burofax to Barcelona confirming he will not be attending pre-season training… t.co/rcLd66afqT\n\n42) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n43) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n44) Wow big news for messitransfer MessiTooLeeds Messi t.co/NzSByBaAsM\n\n45) you clown destroyed !!! messi deserves a winning team !!! fuck you BartomeuDimision\n\n46) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n47) : In 55 years watching football the best player I have seen is Messi.\n\nI can tell you that in your lifetime you may still see a…\n\n48) : Hi i will design professional amazon product listing images, and amazon photo editing, if anyone need my service contact…\n\n49) 🔥Messi has informed Barça that he will not appear for the PCR tests at the start of the new season tomorrow. The A… t.co/NCys4uWSIJ\n\n50) : Meet our new recruit at , joining our A&amp;R team! Transfer fee was €700M, but I’m sure he’s gonna bring in…\n\n51) The latest a life in black and white! t.co/OdJAyZsx2F Thanks to seriea messi\n\n52) ❤ the best there ever was. The goat. 
Messi t.co/EJKpiIZOoS\n\n" ], [ "# Print all of the negative tweets\nj = 1\nsortedDF = df.sort_values(by=['Polarity'], ascending=False)\nfor i in range(0, sortedDF.shape[0]):\n if(sortedDF['Analysis'][i] == 'Negative'):\n print(str(j) + ') ' + sortedDF['Tweets'][i])\n print()\n j += 1", "1) : 🗣: \"Adjectives have been exhausted trying to describe Messi but there are plenty to describe this really sad, slow, painful…\n\n2) Long play the king. Messi t.co/gDtXYTxjfQ\n\n3) OFFICIAL: After a long discussion LeoMessi has finally decided to accept our proposal to join our hometown club FC… t.co/n1tA2bffuA\n\n4) Only Maradona would bring a mediocre team like… t.co/kdIZ4s7HSH\n\n5) SOME RANDOM KILLS 🔥🔥 | PUBG MOBILE FUNNY MOMENTS 😂😂 | EPISODE 02 t.co/NYEeY5Z2Pf via pubg… t.co/7gT2NjxDCK\n\n6) Messi will not appear tomorrow for pre-season medical tests at training center. He keeps the decision to leave th… t.co/P4Bc4wwNoS\n\n7) : A Barca fan cries in front of the stadium a sad sight 😢\nFCBarcelona Messi \n t.co/9QvOUbTSng\n\n8) 🗣: \"Adjectives have been exhausted trying to describe Messi but there are plenty to describe this really sad, slow,… t.co/1R4cw9YAyv\n\n9) Leo_Messi 10 👑\nOnce you hear this name appear in front of you King..!!⚽️ of Football Not worth leaving this way H… t.co/RITHip8m75\n\n" ], [ "# Plot the polarity and subjectivity\nplt.figure(figsize=(8,6))\nfor i in range(0, df.shape[0]):\n plt.scatter(df['Polarity'][i], df['Subjectivity'][i], color='Blue')\n\nplt.title('Sentiment Analysis')\nplt.xlabel('Polarity')\nplt.ylabel('Subjectivity')\nplt.show()", "_____no_output_____" ], [ "# get the percentage of positive tweets\n\nptweet = df[df.Analysis == 'Positive']\nptweet = ptweet['Tweets']\n\nround(ptweet.shape[0] / df.shape[0] * 100, 1)", "_____no_output_____" ], [ "# get the percentage of negative tweets\n\nntweet = df[df.Analysis == 'Negative']\nntweet = ntweet['Tweets']\n\nround(ntweet.shape[0] / df.shape[0] * 100, 1)", "_____no_output_____" ], [ "# Show the value counts\n\ndf['Analysis'].value_counts()\n\n# Plot and visualize the counts\nplt.title('Sentiment Analysis')\nplt.xlabel('Sentiment')\nplt.ylabel('Counts')\ndf['Analysis'].value_counts().plot(kind='bar')\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6ed44b7589cfc0f51676f57692656bc4b2713f
35,628
ipynb
Jupyter Notebook
mygolab1.ipynb
shiluan/PowerBI-CSharp
0213b699d9dd6d90b992beaea70beba97a29c164
[ "MIT" ]
null
null
null
mygolab1.ipynb
shiluan/PowerBI-CSharp
0213b699d9dd6d90b992beaea70beba97a29c164
[ "MIT" ]
null
null
null
mygolab1.ipynb
shiluan/PowerBI-CSharp
0213b699d9dd6d90b992beaea70beba97a29c164
[ "MIT" ]
null
null
null
282.761905
32,470
0.922842
[ [ [ "<a href=\"https://colab.research.google.com/github/shiluan/PowerBI-CSharp/blob/master/mygolab1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "print('welcome my golab!!!') ", "welcome my golab!!!\n" ], [ "message = 'A Great Tutorial on Colab by Tutorialspoint!!'\ngreeting = !echo -e '$message\\n$message'\ngreeting", "_____no_output_____" ], [ "import numpy as np\nfrom matplotlib import pyplot as plt\n\ny = np.random.randn(100)\nx = [x for x in range(len(y))]\n\nplt.plot(x, y, '-')\nplt.fill_between(x, y, 200, where = (y > 195), facecolor='g', alpha=0.6)\n\nplt.title(\"Sample Plot\")\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
4a6ede42df3f6dfafb4e8815879101ed779aa659
115,578
ipynb
Jupyter Notebook
Q1 PartD/Q1 PartD - Monthly/MiniProj_LSTM_AdaGrad_MAE_Q1_PartD_Pytorch_Monthly.ipynb
parhamzm/Beijing-Weather-Prediction
9971201a766d8ddce50b3fb737b6aa49eea55fc3
[ "MIT" ]
null
null
null
Q1 PartD/Q1 PartD - Monthly/MiniProj_LSTM_AdaGrad_MAE_Q1_PartD_Pytorch_Monthly.ipynb
parhamzm/Beijing-Weather-Prediction
9971201a766d8ddce50b3fb737b6aa49eea55fc3
[ "MIT" ]
null
null
null
Q1 PartD/Q1 PartD - Monthly/MiniProj_LSTM_AdaGrad_MAE_Q1_PartD_Pytorch_Monthly.ipynb
parhamzm/Beijing-Weather-Prediction
9971201a766d8ddce50b3fb737b6aa49eea55fc3
[ "MIT" ]
null
null
null
115,578
115,578
0.756294
[ [ [ "!git clone https://github.com/parhamzm/Beijing-Pollution-DataSet", "fatal: destination path 'Beijing-Pollution-DataSet' already exists and is not an empty directory.\n" ], [ "!ls Beijing-Pollution-DataSet", "pollution.csv polution_dataSet.npy README.md\n" ], [ "import torch\nimport torchvision\nimport torch.nn as nn\nfrom torchvision import transforms\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom torch.utils.data import random_split\n\nfrom math import sqrt\nfrom numpy import concatenate\nfrom matplotlib import pyplot\nfrom pandas import read_csv\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import mean_squared_error\n\nfrom numpy import array\nfrom numpy import hstack", "_____no_output_____" ] ], [ [ "# **Data Pre Processing**", "_____no_output_____" ] ], [ [ "DATA_DIR = \"Beijing-Pollution-DataSet/\"\nfrom pandas import read_csv\nfrom datetime import datetime\nfrom random import randint\n\ndef select_month(sequences, n_samples=250):\n X, y = list(), list()\n rand_hour = randint(0, 24)\n rand_day = randint(0, 7)\n for i in range(0, n_samples):\n start_ix = rand_hour + rand_day*24 + 672 * i # 168 : Week hours!\n idxs = []\n for j in range(0, 4):\n if j <=2:\n idx = start_ix + (j * 168) # Add different weeks\n idxs.append(idx)\n if j == 3: # Target\n idy = start_ix + (j * 168)\n seq_x = sequences[idxs, :]\n seq_y = sequences[idy, 0]\n y.append(seq_y)\n X.append(seq_x)\n\n return X, y\n\n\n\n# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps, n_samples=12000, start_from=0):\n\tX, y = list(), list()\n\tfor i in range(start_from, (start_from + n_samples)):\n # find the end of this pattern\n\t\tend_ix = i + n_steps\n # check if we are beyond the dataset\n # if end_ix > len(sequences):\n # break\n # gather input and output parts of the pattern\n\t\tseq_x = sequences[i:end_ix, :]\n\t\tseq_y = sequences[end_ix, 0]\n\t\ty.append(seq_y)\n\t\tX.append(seq_x)\n \n\n\t\n\treturn array(X), array(y)\n\n\n# load dataset\nDATA_DIR = \"Beijing-Pollution-DataSet/\"\n\ndata = np.load(DATA_DIR + 'polution_dataSet.npy')\nscaled_data = data\n\nx, y = select_month(data, n_samples=65)\nprint(\"X shape => \", np.array(x).shape)\nprint(\"y shape => \", np.array(y).shape)\nx = np.array(x)\ny = np.array(y)\ndataset = data\ntrain_X, train_y = x[0:50], y[0:50] #split_sequences(dataset, n_timesteps, n_samples=15000, start_from=0)\nvalid_X, valid_y = x[50:], y[50:] #split_sequences(dataset, n_timesteps, n_samples=3000, start_from=15000)", "X shape => (65, 3, 8)\ny shape => (65,)\n" ], [ "test_loader_X = torch.utils.data.DataLoader(dataset=(train_X), batch_size=20, shuffle=False)\n# train_X = torch.tensor(train_X, dtype=torch.float32) \n# train_y = torch.tensor(train_y, dtype=torch.float32)\nprint(\"Train X Shape :=> \", train_X.shape)\nprint(\"Train Y Shape :=> \", train_y.shape)\nprint(\"####################################\")\n# print(\"Test X Shape :=> \", test_X.shape)\n# print(\"Test Y Shape :=> \", test_y.shape)", "Train X Shape :=> (50, 3, 8)\nTrain Y Shape :=> (50,)\n####################################\n" ], [ "class LSTM(torch.nn.Module):\n def __init__(self, n_features=8, n_output=1, seq_length=11, n_hidden_layers=233, n_layers=1):\n super(LSTM, self).__init__()\n self.n_features = n_features\n self.seq_len = seq_length\n\n self.n_hidden = n_hidden_layers # number of hidden states\n self.n_layers = n_layers 
# number of LSTM layers (stacked)\n self.n_output = n_output\n\n self.l_lstm = torch.nn.LSTM(input_size = n_features, \n hidden_size = self.n_hidden,\n num_layers = self.n_layers, \n batch_first = True)\n # according to pytorch docs LSTM output is \n # (batch_size, seq_len, num_directions * hidden_size)\n # when considering batch_first = True\n self.l_linear = torch.nn.Linear(self.n_hidden * self.seq_len, self.n_output)\n\n\n def forward(self, x):\n # fresh zero hidden/cell states on every forward pass (stateless between batches)\n hidden_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_()\n cell_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_()\n self.hidden = (hidden_state.detach(), cell_state.detach())\n\n batch_size, seq_len, _ = x.size()\n\n lstm_out, self.hidden = self.l_lstm(x, self.hidden)\n\n # lstm_out (with batch_first = True) is \n # (batch_size, seq_len, num_directions * hidden_size)\n # for the following linear layer we want to keep the batch_size dimension and merge the rest \n # .contiguous() -> solves tensor compatibility error\n x = lstm_out.contiguous().view(batch_size, -1)\n # print(\"X shape :=> \", x.shape)\n # out = self.l_linear(lstm_out[:, -1, :])\n # print(\"Out Shape :=> \", lstm_out[:, -1, :].shape)\n out = self.l_linear(x)\n return out", "_____no_output_____" ], [ "torch.manual_seed(13)\nmodel = LSTM(n_features=8, n_output=1, seq_length=3, n_hidden_layers=233, n_layers=1)\ncriterion = nn.L1Loss() # mean absolute error\noptimizer = torch.optim.Adagrad(model.parameters(), lr=0.0003)", "_____no_output_____" ], [ "model = model #.to(device)\ncriterion = criterion #.to(device)\nfor p in model.parameters():\n print(p.numel())", "7456\n217156\n932\n932\n699\n1\n" ], [ "import time\nstart_time = time.time()\n\n# train_X, train_y\nepochs = 200\nmodel.train()\nbatch_size = 5\nrunning_loss_history = []\nval_running_loss_history = []\nfor epoch in range(epochs):\n running_loss = 0.0\n val_running_loss = 0.0\n model.train()\n for b in range(0, len(train_X), batch_size):\n inpt = train_X[b:b+batch_size, :, :]\n target = train_y[b:b+batch_size]\n\n # print(\"Input Shape :=> \", inpt.shape)\n\n x_batch = torch.tensor(inpt, dtype=torch.float32)\n y_batch = torch.tensor(target, dtype=torch.float32)\n\n output = model(x_batch)\n loss = criterion(output.view(-1), y_batch)\n\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n else:\n\n with torch.no_grad(): # temporarily sets all the requires-grad flags to false\n model.eval()\n for b in range(0, len(valid_X), batch_size):\n inpt = valid_X[b:b+batch_size, :, :]\n target = valid_y[b:b+batch_size]\n\n x_batch_test = torch.tensor(inpt, dtype=torch.float32)\n y_batch_test = torch.tensor(target, dtype=torch.float32)\n\n # model.init_hidden(x_batch_test.size(0))\n\n output_test = model(x_batch_test)\n loss_test = criterion(output_test.view(-1), y_batch_test)\n\n val_running_loss += loss_test.item()\n\n val_epoch_loss = val_running_loss / len(valid_X)\n val_running_loss_history.append(val_epoch_loss)\n epoch_loss = running_loss / len(valid_X) # note: normalized by the validation-set size, so it tracks relative progress rather than a per-sample training MAE\n running_loss_history.append(epoch_loss)\n print('step : ' , epoch , ' Train loss : ' , epoch_loss, ', Valid Loss : => ', val_epoch_loss)\n print(\"***->>>-----------------------------------------------<<<-***\")\n\ntotal_time = time.time() - start_time\nprint(\"===========================================================\")\nprint(\"*********************************************************\")\nprint(\"The total Training Time is Equal with ==> : {0} 
Sec.\".format(total_time))\nprint(\"*********************************************************\")\nprint(\"===========================================================\")", "step : 0 Train loss : 0.06657924105723699 , Valid Loss : => 0.014905717720588048\n***->>>-----------------------------------------------<<<-***\nstep : 1 Train loss : 0.05417056605219841 , Valid Loss : => 0.013276852170626323\n***->>>-----------------------------------------------<<<-***\nstep : 2 Train loss : 0.04865399884680907 , Valid Loss : => 0.012453635782003402\n***->>>-----------------------------------------------<<<-***\nstep : 3 Train loss : 0.046136669876674814 , Valid Loss : => 0.01237548291683197\n***->>>-----------------------------------------------<<<-***\nstep : 4 Train loss : 0.04509344461063544 , Valid Loss : => 0.012574477990468343\n***->>>-----------------------------------------------<<<-***\nstep : 5 Train loss : 0.04465406884749731 , Valid Loss : => 0.0126502625644207\n***->>>-----------------------------------------------<<<-***\nstep : 6 Train loss : 0.04449370553096135 , Valid Loss : => 0.0127188540995121\n***->>>-----------------------------------------------<<<-***\nstep : 7 Train loss : 0.04435061477124691 , Valid Loss : => 0.012808095415433247\n***->>>-----------------------------------------------<<<-***\nstep : 8 Train loss : 0.044220111519098285 , Valid Loss : => 0.012902375807364782\n***->>>-----------------------------------------------<<<-***\nstep : 9 Train loss : 0.04409930482506752 , Valid Loss : => 0.012991201629241307\n***->>>-----------------------------------------------<<<-***\nstep : 10 Train loss : 0.04398624698321025 , Valid Loss : => 0.013075486570596696\n***->>>-----------------------------------------------<<<-***\nstep : 11 Train loss : 0.043879570936163265 , Valid Loss : => 0.013155915339787801\n***->>>-----------------------------------------------<<<-***\nstep : 12 Train loss : 0.04377013829847177 , Valid Loss : => 0.013186699648698171\n***->>>-----------------------------------------------<<<-***\nstep : 13 Train loss : 0.0437264750401179 , Valid Loss : => 0.013216480612754822\n***->>>-----------------------------------------------<<<-***\nstep : 14 Train loss : 0.04368912267188231 , Valid Loss : => 0.013198998818794886\n***->>>-----------------------------------------------<<<-***\nstep : 15 Train loss : 0.04366287315885226 , Valid Loss : => 0.013181980947653453\n***->>>-----------------------------------------------<<<-***\nstep : 16 Train loss : 0.04363782132665316 , Valid Loss : => 0.013209435095389684\n***->>>-----------------------------------------------<<<-***\nstep : 17 Train loss : 0.0436161865790685 , Valid Loss : => 0.01319321741660436\n***->>>-----------------------------------------------<<<-***\nstep : 18 Train loss : 0.043592479825019834 , Valid Loss : => 0.013177374005317688\n***->>>-----------------------------------------------<<<-***\nstep : 19 Train loss : 0.04356946473320325 , Valid Loss : => 0.013161882758140564\n***->>>-----------------------------------------------<<<-***\nstep : 20 Train loss : 0.04354708194732666 , Valid Loss : => 0.013146719088157018\n***->>>-----------------------------------------------<<<-***\nstep : 21 Train loss : 0.043525282045205435 , Valid Loss : => 0.013131863375504812\n***->>>-----------------------------------------------<<<-***\nstep : 22 Train loss : 0.0435040257871151 , Valid Loss : => 0.013117299228906632\n***->>>-----------------------------------------------<<<-***\nstep : 23 Train loss : 
0.04348326760033767 , Valid Loss : => 0.013103009512027105\n***->>>-----------------------------------------------<<<-***\nstep : 24 Train loss : 0.043462980662782986 , Valid Loss : => 0.01308897982041041\n***->>>-----------------------------------------------<<<-***\nstep : 25 Train loss : 0.04344437892238299 , Valid Loss : => 0.013111931582291922\n***->>>-----------------------------------------------<<<-***\nstep : 26 Train loss : 0.043426176284750304 , Valid Loss : => 0.013098368793725968\n***->>>-----------------------------------------------<<<-***\nstep : 27 Train loss : 0.0434071318556865 , Valid Loss : => 0.013085031261046728\n***->>>-----------------------------------------------<<<-***\nstep : 28 Train loss : 0.04338844617207845 , Valid Loss : => 0.01307190681497256\n***->>>-----------------------------------------------<<<-***\nstep : 29 Train loss : 0.04337010383605957 , Valid Loss : => 0.013058985769748687\n***->>>-----------------------------------------------<<<-***\nstep : 30 Train loss : 0.04335208336512248 , Valid Loss : => 0.013046260426441829\n***->>>-----------------------------------------------<<<-***\nstep : 31 Train loss : 0.043334371348222094 , Valid Loss : => 0.013033721347649891\n***->>>-----------------------------------------------<<<-***\nstep : 32 Train loss : 0.04331694915890694 , Valid Loss : => 0.013021360337734222\n***->>>-----------------------------------------------<<<-***\nstep : 33 Train loss : 0.04330073148012161 , Valid Loss : => 0.013041785856088003\n***->>>-----------------------------------------------<<<-***\nstep : 34 Train loss : 0.04328506489594777 , Valid Loss : => 0.013029752175013225\n***->>>-----------------------------------------------<<<-***\nstep : 35 Train loss : 0.04326844140887261 , Valid Loss : => 0.013017876197894414\n***->>>-----------------------------------------------<<<-***\nstep : 36 Train loss : 0.043252054850260416 , Valid Loss : => 0.013006153206030527\n***->>>-----------------------------------------------<<<-***\nstep : 37 Train loss : 0.043235898142059644 , Valid Loss : => 0.012994576245546341\n***->>>-----------------------------------------------<<<-***\nstep : 38 Train loss : 0.04321996221939723 , Valid Loss : => 0.012983141839504242\n***->>>-----------------------------------------------<<<-***\nstep : 39 Train loss : 0.043204233547051746 , Valid Loss : => 0.012971842288970947\n***->>>-----------------------------------------------<<<-***\nstep : 40 Train loss : 0.043188708896438284 , Valid Loss : => 0.012960676103830338\n***->>>-----------------------------------------------<<<-***\nstep : 41 Train loss : 0.04317418957750003 , Valid Loss : => 0.01297928641239802\n***->>>-----------------------------------------------<<<-***\nstep : 42 Train loss : 0.04316013107697169 , Valid Loss : => 0.01296836311618487\n***->>>-----------------------------------------------<<<-***\nstep : 43 Train loss : 0.04314516944189866 , Valid Loss : => 0.012957558284203212\n***->>>-----------------------------------------------<<<-***\nstep : 44 Train loss : 0.04313038028776646 , Valid Loss : => 0.012946870177984238\n***->>>-----------------------------------------------<<<-***\nstep : 45 Train loss : 0.043115755543112755 , Valid Loss : => 0.012936293582121531\n***->>>-----------------------------------------------<<<-***\nstep : 46 Train loss : 0.04310129458705584 , Valid Loss : => 0.012925827503204345\n***->>>-----------------------------------------------<<<-***\nstep : 47 Train loss : 0.04308698487778505 , Valid Loss : => 
0.012915463000535966\n***->>>-----------------------------------------------<<<-***\nstep : 48 Train loss : 0.04307282616694768 , Valid Loss : => 0.012905201812585195\n***->>>-----------------------------------------------<<<-***\nstep : 49 Train loss : 0.04305955295761426 , Valid Loss : => 0.012922425071398418\n***->>>-----------------------------------------------<<<-***\nstep : 50 Train loss : 0.04304665264983972 , Valid Loss : => 0.012912355611721675\n***->>>-----------------------------------------------<<<-***\nstep : 51 Train loss : 0.04303291713198026 , Valid Loss : => 0.012902378290891647\n***->>>-----------------------------------------------<<<-***\nstep : 52 Train loss : 0.04301931175092856 , Valid Loss : => 0.012892492612202962\n***->>>-----------------------------------------------<<<-***\nstep : 53 Train loss : 0.04300583538909753 , Valid Loss : => 0.012882695347070695\n***->>>-----------------------------------------------<<<-***\nstep : 54 Train loss : 0.042992479354143145 , Valid Loss : => 0.012872984260320663\n***->>>-----------------------------------------------<<<-***\nstep : 55 Train loss : 0.042979242155949275 , Valid Loss : => 0.012863356620073318\n***->>>-----------------------------------------------<<<-***\nstep : 56 Train loss : 0.04296612441539764 , Valid Loss : => 0.012856879085302354\n***->>>-----------------------------------------------<<<-***\nstep : 57 Train loss : 0.042953764523069066 , Valid Loss : => 0.012869932502508164\n***->>>-----------------------------------------------<<<-***\nstep : 58 Train loss : 0.042941794544458387 , Valid Loss : => 0.012862558166186014\n***->>>-----------------------------------------------<<<-***\nstep : 59 Train loss : 0.04292900500198205 , Valid Loss : => 0.012857518841822941\n***->>>-----------------------------------------------<<<-***\nstep : 60 Train loss : 0.042916318277517955 , Valid Loss : => 0.0128525177637736\n***->>>-----------------------------------------------<<<-***\nstep : 61 Train loss : 0.04290373710294564 , Valid Loss : => 0.012847556422154108\n***->>>-----------------------------------------------<<<-***\nstep : 62 Train loss : 0.042891249805688855 , Valid Loss : => 0.012842633575201035\n***->>>-----------------------------------------------<<<-***\nstep : 63 Train loss : 0.04287886035939058 , Valid Loss : => 0.012837749222914379\n***->>>-----------------------------------------------<<<-***\nstep : 64 Train loss : 0.04286656379699707 , Valid Loss : => 0.012832900633414586\n***->>>-----------------------------------------------<<<-***\nstep : 65 Train loss : 0.042854858189821245 , Valid Loss : => 0.012843065957228342\n***->>>-----------------------------------------------<<<-***\nstep : 66 Train loss : 0.042843695854147275 , Valid Loss : => 0.012838286658128103\n***->>>-----------------------------------------------<<<-***\nstep : 67 Train loss : 0.04283166254560153 , Valid Loss : => 0.012833543370167415\n***->>>-----------------------------------------------<<<-***\nstep : 68 Train loss : 0.04281971827149391 , Valid Loss : => 0.012828833609819412\n***->>>-----------------------------------------------<<<-***\nstep : 69 Train loss : 0.042807854960362116 , Valid Loss : => 0.01282415712873141\n***->>>-----------------------------------------------<<<-***\nstep : 70 Train loss : 0.04279607099791368 , Valid Loss : => 0.012819512188434601\n***->>>-----------------------------------------------<<<-***\nstep : 71 Train loss : 0.04278436551491419 , Valid Loss : => 
0.012814899533987045\n***->>>-----------------------------------------------<<<-***\nstep : 72 Train loss : 0.042772737021247544 , Valid Loss : => 0.01281031792362531\n***->>>-----------------------------------------------<<<-***\nstep : 73 Train loss : 0.042761472736795744 , Valid Loss : => 0.012819978843132655\n***->>>-----------------------------------------------<<<-***\nstep : 74 Train loss : 0.042751061543822286 , Valid Loss : => 0.012815457085768383\n***->>>-----------------------------------------------<<<-***\nstep : 75 Train loss : 0.04273965309063594 , Valid Loss : => 0.012810963143905004\n***->>>-----------------------------------------------<<<-***\nstep : 76 Train loss : 0.04272831591467063 , Valid Loss : => 0.01280649850765864\n***->>>-----------------------------------------------<<<-***\nstep : 77 Train loss : 0.04271704542140166 , Valid Loss : => 0.012802062183618545\n***->>>-----------------------------------------------<<<-***\nstep : 78 Train loss : 0.04270584384600321 , Valid Loss : => 0.012797653675079346\n***->>>-----------------------------------------------<<<-***\nstep : 79 Train loss : 0.042694705973068875 , Valid Loss : => 0.012793269505103429\n***->>>-----------------------------------------------<<<-***\nstep : 80 Train loss : 0.04268363391359647 , Valid Loss : => 0.012788913647333781\n***->>>-----------------------------------------------<<<-***\nstep : 81 Train loss : 0.04267263114452362 , Valid Loss : => 0.012798147151867549\n***->>>-----------------------------------------------<<<-***\nstep : 82 Train loss : 0.04266294874250889 , Valid Loss : => 0.012793842454751332\n***->>>-----------------------------------------------<<<-***\nstep : 83 Train loss : 0.04265206307172775 , Valid Loss : => 0.01278956135114034\n***->>>-----------------------------------------------<<<-***\nstep : 84 Train loss : 0.04264123613635699 , Valid Loss : => 0.012785305827856063\n***->>>-----------------------------------------------<<<-***\nstep : 85 Train loss : 0.04263046532869339 , Valid Loss : => 0.012781074394782384\n***->>>-----------------------------------------------<<<-***\nstep : 86 Train loss : 0.04261975313226382 , Valid Loss : => 0.012776867548624674\n***->>>-----------------------------------------------<<<-***\nstep : 87 Train loss : 0.04260909495254358 , Valid Loss : => 0.012772681812445322\n***->>>-----------------------------------------------<<<-***\nstep : 88 Train loss : 0.04259849352141221 , Valid Loss : => 0.012768519421418508\n***->>>-----------------------------------------------<<<-***\nstep : 89 Train loss : 0.04258794412016868 , Valid Loss : => 0.012764380127191544\n***->>>-----------------------------------------------<<<-***\nstep : 90 Train loss : 0.04257820894320806 , Valid Loss : => 0.012773200372854869\n***->>>-----------------------------------------------<<<-***\nstep : 91 Train loss : 0.042568196232120194 , Valid Loss : => 0.01276910404364268\n***->>>-----------------------------------------------<<<-***\nstep : 92 Train loss : 0.042557804534832634 , Valid Loss : => 0.012765029817819596\n***->>>-----------------------------------------------<<<-***\nstep : 93 Train loss : 0.042547462259729704 , Valid Loss : => 0.012760975956916809\n***->>>-----------------------------------------------<<<-***\nstep : 94 Train loss : 0.042537167916695276 , Valid Loss : => 0.012756943951050441\n***->>>-----------------------------------------------<<<-***\nstep : 95 Train loss : 0.042526921878258385 , Valid Loss : => 
0.01275293081998825\n***->>>-----------------------------------------------<<<-***\nstep : 96 Train loss : 0.0425167229026556 , Valid Loss : => 0.012748938302199047\n***->>>-----------------------------------------------<<<-***\nstep : 97 Train loss : 0.04250656999647617 , Valid Loss : => 0.012744966149330138\n***->>>-----------------------------------------------<<<-***\nstep : 98 Train loss : 0.042496790364384654 , Valid Loss : => 0.01275346428155899\n***->>>-----------------------------------------------<<<-***\nstep : 99 Train loss : 0.04248753028611342 , Valid Loss : => 0.012749530375003815\n***->>>-----------------------------------------------<<<-***\nstep : 100 Train loss : 0.04247751384973526 , Valid Loss : => 0.012745614101489385\n***->>>-----------------------------------------------<<<-***\nstep : 101 Train loss : 0.042467540005842845 , Valid Loss : => 0.01274171769618988\n***->>>-----------------------------------------------<<<-***\nstep : 102 Train loss : 0.04245760565002759 , Valid Loss : => 0.01273783842722575\n***->>>-----------------------------------------------<<<-***\nstep : 103 Train loss : 0.04244771723945936 , Valid Loss : => 0.01273397778471311\n***->>>-----------------------------------------------<<<-***\nstep : 104 Train loss : 0.04243786819279194 , Valid Loss : => 0.01273013378183047\n***->>>-----------------------------------------------<<<-***\nstep : 105 Train loss : 0.04242805913090706 , Valid Loss : => 0.01272630716363589\n***->>>-----------------------------------------------<<<-***\nstep : 106 Train loss : 0.04241829204062621 , Valid Loss : => 0.012722498923540115\n***->>>-----------------------------------------------<<<-***\nstep : 107 Train loss : 0.04240938511987527 , Valid Loss : => 0.01273068015774091\n***->>>-----------------------------------------------<<<-***\nstep : 108 Train loss : 0.042399940888086955 , Valid Loss : => 0.012726906190315882\n***->>>-----------------------------------------------<<<-***\nstep : 109 Train loss : 0.04239028878509998 , Valid Loss : => 0.012723147124052047\n***->>>-----------------------------------------------<<<-***\nstep : 110 Train loss : 0.04238067219654719 , Valid Loss : => 0.012719404449065526\n***->>>-----------------------------------------------<<<-***\nstep : 111 Train loss : 0.04237109397848447 , Valid Loss : => 0.012715677171945572\n***->>>-----------------------------------------------<<<-***\nstep : 112 Train loss : 0.04236155115067959 , Valid Loss : => 0.012711966534455617\n***->>>-----------------------------------------------<<<-***\nstep : 113 Train loss : 0.04235204632083575 , Valid Loss : => 0.01270827054977417\n***->>>-----------------------------------------------<<<-***\nstep : 114 Train loss : 0.04234257563948631 , Valid Loss : => 0.012704590956370035\n***->>>-----------------------------------------------<<<-***\nstep : 115 Train loss : 0.04233338236808777 , Valid Loss : => 0.01271252209941546\n***->>>-----------------------------------------------<<<-***\nstep : 116 Train loss : 0.04232475633422534 , Valid Loss : => 0.012708873053391774\n***->>>-----------------------------------------------<<<-***\nstep : 117 Train loss : 0.042315388470888136 , Valid Loss : => 0.012705237170060476\n***->>>-----------------------------------------------<<<-***\nstep : 118 Train loss : 0.042303813497225444 , Valid Loss : => 0.012711427360773086\n***->>>-----------------------------------------------<<<-***\nstep : 119 Train loss : 0.04229926330347856 , Valid Loss : => 
0.01270781879623731\n***->>>-----------------------------------------------<<<-***\nstep : 120 Train loss : 0.042289986337224646 , Valid Loss : => 0.012704222897688548\n***->>>-----------------------------------------------<<<-***\nstep : 121 Train loss : 0.04227917951842149 , Valid Loss : => 0.012710354228814443\n***->>>-----------------------------------------------<<<-***\nstep : 122 Train loss : 0.042274014403422676 , Valid Loss : => 0.01270678515235583\n***->>>-----------------------------------------------<<<-***\nstep : 123 Train loss : 0.042264824733138084 , Valid Loss : => 0.012703227996826171\n***->>>-----------------------------------------------<<<-***\nstep : 124 Train loss : 0.0422547680636247 , Valid Loss : => 0.012709300220012664\n***->>>-----------------------------------------------<<<-***\nstep : 125 Train loss : 0.04224899833401044 , Valid Loss : => 0.012705768396457037\n***->>>-----------------------------------------------<<<-***\nstep : 126 Train loss : 0.042237256467342374 , Valid Loss : => 0.012711806346972783\n***->>>-----------------------------------------------<<<-***\nstep : 127 Train loss : 0.04223326295614242 , Valid Loss : => 0.012708299358685811\n***->>>-----------------------------------------------<<<-***\nstep : 128 Train loss : 0.04222421186665694 , Valid Loss : => 0.012704804042975108\n***->>>-----------------------------------------------<<<-***\nstep : 129 Train loss : 0.042213199660182 , Valid Loss : => 0.012710786114136377\n***->>>-----------------------------------------------<<<-***\nstep : 130 Train loss : 0.04220861047506332 , Valid Loss : => 0.012707314640283584\n***->>>-----------------------------------------------<<<-***\nstep : 131 Train loss : 0.04219963885843754 , Valid Loss : => 0.01270385558406512\n***->>>-----------------------------------------------<<<-***\nstep : 132 Train loss : 0.04218934252858162 , Valid Loss : => 0.012709785004456837\n***->>>-----------------------------------------------<<<-***\nstep : 133 Train loss : 0.04218416872123877 , Valid Loss : => 0.012706348299980163\n***->>>-----------------------------------------------<<<-***\nstep : 134 Train loss : 0.042175273845593136 , Valid Loss : => 0.01270292301972707\n***->>>-----------------------------------------------<<<-***\nstep : 135 Train loss : 0.04216568569342295 , Valid Loss : => 0.012708801527818043\n***->>>-----------------------------------------------<<<-***\nstep : 136 Train loss : 0.0421599297473828 , Valid Loss : => 0.01270539810260137\n***->>>-----------------------------------------------<<<-***\nstep : 137 Train loss : 0.042148703088363014 , Valid Loss : => 0.012711245566606522\n***->>>-----------------------------------------------<<<-***\nstep : 138 Train loss : 0.04214466748138269 , Valid Loss : => 0.012707864244778952\n***->>>-----------------------------------------------<<<-***\nstep : 139 Train loss : 0.042135889455676076 , Valid Loss : => 0.012704493353764217\n***->>>-----------------------------------------------<<<-***\nstep : 140 Train loss : 0.04212535160283248 , Valid Loss : => 0.012710292885700861\n***->>>-----------------------------------------------<<<-***\nstep : 141 Train loss : 0.04212074192861716 , Valid Loss : => 0.01270694260795911\n***->>>-----------------------------------------------<<<-***\nstep : 142 Train loss : 0.04211203716695309 , Valid Loss : => 0.01270360325773557\n***->>>-----------------------------------------------<<<-***\nstep : 143 Train loss : 0.04210217632353306 , Valid Loss : => 
0.012709355602661768\n***->>>-----------------------------------------------<<<-***\nstep : 144 Train loss : 0.04209700499971707 , Valid Loss : => 0.012706035872300465\n***->>>-----------------------------------------------<<<-***\nstep : 145 Train loss : 0.0420883622020483 , Valid Loss : => 0.012702727317810058\n***->>>-----------------------------------------------<<<-***\nstep : 146 Train loss : 0.04207917228341103 , Valid Loss : => 0.012708432972431183\n***->>>-----------------------------------------------<<<-***\nstep : 147 Train loss : 0.04207344402869542 , Valid Loss : => 0.012705143541097641\n***->>>-----------------------------------------------<<<-***\nstep : 148 Train loss : 0.042062652731935185 , Valid Loss : => 0.012710823863744735\n***->>>-----------------------------------------------<<<-***\nstep : 149 Train loss : 0.04205859104792277 , Valid Loss : => 0.012707553307215373\n***->>>-----------------------------------------------<<<-***\nstep : 150 Train loss : 0.04205005516608556 , Valid Loss : => 0.012704292684793473\n***->>>-----------------------------------------------<<<-***\nstep : 151 Train loss : 0.04203991815447807 , Valid Loss : => 0.012709928303956985\n***->>>-----------------------------------------------<<<-***\nstep : 152 Train loss : 0.042035308107733725 , Valid Loss : => 0.012706687798102696\n***->>>-----------------------------------------------<<<-***\nstep : 153 Train loss : 0.04202683232724667 , Valid Loss : => 0.012703454494476319\n***->>>-----------------------------------------------<<<-***\nstep : 154 Train loss : 0.04201733817656835 , Valid Loss : => 0.01270904839038849\n***->>>-----------------------------------------------<<<-***\nstep : 155 Train loss : 0.042012184485793115 , Valid Loss : => 0.012705834209918975\n***->>>-----------------------------------------------<<<-***\nstep : 156 Train loss : 0.042003768309950826 , Valid Loss : => 0.012702629218498867\n***->>>-----------------------------------------------<<<-***\nstep : 157 Train loss : 0.04199491118391355 , Valid Loss : => 0.012708182881275813\n***->>>-----------------------------------------------<<<-***\nstep : 158 Train loss : 0.04198921819527944 , Valid Loss : => 0.01270499477783839\n***->>>-----------------------------------------------<<<-***\nstep : 159 Train loss : 0.04197879942754904 , Valid Loss : => 0.01271052286028862\n***->>>-----------------------------------------------<<<-***\nstep : 160 Train loss : 0.041974726940194766 , Valid Loss : => 0.012707352389891942\n***->>>-----------------------------------------------<<<-***\nstep : 161 Train loss : 0.041966405759255095 , Valid Loss : => 0.0127041923503081\n***->>>-----------------------------------------------<<<-***\nstep : 162 Train loss : 0.041956607873241104 , Valid Loss : => 0.012709681689739228\n***->>>-----------------------------------------------<<<-***\nstep : 163 Train loss : 0.041952007760604225 , Valid Loss : => 0.012706537296374639\n***->>>-----------------------------------------------<<<-***\nstep : 164 Train loss : 0.041943738733728725 , Valid Loss : => 0.012703401347001394\n***->>>-----------------------------------------------<<<-***\nstep : 165 Train loss : 0.04193455403049787 , Valid Loss : => 0.012708851446708044\n***->>>-----------------------------------------------<<<-***\nstep : 166 Train loss : 0.041929430638750396 , Valid Loss : => 0.01270573188861211\n***->>>-----------------------------------------------<<<-***\nstep : 167 Train loss : 0.04192121451099714 , Valid Loss : => 
0.012702622264623643\n***->>>-----------------------------------------------<<<-***\nstep : 168 Train loss : 0.041912635788321495 , Valid Loss : => 0.01270803486307462\n***->>>-----------------------------------------------<<<-***\nstep : 169 Train loss : 0.041906990110874176 , Valid Loss : => 0.012704940636952718\n***->>>-----------------------------------------------<<<-***\nstep : 170 Train loss : 0.04189688563346863 , Valid Loss : => 0.012710332870483398\n***->>>-----------------------------------------------<<<-***\nstep : 171 Train loss : 0.04189282221098741 , Valid Loss : => 0.012707253297170004\n***->>>-----------------------------------------------<<<-***\nstep : 172 Train loss : 0.041884687294562656 , Valid Loss : => 0.012704181671142577\n***->>>-----------------------------------------------<<<-***\nstep : 173 Train loss : 0.04187517613172531 , Valid Loss : => 0.012709537893533707\n***->>>-----------------------------------------------<<<-***\nstep : 174 Train loss : 0.04187059924006462 , Valid Loss : => 0.01270648166537285\n***->>>-----------------------------------------------<<<-***\nstep : 175 Train loss : 0.04186251449088255 , Valid Loss : => 0.012703432639439901\n***->>>-----------------------------------------------<<<-***\nstep : 176 Train loss : 0.041853592296441394 , Valid Loss : => 0.012708753844102224\n***->>>-----------------------------------------------<<<-***\nstep : 177 Train loss : 0.041848507896065715 , Valid Loss : => 0.012705719967683157\n***->>>-----------------------------------------------<<<-***\nstep : 178 Train loss : 0.04183807844916979 , Valid Loss : => 0.012711021055777868\n***->>>-----------------------------------------------<<<-***\nstep : 179 Train loss : 0.041834548115730286 , Valid Loss : => 0.012708003322283426\n***->>>-----------------------------------------------<<<-***\nstep : 180 Train loss : 0.04182653625806173 , Valid Loss : => 0.012704991052548091\n***->>>-----------------------------------------------<<<-***\nstep : 181 Train loss : 0.04181668634215991 , Valid Loss : => 0.01271025836467743\n***->>>-----------------------------------------------<<<-***\nstep : 182 Train loss : 0.041812653591235475 , Valid Loss : => 0.012707261989514032\n***->>>-----------------------------------------------<<<-***\nstep : 183 Train loss : 0.041804689168930056 , Valid Loss : => 0.012704271823167801\n***->>>-----------------------------------------------<<<-***\nstep : 184 Train loss : 0.041795407483975096 , Valid Loss : => 0.012709507097800573\n***->>>-----------------------------------------------<<<-***\nstep : 185 Train loss : 0.041790875419974324 , Valid Loss : => 0.012706531087557475\n***->>>-----------------------------------------------<<<-***\nstep : 186 Train loss : 0.04178295619785786 , Valid Loss : => 0.012703563024600347\n***->>>-----------------------------------------------<<<-***\nstep : 187 Train loss : 0.04177423951526483 , Valid Loss : => 0.012708765516678492\n***->>>-----------------------------------------------<<<-***\nstep : 188 Train loss : 0.041769216085473695 , Valid Loss : => 0.012705810119708378\n***->>>-----------------------------------------------<<<-***\nstep : 189 Train loss : 0.04175902307033539 , Valid Loss : => 0.012710993985335033\n***->>>-----------------------------------------------<<<-***\nstep : 190 Train loss : 0.04175552241504192 , Valid Loss : => 0.01270805150270462\n***->>>-----------------------------------------------<<<-***\nstep : 191 Train loss : 0.041747665777802465 , Valid Loss : => 
0.012705117464065552\n***->>>-----------------------------------------------<<<-***\nstep : 192 Train loss : 0.041738029941916464 , Valid Loss : => 0.012710271775722504\n***->>>-----------------------------------------------<<<-***\nstep : 193 Train loss : 0.04173403829336166 , Valid Loss : => 0.012707350154717764\n***->>>-----------------------------------------------<<<-***\nstep : 194 Train loss : 0.04172622275849183 , Valid Loss : => 0.012704433997472127\n***->>>-----------------------------------------------<<<-***\nstep : 195 Train loss : 0.041717136402924855 , Valid Loss : => 0.012709558755159379\n***->>>-----------------------------------------------<<<-***\nstep : 196 Train loss : 0.04171266034245491 , Valid Loss : => 0.012706655263900756\n***->>>-----------------------------------------------<<<-***\nstep : 197 Train loss : 0.04170488603413105 , Valid Loss : => 0.01270375947157542\n***->>>-----------------------------------------------<<<-***\nstep : 198 Train loss : 0.04169634481271108 , Valid Loss : => 0.012708855420351028\n***->>>-----------------------------------------------<<<-***\nstep : 199 Train loss : 0.04169138645132383 , Valid Loss : => 0.012705971052249273\n***->>>-----------------------------------------------<<<-***\n===========================================================\n*********************************************************\nThe total Training Time is Equal with ==> : 9.03040885925293 Sec.\n*********************************************************\n===========================================================\n" ], [ "f, ax = plt.subplots(1, 1, figsize=(10, 7))\nplt.title(\"Valid & Test Loss\", fontsize=18)\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.plot(running_loss_history, label='train')\nplt.plot(val_running_loss_history, label='test')\n# pyplot.plot(history.history['val_loss'], label='test')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "test_x, test_y = x[50:], y[50:]\nmodel.eval()\ntest_x = torch.tensor(test_x, dtype=torch.float32)\ntest_y = torch.tensor(test_y, dtype=torch.float32)\nres = model(test_x)\nloss_test = criterion(res.view(-1), test_y)\n\nfuture = 100\nwindow_size = 11\n", "_____no_output_____" ], [ "fig = plt.figure(figsize=(20, 7))\nplt.title(\"Beijing Pollution Prediction - LSTM\", fontsize=18)\nplt.ylabel('Pollution')\nplt.xlabel('Num data')\nplt.grid(True)\nplt.autoscale(axis='x', tight=True)\nfig.autofmt_xdate()\n\n# plt.plot(data[15000:15100, 0])\nplt.plot(test_y, label=\"Real\")\n# plt.plot(preds[12:])\nprint(res.shape)\nplt.plot(res.detach().numpy(), label=\"Prediction\")\nplt.legend()\nplt.show()", "torch.Size([15, 1])\n" ], [ "test_x, test_y = x[50:], y[50:]\nmodel.eval()\n\n\ntest_running_loss = 0\nwith torch.no_grad(): # temporarily sets all the requires-grad flags to false\n model.eval()\n for b in range(0, len(test_x), batch_size):\n inpt = test_x[b:b+batch_size, :, :]\n target = test_y[b:b+batch_size]\n\n x_batch_test = torch.tensor(inpt, dtype=torch.float32)\n y_batch_test = torch.tensor(target, dtype=torch.float32)\n\n # model.init_hidden(x_batch_test.size(0))\n\n output_test = model(x_batch_test)\n\n loss_test = criterion(output_test.view(-1), y_batch_test)\n\n test_running_loss += loss_test.item()\n\n test_epoch_loss = test_running_loss / len(test_x)\n\nprint(\"##########################################################\")\nprint(\">>>>---------------------------------------------------<<<<\")\nprint(\">>>>----------***************************--------------<<<<\")\nprint(\"**** Test Loss 
:==>>> \", test_epoch_loss)\nprint(\">>>>----------***************************--------------<<<<\")\nprint(\">>>>---------------------------------------------------<<<<\")\nprint(\"##########################################################\")", "##########################################################\n>>>>---------------------------------------------------<<<<\n>>>>----------***************************--------------<<<<\n**** Test Loss :==>>> 0.012705971052249273\n>>>>----------***************************--------------<<<<\n>>>>---------------------------------------------------<<<<\n##########################################################\n" ] ], [ [ "# **Predict Only 12 & 24 Times!**", "_____no_output_____" ] ], [ [ "# split a multivariate sequence into samples\ndef split_sequences12(sequences, n_steps, n_samples=12000, start_from=0):\n X, y = list(), list()\n j = 0\n for i in range(start_from, (start_from + n_samples)):\n # find the end of this pattern\n end_ix = j*12 + n_steps + start_from\n # check if we are beyond the dataset\n # gather input and output parts of the pattern\n j = j + 1\n seq_x = sequences[end_ix-11:end_ix, :]\n seq_y = sequences[end_ix, 0]\n y.append(seq_y)\n X.append(seq_x)\n print(\"End :=> \", end_ix)\n return array(X), array(y)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6ee93fa65a6f953151741c6c6ec183a7dc95d5
31,419
ipynb
Jupyter Notebook
Undergrad/CS-370-T1045/Week 5 /TreasureHuntGame/Bailey_Samuel_ProjectTwoMilestone.ipynb
samuelbailey123/SNHU
b230a00c5605b9c9c19aecf37c4b936bf607a499
[ "MIT" ]
null
null
null
Undergrad/CS-370-T1045/Week 5 /TreasureHuntGame/Bailey_Samuel_ProjectTwoMilestone.ipynb
samuelbailey123/SNHU
b230a00c5605b9c9c19aecf37c4b936bf607a499
[ "MIT" ]
10
2022-02-19T10:33:59.000Z
2022-03-31T08:44:37.000Z
Undergrad/CS-370-T1045/Week 5 /TreasureHuntGame/Bailey_Samuel_ProjectTwoMilestone.ipynb
samuelbailey123/SNHU
b230a00c5605b9c9c19aecf37c4b936bf607a499
[ "MIT" ]
null
null
null
60.07457
2,188
0.667431
[ [ [ "# Treasure Hunt Game Notebook\n\n## Read and Review Your Starter Code\nThe theme of this project is a popular treasure hunt game in which the player needs to find the treasure before the pirate does. While you will not be developing the entire game, you will write the part of the game that represents the intelligent agent, which is a pirate in this case. The pirate will try to find the optimal path to the treasure using deep Q-learning. \n\nYou have been provided with two Python classes and this notebook to help you with this assignment. The first class, TreasureMaze.py, represents the environment, which includes a maze object defined as a matrix. The second class, GameExperience.py, stores the episodes – that is, all the states that come in between the initial state and the terminal state. This is later used by the agent for learning by experience, called \"exploration\". This notebook shows how to play a game. Your task is to complete the deep Q-learning implementation for which a skeleton implementation has been provided. The code blocs you will need to complete has #TODO as a header.\n\nFirst, read and review the next few code and instruction blocks to understand the code that you have been given.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport os, sys, time, datetime, json, random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD , Adam, RMSprop\nfrom keras.layers.advanced_activations import PReLU\nimport matplotlib.pyplot as plt\nfrom TreasureMaze import TreasureMaze\nfrom GameExperience import GameExperience\n%matplotlib inline", "Using TensorFlow backend.\n" ] ], [ [ "The following code block contains an 8x8 matrix that will be used as a maze object:", "_____no_output_____" ] ], [ [ "maze = np.array([\n [ 1., 0., 1., 1., 1., 1., 1., 1.],\n [ 1., 0., 1., 1., 1., 0., 1., 1.],\n [ 1., 1., 1., 1., 0., 1., 0., 1.],\n [ 1., 1., 1., 0., 1., 1., 1., 1.],\n [ 1., 1., 0., 1., 1., 1., 1., 1.],\n [ 1., 1., 1., 0., 1., 0., 0., 0.],\n [ 1., 1., 1., 0., 1., 1., 1., 1.],\n [ 1., 1., 1., 1., 0., 1., 1., 1.]\n])", "_____no_output_____" ] ], [ [ "This helper function allows a visual representation of the maze object:", "_____no_output_____" ] ], [ [ "def show(qmaze):\n plt.grid('on')\n nrows, ncols = qmaze.maze.shape\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, nrows, 1))\n ax.set_yticks(np.arange(0.5, ncols, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n canvas = np.copy(qmaze.maze)\n for row,col in qmaze.visited:\n canvas[row,col] = 0.6\n pirate_row, pirate_col, _ = qmaze.state\n canvas[pirate_row, pirate_col] = 0.3 # pirate cell\n canvas[nrows-1, ncols-1] = 0.9 # treasure cell\n img = plt.imshow(canvas, interpolation='none', cmap='gray')\n return img", "_____no_output_____" ] ], [ [ "The pirate agent can move in four directions: left, right, up, and down. \n\nWhile the agent primarily learns by experience through exploitation, often, the agent can choose to explore the environment to find previously undiscovered paths. This is called \"exploration\" and is defined by epsilon. This value is typically a lower value such as 0.1, which means for every ten attempts, the agent will attempt to learn by experience nine times and will randomly explore a new path one time. 
You are encouraged to try various values for the exploration factor and see how the algorithm performs.", "_____no_output_____" ] ], [ [ "LEFT = 0\nUP = 1\nRIGHT = 2\nDOWN = 3\n\n\n# Exploration factor\nepsilon = 0.1\n\n# Actions dictionary\nactions_dict = {\n LEFT: 'left',\n UP: 'up',\n RIGHT: 'right',\n DOWN: 'down',\n}\n\nnum_actions = len(actions_dict)\n", "_____no_output_____" ] ], [ [ "The sample code block and output below show creating a maze object and performing one action (DOWN), which returns the reward. The resulting updated environment is visualized.", "_____no_output_____" ] ], [ [ "qmaze = TreasureMaze(maze)\ncanvas, reward, game_over = qmaze.act(DOWN)\nprint(\"reward=\", reward)\nshow(qmaze)", "reward= -0.04\n" ] ], [ [ "This function simulates a full game based on the provided trained model. The other parameters include the TreasureMaze object and the starting position of the pirate.", "_____no_output_____" ] ], [ [ "def play_game(model, qmaze, pirate_cell):\n qmaze.reset(pirate_cell)\n envstate = qmaze.observe()\n while True:\n prev_envstate = envstate\n # get next action\n q = model.predict(prev_envstate)\n action = np.argmax(q[0])\n\n # apply action, get rewards and new state\n envstate, reward, game_status = qmaze.act(action)\n if game_status == 'win':\n return True\n elif game_status == 'lose':\n return False", "_____no_output_____" ] ], [ [ "This function helps you to determine whether the pirate can win any game at all. If your maze is not well designed, the pirate may not win any game at all. In this case, your training would not yield any result. The provided maze in this notebook ensures that there is a path to win and you can run this method to check.", "_____no_output_____" ] ], [ [ "def completion_check(model, qmaze):\n for cell in qmaze.free_cells:\n if not qmaze.valid_actions(cell):\n return False\n if not play_game(model, qmaze, cell):\n return False\n return True", "_____no_output_____" ] ], [ [ "The code you have been given in this block will build the neural network model. Review the code and note the number of layers, as well as the activation, optimizer, and loss functions that are used to train the model.", "_____no_output_____" ] ], [ [ "def build_model(maze):\n model = Sequential()\n model.add(Dense(maze.size, input_shape=(maze.size,)))\n model.add(PReLU())\n model.add(Dense(maze.size))\n model.add(PReLU())\n model.add(Dense(num_actions))\n model.compile(optimizer='adam', loss='mse')\n return model", "_____no_output_____" ] ], [ [ "# #TODO: Complete the Q-Training Algorithm Code Block\n\nThis is your deep Q-learning implementation. The goal of your deep Q-learning implementation is to find the best possible navigation sequence that results in reaching the treasure cell while maximizing the reward. In your implementation, you need to determine the optimal number of epochs to achieve a 100% win rate.\n\nYou will need to complete the section starting with #pseudocode. 
The pseudocode has been included for you.", "_____no_output_____" ] ], [ [ "def qtrain(model, maze, **opt):\n\n # exploration factor\n global epsilon \n\n # number of epochs\n n_epoch = opt.get('n_epoch', 15000)\n\n # maximum memory to store episodes\n max_memory = opt.get('max_memory', 1000)\n\n # maximum data size for training\n data_size = opt.get('data_size', 50)\n\n # start time\n start_time = datetime.datetime.now()\n\n # Construct environment/game from numpy array: maze (see above)\n qmaze = TreasureMaze(maze)\n\n # Initialize experience replay object\n experience = GameExperience(model, max_memory=max_memory)\n \n win_history = [] # history of win/lose game\n hsize = qmaze.maze.size//2 # history window size\n win_rate = 0.0\n \n # Training Code\n \n # Epoch 'for' code:\n for epoch in range(n_epoch):\n loss = 0.0\n n_episodes = 0\n Agent_cell = random.choice(qmaze.free_cells)\n qmaze.reset(Agent_cell)\n envstate = qmaze.observe()\n \n # State declaration\n State = 'not game over'\n \n # While loop for 'not game over'\n while State == 'not game over':\n previous_envstate = envstate\n q = model.predict(previous_envstate)\n # Epsilon-greedy choice: explore with probability epsilon, otherwise exploit\n if np.random.rand() < epsilon:\n action = random.choice(list(actions_dict.keys()))\n else:\n action = np.argmax(q[0])\n envstate, reward, game_status = qmaze.act(action)\n # Record the result and end the episode on a terminal state\n if game_status == 'win':\n win_history.append(1)\n State = game_status\n elif game_status == 'lose':\n win_history.append(0)\n State = game_status\n episode = [previous_envstate, action, reward, envstate, game_status]\n # Store the episode in Experience replay Object\n experience.remember(episode)\n n_episodes += 1\n # Call GameExperience.get_data to retrieve training data (input and target)\n inputs, targets = experience.get_data()\n # Pass to model.fit method to train the model\n model.fit(inputs, targets, verbose=0)\n # Evaluate loss with model.evaluate\n loss = model.evaluate(inputs, targets, verbose=0)\n \n # If the win rate is above the threshold and your model passes the completion check, that would be your epoch.\n if len(win_history) > hsize:\n win_rate = sum(win_history[-hsize:]) / hsize\n\n #Print the epoch, loss, episodes, win count, and win rate for each epoch\n dt = datetime.datetime.now() - start_time\n t = format_time(dt.total_seconds())\n template = \"Epoch: {:03d}/{:d} | Loss: {:.4f} | Episodes: {:d} | Win count: {:d} | Win rate: {:.3f} | time: {}\"\n print(template.format(epoch, n_epoch-1, loss, n_episodes, sum(win_history), win_rate, t))\n # We simply check if training has exhausted all free cells and if in all\n # cases the agent won.\n if win_rate > 0.9 : epsilon = 0.05\n if sum(win_history[-hsize:]) == hsize and completion_check(model, qmaze):\n print(\"Reached 100%% win rate at epoch: %d\" % (epoch,))\n break\n \n \n # Determine the total time for training\n dt = datetime.datetime.now() - start_time\n seconds = dt.total_seconds()\n t = format_time(seconds)\n\n print(\"n_epoch: %d, max_mem: %d, data: %d, time: %s\" % (epoch, max_memory, data_size, t))\n return seconds\n\n# This is a small utility for printing readable time strings:\ndef format_time(seconds):\n if seconds < 400:\n s = float(seconds)\n return \"%.1f seconds\" % (s,)\n elif seconds < 4000:\n m = seconds / 60.0\n return \"%.2f minutes\" % (m,)\n else:\n h = seconds / 3600.0\n return \"%.2f hours\" % (h,)", "_____no_output_____" ] ], [ [ "## Test Your Model\n\nNow we will start testing the deep Q-learning implementation. To begin, select **Cell**, then **Run All** from the menu bar. This will run your notebook. As it runs, you should see output begin to appear beneath the next few cells. 
The code below creates an instance of TreasureMaze.", "_____no_output_____" ] ], [ [ "qmaze = TreasureMaze(maze)\nshow(qmaze)", "_____no_output_____" ] ], [ [ "In the next code block, you will build your model and train it using deep Q-learning. Note: This step takes several minutes to fully run.", "_____no_output_____" ] ], [ [ "model = build_model(maze)\nqtrain(model, maze, n_epoch=1000, max_memory=8*maze.size, data_size=32)", "_____no_output_____" ] ], [ [ "This cell will check to see if the model passes the completion check. Note: This could take several minutes.", "_____no_output_____" ] ], [ [ "completion_check(model, qmaze)\nshow(qmaze)", "_____no_output_____" ] ], [ [ "This cell will test your model for one game. It will start the pirate at the top-left corner and run play_game. The agent should find a path from the starting position to the target (treasure). The treasure is located in the bottom-right corner.", "_____no_output_____" ] ], [ [ "pirate_start = (0, 0)\nplay_game(model, qmaze, pirate_start)\nshow(qmaze)", "_____no_output_____" ] ], [ [ "## Save and Submit Your Work\nAfter you have finished creating the code for your notebook, save your work. Make sure that your notebook contains your name in the filename (e.g. Doe_Jane_ProjectTwo.ipynb). This will help your instructor access and grade your work easily. Download a copy of your IPYNB file and submit it to Brightspace. Refer to the Jupyter Notebook in Apporto Tutorial if you need help with these tasks.", "_____no_output_____" ] ] ]
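A quick sanity-check sketch of the trained agent (assumes the `model`, `qmaze`, and `play_game` objects defined above; the 10-game sample size is an arbitrary choice):

```python
import random

# Play a handful of games from random free cells and report the win fraction.
wins = sum(play_game(model, qmaze, random.choice(qmaze.free_cells)) for _ in range(10))
print("won {}/10 games".format(wins))
```

Because `play_game` returns `True`/`False`, summing over the generator counts wins directly.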
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6eec7e076620e26afc488665425e87782f4c6a
85,324
ipynb
Jupyter Notebook
vg-stats.ipynb
seattlechem/data-science
bf1805ac3c39c1a0f8e3ccc92de0d41c0c185932
[ "MIT" ]
null
null
null
vg-stats.ipynb
seattlechem/data-science
bf1805ac3c39c1a0f8e3ccc92de0d41c0c185932
[ "MIT" ]
null
null
null
vg-stats.ipynb
seattlechem/data-science
bf1805ac3c39c1a0f8e3ccc92de0d41c0c185932
[ "MIT" ]
null
null
null
36.047317
151
0.336552
[ [ [ "# Lab 06: Aggregate Analysis\n## Sales Data From Video Game Sales\n### Peter Kim, 03/26/2018", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom datetime import date", "_____no_output_____" ], [ "df = pd.read_csv('./vgsales.csv')\ndf.head(3)\n", "_____no_output_____" ] ], [ [ "### Which company is the most common video game publisher?\n### Answer: Electronic Arts", "_____no_output_____" ] ], [ [ "df['Publisher'].value_counts().head(1)", "_____no_output_____" ] ], [ [ "### What's the most common platform?\n### Answer: DS", "_____no_output_____" ] ], [ [ "df['Platform'].value_counts().head(1)", "_____no_output_____" ] ], [ [ "### What about the most common genre?\n### Answer: Action", "_____no_output_____" ] ], [ [ "df['Genre'].value_counts().head(1)", "_____no_output_____" ] ], [ [ "### What are the top 20 highest grossing games?\n### Answer: See below", "_____no_output_____" ] ], [ [ "ds = df[['Name', 'Global_Sales']].sort_values('Global_Sales', ascending = False)\nds.head(20)", "_____no_output_____" ] ], [ [ "### No. 5 For North American video game sales, what's the median?\n### Answer: 0.08", "_____no_output_____" ] ], [ [ "np.median(df['NA_Sales'])\ndf[df['NA_Sales'] == 0.08]", "_____no_output_____" ] ], [ [ "### No. 6 For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?\n### Answer:", "_____no_output_____" ] ], [ [ "(df['NA_Sales'].head(1) - df['NA_Sales'].mean()) / df['NA_Sales'].std()", "_____no_output_____" ] ], [ [ "### No. 7 The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?", "_____no_output_____" ] ], [ [ "df[df['Platform'] == 'Wii'][['Platform', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']].groupby(df.Platform).mean()", "_____no_output_____" ], [ "df[df['Platform'] != 'Wii'][['Platform', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']].groupby(df.Platform).mean()", "_____no_output_____" ] ], [ [ "### No. 8 What is the name of game which achieved the highest sales in Japan?\n### Answer: Pokemon Red/Pokemon Blue", "_____no_output_____" ] ], [ [ "df.sort_values('JP_Sales', ascending=False).head(1)", "_____no_output_____" ] ], [ [ "### No. 9 What is the name of game which achieved the highest sales in Europe?\n### Answer: Wii Sports", "_____no_output_____" ] ], [ [ "df.sort_values('EU_Sales', ascending=False).head(1)", "_____no_output_____" ] ], [ [ "### No. 10 What is the top 10 newest released games from this video game sales data?", "_____no_output_____" ] ], [ [ "df.sort_values('Year', ascending=False).head(10)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6efd3c09c599f7b097a9b25245f876edc909b8
1,772
ipynb
Jupyter Notebook
deepL101.ipynb
amirashoori7/ml-algo-trading
cd3ce06798d3038fdec7e5980c99351073f1c9b4
[ "MIT" ]
null
null
null
deepL101.ipynb
amirashoori7/ml-algo-trading
cd3ce06798d3038fdec7e5980c99351073f1c9b4
[ "MIT" ]
null
null
null
deepL101.ipynb
amirashoori7/ml-algo-trading
cd3ce06798d3038fdec7e5980c99351073f1c9b4
[ "MIT" ]
null
null
null
25.314286
119
0.588036
[ [ [ "Deep Learning Intro with python, tensorflow, keras\nHere is the most common HelloWorld! example of deep learning which is \"mnist dataset\" of hand-written digits\n\nA basic neural network consists of an input layer (data)\nAfter this, there are some number of hidden layers (layers between input-output)\n1 hidden layer = 1 neural network\n2=< hidden layers = deep neural network\n\nwith single hidden layer, the model is going to only learn linear relationships\n\nA single neuron is just sum of all of the inputs weights fed through some sort of Activation Function\n\nThe Activation Fn is meant to stimulate a neuron firing or not\n", "_____no_output_____" ] ], [ [ "import tensorflow.keras as keras\nimport tensorflow as tf\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nprint(x_train[0])", "_____no_output_____" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code" ] ]
4a6f0823884e83cd54874479ce9f482cf3c617ec
51,138
ipynb
Jupyter Notebook
notebooks/detect_rings.ipynb
cbouy/ProLIF
b731499ee487c9c51cd54fde0950ec51756f92cd
[ "Apache-2.0" ]
14
2019-01-02T14:14:03.000Z
2021-10-05T15:03:10.000Z
notebooks/detect_rings.ipynb
xchem/ProLIF
de8d9f096418546c7e4ed9b841154d5fbfa76560
[ "Apache-2.0" ]
2
2019-09-17T16:25:31.000Z
2020-12-23T16:04:18.000Z
notebooks/detect_rings.ipynb
xchem/ProLIF
de8d9f096418546c7e4ed9b841154d5fbfa76560
[ "Apache-2.0" ]
9
2018-07-28T15:56:58.000Z
2022-02-20T09:37:07.000Z
170.46
18,188
0.893973
[ [ [ "from rdkit import Chem\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem.Draw import IPythonConsole\nfrom matplotlib.colors import ColorConverter\nfrom IPython.core.display import display\nfrom copy import deepcopy", "_____no_output_____" ], [ "# draw molecules with atom index\ndef mol_with_atom_index( m ):\n mol = deepcopy(m)\n atoms = mol.GetNumAtoms()\n for idx in range( atoms ):\n mol.GetAtomWithIdx( idx ).SetProp( 'molAtomMapNumber', str( mol.GetAtomWithIdx( idx ).GetIdx() ) )\n return mol", "_____no_output_____" ], [ "smiles = [\n 'OC1=CC=C(C=C1)C2=CC3=CC=CC=C3C=C2',\n 'C1CCC2=CNC=C2C1',\n 'O1C=C2C=CC=CC2=C1',\n]\nmols = [Chem.MolFromSmiles(string) for string in smiles]\nDraw.MolsToGridImage([mol_with_atom_index(mol) for mol in mols])", "_____no_output_____" ], [ "# typical aromatic substructures\npatterns = [\n \"[a]1:[a]:[a]:[a]:[a]:[a]:1\",\n \"[a]1:[a]:[a]:[a]:[a]:1\"\n]\nDraw.MolsToGridImage([Chem.MolFromSmarts(pat) for pat in patterns])", "_____no_output_____" ], [ "for i,mol in enumerate(mols):\n for j,pattern in enumerate(patterns):\n pat = Chem.MolFromSmarts(pattern)\n matches = mol.GetSubstructMatches(pat)\n if matches:\n print('Mol {} - pattern {}: atoms idx'.format(i+1, j+1), matches)\n for match in matches:\n img = Draw.MolToImage(mol, highlightAtoms=match, size=(200,200), fitImage=True,\n highlightColor=ColorConverter().to_rgb('cyan'))\n display(img)", "Mol 1 - pattern 1: atoms idx ((1, 2, 3, 4, 5, 6), (7, 8, 9, 14, 15, 16), (9, 10, 11, 12, 13, 14))\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a6f1c228d4c0df2471fac733c09dc2f993b1706
5,834
ipynb
Jupyter Notebook
notebooks/CandleJson.ipynb
HarshSharma009/checkStock
3d4a5354dab46706c7ee45488def99f619d5ab8a
[ "Apache-2.0" ]
11
2019-05-13T15:54:07.000Z
2022-03-20T12:12:59.000Z
notebooks/CandleJson.ipynb
HarshSharma009/checkStock
3d4a5354dab46706c7ee45488def99f619d5ab8a
[ "Apache-2.0" ]
5
2020-03-09T14:58:58.000Z
2022-02-10T10:48:15.000Z
notebooks/CandleJson.ipynb
HarshSharma009/checkStock
3d4a5354dab46706c7ee45488def99f619d5ab8a
[ "Apache-2.0" ]
3
2020-05-17T20:53:14.000Z
2021-03-28T20:32:31.000Z
22.968504
211
0.477717
[ [ [ "import plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport plotly.io as pio\nimport pandas as pd", "_____no_output_____" ], [ "json_path = '../static/data/SBER.ME.json'\ndf = pd.read_json(json_path)", "_____no_output_____" ], [ "df = df.tail(112)", "_____no_output_____" ], [ "get_open = lambda item: item['open']\nget_close = lambda item: item['close']\nget_high = lambda item: item['high']\nget_low = lambda item: item['low']", "_____no_output_____" ], [ "df_open = df['history'].map(get_open)\ndf_open.head()", "_____no_output_____" ], [ "df_close = df['history'].map(get_close)\ndf_close.head()", "_____no_output_____" ], [ "df_high = df['history'].map(get_high)\ndf_high.head()", "_____no_output_____" ], [ "df_low = df['history'].map(get_low)\ndf_low.head()", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "trace = go.Candlestick(x=df.index,\n open=df_open,\n high=df_high,\n low=df_low,\n close=df_close)\nlayout = go.Layout(xaxis = dict(rangeslider = dict(visible = False)))\ndata = [trace]", "_____no_output_____" ], [ "fig = go.Figure(data=data,layout=layout)\npy.iplot(fig, filename='simple_candlestick')", "High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~airvan21/0 or inside your plot.ly account where it is named 'simple_candlestick'\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6f230e4fd5cb70dd8172684a2820bd433125b2
18,902
ipynb
Jupyter Notebook
PyTorch_model/Pytorch_training_model.ipynb
Shra1-25/Deep-Learning-implementaion-on-FPGA-using-MATLAB
f1f2364c0432be40b4e3a0bef8ea49f8b1230c6f
[ "MIT" ]
null
null
null
PyTorch_model/Pytorch_training_model.ipynb
Shra1-25/Deep-Learning-implementaion-on-FPGA-using-MATLAB
f1f2364c0432be40b4e3a0bef8ea49f8b1230c6f
[ "MIT" ]
null
null
null
PyTorch_model/Pytorch_training_model.ipynb
Shra1-25/Deep-Learning-implementaion-on-FPGA-using-MATLAB
f1f2364c0432be40b4e3a0bef8ea49f8b1230c6f
[ "MIT" ]
2
2020-11-03T08:28:18.000Z
2021-06-02T09:36:10.000Z
18,902
18,902
0.621204
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "cd drive/My Drive/google_colab_gpu/GSOC 2020/CERN-HSF", "/content/drive/My Drive/google_colab_gpu/GSOC 2020/CERN-HSF\n" ], [ "ls", "cifar10vgg.h5 model3.hdf5\nground_truth.csv model3.json\nKeras_CERN.ipynb modelVGG.hdf5\nKeras_inference.ipynb modelVGG.json\nKeras_model2.ipynb output.csv\nmodel1.h5 pytorch1.pt\nmodel1.hdf5 Pytorch_CERN.ipynb\nmodel1.json SingleElectronPt50_IMGCROPS_n249k_RHv1.hdf5\nmodel2.h5 SingleElectronPt50_IMGCROPS_n249k_RHv1_inference.hdf5\nmodel2.hdf5 SinglePhotonPt50_IMGCROPS_n249k_RHv1.hdf5\nmodel2.json SinglePhotonPt50_IMGCROPS_n249k_RHv1_inference.hdf5\n" ], [ "#import cv2\nimport numpy as np\nimport pandas as pd\n#from google.colab.patches import cv2_imshow\nimport h5py", "_____no_output_____" ], [ "#import numpy as np\n#import matplotlib.pyplot as plt\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\n#import cv2\n#from keras.datasets import mnist\n\n#from keras.utils import np_utils\n#from keras.models import Sequential,load_model\n#from sklearn.model_selection import train_test_split\nimport torch.nn\n#import torchvision.datasets as dsets\n#import torchvision.transforms as transforms\n#from torch.autograd import Variable", "_____no_output_____" ], [ "filename='SingleElectronPt50_IMGCROPS_n249k_RHv1.hdf5'\ndata1 = h5py.File(filename, 'r')\nY1=data1['y']\nX1=data1['X']", "_____no_output_____" ], [ "filename='SinglePhotonPt50_IMGCROPS_n249k_RHv1.hdf5'\ndata0 = h5py.File(filename, 'r')\nY0=data0['y']\nX0=data0['X']", "_____no_output_____" ], [ "X_final=np.concatenate((X0[:],X1[:]),axis=0)\nY_final=np.concatenate((Y0[:],Y1[:]),axis=0)", "_____no_output_____" ], [ "X_train,X_valid, Y_train, Y_valid = train_test_split(X_final,Y_final,test_size = 0.2, random_state = 42)\nprint(X_train.shape,Y_train.shape)\nprint(X_valid.shape,Y_valid.shape)", "(398400, 32, 32, 2) (398400,)\n(99600, 32, 32, 2) (99600,)\n" ], [ "X_train0=(X_train[:,:,:,0].reshape((X_train.shape[0],1,X_train.shape[1],X_train.shape[2])))\nX_valid0=(X_valid[:,:,:,0].reshape((X_valid.shape[0],1,X_valid.shape[1],X_valid.shape[2])))\nX_train1=(X_train[:,:,:,1].reshape((X_train.shape[0],1,X_train.shape[1],X_train.shape[2])))\nX_valid1=(X_valid[:,:,:,1].reshape((X_valid.shape[0],1,X_valid.shape[1],X_valid.shape[2])))\nX_train.shape,X_valid.shape,X_train0.shape,X_valid0.shape,X_train1.shape,X_valid1.shape", "_____no_output_____" ], [ "X_train0, Y_train, X_valid0, Y_valid, X_train1, X_valid1 = map(torch.tensor, (X_train0, Y_train_tp, X_valid0, Y_valid_tp, X_train1, X_valid1))", "_____no_output_____" ], [ "import torch\nfrom torch.utils import data\n\nclass Dataset(data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, inputs, labels):\n 'Initialization'\n self.labels = labels\n self.inputs = inputs\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.inputs)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n\n # Get data and get label\n X = self.inputs[index]\n y = self.labels[index]\n\n return X, y", "_____no_output_____" ], [ "train_loader = torch.utils.data.DataLoader(dataset={'input':X_train0,'output':Y_train},\n batch_size=1024,\n shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(dataset={'input':X_valid0},\n batch_size=1024,\n shuffle=False)", "_____no_output_____" ], [ 
"window_height=32\nwindow_width=32", "_____no_output_____" ], [ "import torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)\n #self.relu1 = nn.Relu()\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)\n #self.relu2 = nn.Relu()\n self.pool = nn.MaxPool2d(2, 2)\n self.fc1 = nn.Linear(32*8*8, 128)\n #self.relu3 = nn.Relu()\n self.fc2 = nn.Linear(128, 64)\n #self.relu4 = nn.Relu()\n self.fc3 = nn.Linear(64, 1)\n #self.sigmoid1 = nn.Sigmoid()\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.sigmoid(self.fc3(x))\n return x\n\n\nnet = Net()\nnet.cuda()\nsummary(net, (1, 32, 32), device='cuda')", "----------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================\n Conv2d-1 [-1, 16, 32, 32] 160\n MaxPool2d-2 [-1, 16, 16, 16] 0\n Conv2d-3 [-1, 32, 16, 16] 4,640\n MaxPool2d-4 [-1, 32, 8, 8] 0\n Linear-5 [-1, 128] 262,272\n Linear-6 [-1, 64] 8,256\n Linear-7 [-1, 1] 65\n================================================================\nTotal params: 275,393\nTrainable params: 275,393\nNon-trainable params: 0\n----------------------------------------------------------------\nInput size (MB): 0.00\nForward/backward pass size (MB): 0.24\nParams size (MB): 1.05\nEstimated Total Size (MB): 1.29\n----------------------------------------------------------------\n" ], [ "use_cuda = True", "_____no_output_____" ], [ "if use_cuda and torch.cuda.is_available():\n net.cuda()", "_____no_output_____" ], [ "batch_size=1024\nimport torch\nfrom torch.utils import data\n#import cudnn\n#from my_classes import Dataset\n\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n#cudnn.benchmark = True\n\n# Parameters\nparams = {'batch_size': batch_size,\n 'shuffle': True,\n }\nmax_epochs = 100\n\n# Generators\ntraining_set = Dataset(X_train0, Y_train)\ntraining_generator = data.DataLoader(training_set, **params)\n\nvalidation_set = Dataset(X_valid0, Y_valid)\nvalidation_generator = data.DataLoader(validation_set, **params)", "_____no_output_____" ], [ "import torch.optim as optim\n\ncriterion = nn.BCELoss()\noptimizer = optim.Adam(net.parameters(), lr=0.001)", "_____no_output_____" ], [ "num_epochs=10\ncorrect=0\ntotal=0\nfor epoch in range(num_epochs):\n for i, (inputs, labels) in enumerate(training_generator): # Load a batch of images with its (index, data, class)\n #images = Variable(images.view(-1, 28*28)) # Convert torch tensor to Variable: change image from a vector of size 784 to a matrix of 28 x 28\n #labels = Variable(labels)\n \n if use_cuda and torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n \n optimizer.zero_grad() # Intialize the hidden weight to all zeros\n outputs = net(inputs)\n # Forward pass: compute the output class given a image\n loss = criterion(outputs, labels) # Compute the loss: difference between the output class and the pre-given label\n loss.backward() # Backward pass: compute the weight\n optimizer.step() # Optimizer: update the weights of hidden nodes\n 
outputs[outputs>0.5]=1\n outputs[outputs<0.5]=0\n total += labels.shape[0] # Increment the total count\n correct += (outputs == labels).sum()\n if (i+1) % 100 == 0: # Logging\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Accuracy: %.4f'\n %(epoch+1, num_epochs, i+1, len(X_train0)//batch_size, loss.item(), ((100 * correct.item() / total))))\n #outputs = net(X_valid0.cuda()) # Forward pass: compute the output class given a image\n #loss = criterion(outputs, Y_valid.cuda())", "Epoch [1/10], Step [100/389], Loss: 0.5880, Accuracy: 72.3496\nEpoch [1/10], Step [200/389], Loss: 0.5885, Accuracy: 72.2695\nEpoch [1/10], Step [300/389], Loss: 0.5496, Accuracy: 72.4202\nEpoch [2/10], Step [100/389], Loss: 0.5557, Accuracy: 72.4920\nEpoch [2/10], Step [200/389], Loss: 0.5232, Accuracy: 72.4929\nEpoch [2/10], Step [300/389], Loss: 0.5393, Accuracy: 72.4763\nEpoch [3/10], Step [100/389], Loss: 0.5464, Accuracy: 72.4199\nEpoch [3/10], Step [200/389], Loss: 0.5709, Accuracy: 72.4400\nEpoch [3/10], Step [300/389], Loss: 0.5871, Accuracy: 72.4235\nEpoch [4/10], Step [100/389], Loss: 0.5619, Accuracy: 72.4482\nEpoch [4/10], Step [200/389], Loss: 0.5391, Accuracy: 72.4549\nEpoch [4/10], Step [300/389], Loss: 0.5510, Accuracy: 72.4521\nEpoch [5/10], Step [100/389], Loss: 0.5422, Accuracy: 72.4495\nEpoch [5/10], Step [200/389], Loss: 0.5580, Accuracy: 72.4501\nEpoch [5/10], Step [300/389], Loss: 0.5097, Accuracy: 72.4626\nEpoch [6/10], Step [100/389], Loss: 0.5450, Accuracy: 72.4890\nEpoch [6/10], Step [200/389], Loss: 0.5463, Accuracy: 72.4815\nEpoch [6/10], Step [300/389], Loss: 0.5445, Accuracy: 72.4870\nEpoch [7/10], Step [100/389], Loss: 0.5239, Accuracy: 72.5034\nEpoch [7/10], Step [200/389], Loss: 0.5342, Accuracy: 72.5117\nEpoch [7/10], Step [300/389], Loss: 0.5417, Accuracy: 72.5158\nEpoch [8/10], Step [100/389], Loss: 0.5691, Accuracy: 72.5185\nEpoch [8/10], Step [200/389], Loss: 0.5395, Accuracy: 72.5260\nEpoch [8/10], Step [300/389], Loss: 0.5453, Accuracy: 72.5287\nEpoch [9/10], Step [100/389], Loss: 0.5857, Accuracy: 72.5350\nEpoch [9/10], Step [200/389], Loss: 0.5544, Accuracy: 72.5408\nEpoch [9/10], Step [300/389], Loss: 0.5705, Accuracy: 72.5450\nEpoch [10/10], Step [100/389], Loss: 0.5496, Accuracy: 72.5438\nEpoch [10/10], Step [200/389], Loss: 0.5598, Accuracy: 72.5497\nEpoch [10/10], Step [300/389], Loss: 0.5621, Accuracy: 72.5559\n" ], [ "correct = 0\ntotal = 0\nfor inputs, labels in validation_generator:\n #inputs = Variable(images.view(-1, 28*28))\n \n if use_cuda and torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n \n \n outputs = net(inputs)\n outputs[outputs>0.5]=1\n outputs[outputs<0.5]=0\n total += labels.shape[0] # Increment the total count\n correct += (outputs == labels).sum() # Increment the correct count\n \nprint('Validation Accuracy of the network on the ' + str(total)+' test images: %.4f' % ((100 * correct.item() / total))+' correct: '+str((correct).item()))", "Accuracy of the network on the 99600 test images: 72.2791 correct: 71990\n" ], [ "torch.save(net.state_dict(), 'pytorch1.pt')", "_____no_output_____" ], [ "net = Net()\nnet.load_state_dict(torch.load('pytorch1.pt'))\nif use_cuda and torch.cuda.is_available():\n net.cuda()\nnet.eval()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
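One subtlety worth flagging in the loops above: `net` returns shape `(batch, 1)` while the labels come out of the loader as `(batch,)`, so `(outputs == labels)` can broadcast to a `(batch, batch)` matrix and distort the count. A shape-safe sketch of the accuracy update (our variant, not the original cell):

```python
# Flatten predictions before thresholding and comparing.
outputs = net(inputs).view(-1)
loss = criterion(outputs, labels)
preds = (outputs > 0.5).float()
correct += (preds == labels.view(-1)).sum().item()
total += labels.shape[0]
```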
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6f35373c4f07cc411f69c7b69bc7608bb1fd0b
268,543
ipynb
Jupyter Notebook
seaborn Exercise.ipynb
pbj0812/TIL
ab765ceecd348ac56aa1315bca96c2a2c227ea1d
[ "MIT" ]
null
null
null
seaborn Exercise.ipynb
pbj0812/TIL
ab765ceecd348ac56aa1315bca96c2a2c227ea1d
[ "MIT" ]
3
2018-05-05T12:15:23.000Z
2018-05-05T12:25:42.000Z
seaborn Exercise.ipynb
pbj0812/TIL
ab765ceecd348ac56aa1315bca96c2a2c227ea1d
[ "MIT" ]
null
null
null
277.994824
55,000
0.91156
[ [ [ "import seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "tips = sns.load_dataset('tips')\ntips.head()", "_____no_output_____" ], [ "sns.distplot(tips['total_bill'], bins = 40)", "c:\\users\\user\\appdata\\local\\programs\\python\\python35\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ], [ "sns.jointplot(x = 'total_bill', y = 'tip', data = tips)", "c:\\users\\user\\appdata\\local\\programs\\python\\python35\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ], [ "sns.pairplot(tips)", "_____no_output_____" ], [ "sns.pairplot(tips, hue = 'sex')", "_____no_output_____" ], [ "sns.barplot(x = 'sex', y = 'total_bill', data = tips)", "_____no_output_____" ], [ "sns.countplot(x = 'sex', data = tips)", "_____no_output_____" ], [ "sns.countplot(x = 'day', data = tips)", "_____no_output_____" ], [ "sns.boxplot(x = 'day', y = 'total_bill', data = tips)", "_____no_output_____" ], [ "tc = tips.corr()\ntc", "_____no_output_____" ], [ "sns.heatmap(tc, annot = True)", "_____no_output_____" ], [ "flights = sns.load_dataset('flights')\nflights.head()", "_____no_output_____" ], [ "fp = flights.pivot_table(index = 'month', columns = 'year', values = 'passengers')\nfp", "_____no_output_____" ], [ "sns.heatmap(fp)", "_____no_output_____" ], [ "sns.clustermap(fp)", "_____no_output_____" ], [ "sns.lmplot(x = 'total_bill', y = 'tip', data = tips, hue = 'sex', markers = ['o', 'v'])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6f4127be58eef064efd787fe114cf1308625b1
30,275
ipynb
Jupyter Notebook
examples/fracture-gcmc-tutorial/dislocation.ipynb
sinamoeini/mapp4py
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
[ "MIT" ]
3
2018-06-06T05:43:36.000Z
2020-07-18T14:31:37.000Z
examples/fracture-gcmc-tutorial/dislocation.ipynb
sinamoeini/mapp4py
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
[ "MIT" ]
null
null
null
examples/fracture-gcmc-tutorial/dislocation.ipynb
sinamoeini/mapp4py
923ef57ee5bdb6231bec2885c09a58993b6c0f1f
[ "MIT" ]
7
2018-01-16T03:21:20.000Z
2020-07-20T19:36:13.000Z
26.487314
441
0.482147
[ [ [ "# Introdcution", "_____no_output_____" ], [ "This trial describes how to create edge and screw dislocations in iron BCC strating with one unitcell containing two atoms ", "_____no_output_____" ], [ "## Background", "_____no_output_____" ], [ "The elastic solution for displacement field of dislocations is provided in the paper [Dislocation Displacement Fields in Anisotropic Media](https://doi.org/10.1063/1.1657954).", "_____no_output_____" ], [ "## Theoritical", "_____no_output_____" ], [ "The [paper](https://doi.org/10.1063/1.1657954) mentioned in backgroud subsection deals with only one dislocation. Here we describe how to extend the solution to periodic array of dislocations. Since we are dealing with linear elasticity we can superpose (sum up) the displacement field of all the individual dislocations. Looking at the Eqs. (2-8) of abovementioned reference this boils done to finding a closed form soloution for \n\n$$\\sum_{m=-\\infty}^{\\infty} \\log\\left(z-ma \\right).$$\n\nWhere $z= x+yi$ and $a$ is a real number, equivakent to $\\mathbf{H}_{00}$ that defines the periodicity of dislocations on x direction. \n\n\nLet us simplify the problem a bit further. Since this is the component displacement field we can add or subtract constant term so for each $\\log\\left(z-ma \\right)$ we subtract a factor of $log\\left(a \\right)$, leading to\n\n$$\\sum_{m=-\\infty}^{\\infty} \\log\\left(\\frac{z}{a}-m \\right).$$\n\nLets change $z/a$ to $z$ and when we arrive the solution we will change ot back\n\n$$\\sum_{m=-\\infty}^{\\infty} \\log\\left(z-m \\right).$$\n", "_____no_output_____" ], [ "Objective is to find a closed form solution for\n\n$$f\\left(z\\right)=\\sum_{m=-\\infty}^{\\infty} \\log\\left(z-m \\right).$$\n\nFirst note that\n\n$$\nf'\\left(z\\right)=\\frac{1}{z}+\\sum_{m=1}^{\\infty}\\frac{1}{z-m}+\\frac{1}{z+m},\n$$\n\n\nand also\n$$\n\\frac{1}{z\\mp m}=\\mp \\frac{1}{m}\\sum_{n=0}^{\\infty}\n\\left(\\pm \\frac{z}{m}\\right)^n.\n$$\n\n\n\n\nThis leads to\n$$\n\\frac{1}{z-m}+\\frac{1}{z+m}=-\\frac{2}{z}\\sum_{n=1}^{\\infty}\\left(\\frac{z}{m}\\right)^{2n},\n$$\nand subsequently\n$$\nf'\\left(z\\right)=\\frac{1}{z}-\\frac{2}{z}\\sum_{n=1}^{\\infty}\\left(z\\right)^{2n}\\sum_{m=1}^{\\infty}m^{-2n},\n$$\n$$\n=\\frac{1}{z}-\\frac{2}{z}\\sum_{n=1}^{\\infty}\\left(z\\right)^{2n}\\zeta\\left(2n\\right).\n$$\nWhere $\\zeta$ is Riemann zeta function. Since $\\zeta\\left(0\\right)=-1/2$, it simplifies to:\n$$\nf'\\left(z\\right)=-\\frac{2}{z}\\sum_{n=0}^{\\infty}\\left(z\\right)^{2n}\\zeta\\left(2n\\right)\n$$\nNote that\n$$\n-\\frac{\\pi z\\cot\\left(\\pi z\\right)}{2}=\\sum_{n=0}^{\\infty}z^{2n} \\zeta\\left(2n\\right)\n$$\n\nI have no idea how I figured this out but it is true. Therefore,\n\n$$\nf'\\left(z\\right)=\\pi\\cot\\left(\\pi z\\right).\n$$", "_____no_output_____" ], [ "At this point one can naively assume that the problem is solved (like I did) and the answer is something like:\n$$\nf\\left(z\\right)=\\log\\left[\\sin\\left(\\pi z\\right)\\right]+C,\n$$\nWhere $C$ is a constant. However, after checking this against numerical vlaues you will see that this is completely wrong. ", "_____no_output_____" ], [ "The issue here is that startegy was wrong at the very begining. The sum of the displacelment of infinte dislocations will not converge since we have infinite discountinuity in displacement field. In other words they do not cancel each other they feed each other.\n\n\nBut there is still a way to salvage this. 
Luckily, displacement is a relative quantity and we are dealing with crystals. We can easily add a discontinuity in the form of an integer number of Burgers vectors to a displacement field and nothing will be affected. \n\nSo here is the trick: we will focus only on the displacement field of one dislocation (number 0). At each iteration we add two dislocations, one to its left and one to its right. \n\nAt the $n$th iteration we add a discontinuity of the form\n\n$$\n-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right] \\pi i\n$$\n\nand a constant of the form:\n$$\n-2\\log n.\n$$\n\nIn other words, we need to evaluate: \n$$\n\\lim_{m\\to\\infty}\\sum_{n=-m}^{m}\n\\biggl\\{\n\\log\\left(z-n\\right)\n-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right] \\pi i \n-2\\log\\left(n \\right)\n\\biggr\\} + \\pi,\n$$\n\nwhich simplifies to \n$$\n\\lim_{m\\to\\infty}\\sum_{n=-m}^{m}\\log\\left(z-n\\right)\n-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right] m \\pi i \n-2\\log\\left(\\frac{m\\!!}{\\sqrt{\\pi}} \\right)\n$$", "_____no_output_____" ], [ "Note that we added an extra $\\pi$ to the displacement field for aesthetic reasons. After a lot of manipulations and tricks (meaning I don't remember how I got here) we arrive at the following relation:\n$$\n\\lim_{m\\to\\infty}\\sum_{n=-m}^{m}\\log\\left(z-n\\right)\n-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right] m \\pi i \n-2\\log\\left(\\frac{m\\!!}{\\sqrt{\\pi}} \\right)=\\log\\left[\\sin\\left(\\pi z\\right)\\right]\n$$\nHowever, this is only valid when \n$$-1/2 \\le\\mathrm{Re}\\left(z\\right)\\lt 1/2.$$ \n\nIf one exceeds this domain the answer is:\n\n$$\n\\boxed{\n\\log\\left[\\sin\\left(\\pi z\\right)\\right]-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right]\\left \\lceil{\\mathrm{Re}\\left(\\frac{z}{2}\\right)}-\\frac{3}{4}\\right \\rceil 2 \\pi i \n}\n$$\nwhere $\\lceil . \\rceil$ is the ceiling function. Of course there is probably a nicer form. 
Feel free to derive it.\n\n", "_____no_output_____" ], [ "## Final formulation ", "_____no_output_____" ], [ "To account for the periodicity of dislocations in the $x$ direction, the expression $\\log\\left(z\\right)$ in Eqs. (2-7) of the [paper](https://doi.org/10.1063/1.1657954) should be replaced by\n\n$$\\lim_{m\\to\\infty}\\sum_{n=-m}^{m}\\log\\left(z-na\\right)\n-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(z\\right)\\right] m \\pi i \n-2\\log\\left(\\frac{m\\,\\,\\!!}{\\sqrt{\\pi}} \\right),$$\n\nwhich has the closed form:\n$$\n\\boxed{\n\\log\\left[\\sin\\left(\\pi\\frac{z}{a}\\right)\\right]-\\mathrm{Sign}\\left[\\mathrm{Im}\\left(\\frac{z}{a}\\right)\\right]\\left \\lceil{\\mathrm{Re}\\left(\\frac{z}{2a}\\right)}-\\frac{3}{4}\\right \\rceil 2 \\pi i.\n}\n$$", "_____no_output_____" ], [ "# Preparation", "_____no_output_____" ], [ "## Import packages", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport mapp4py\nfrom mapp4py import md\nfrom lib.elasticity import rot, cubic, resize, displace, HirthEdge, HirthScrew", "_____no_output_____" ] ], [ [ "## Block the output of all cores except for one", "_____no_output_____" ] ], [ [ "import os\nimport sys\nfrom mapp4py import mpi\nif mpi().rank!=0:\n with open(os.devnull, 'w') as f:\n sys.stdout = f;", "_____no_output_____" ] ], [ [ "## Define an `md.export_cfg` object ", "_____no_output_____" ], [ "`md.export_cfg` has a call method that we can use to create quick snapshots of our simulation box", "_____no_output_____" ] ], [ [ "xprt = md.export_cfg(\"\");", "_____no_output_____" ] ], [ [ "# Screw dislocation", "_____no_output_____" ] ], [ [ "sim=md.atoms.import_cfg('configs/Fe_300K.cfg');\nnlyrs_fxd=2\na=sim.H[0][0];\nb_norm=0.5*a*np.sqrt(3.0);\n\nb=np.array([1.0,1.0,1.0])\ns=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)", "_____no_output_____" ] ], [ [ "## Create a $\\langle110\\rangle\\times\\langle112\\rangle\\times\\frac{1}{2}\\langle111\\rangle$ cell", "_____no_output_____" ], [ "### create a $\\langle110\\rangle\\times\\langle112\\rangle\\times\\langle111\\rangle$ cell", "_____no_output_____" ], [ "Since `mapp4py.md.atoms.cell_change()` only accepts integer values, start by creating a $\\langle110\\rangle\\times\\langle112\\rangle\\times\\langle111\\rangle$ cell", "_____no_output_____" ] ], [ [ "sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]])", "_____no_output_____" ] ], [ [ "### Remove half of the atoms and readjust the positions of the remaining atoms", "_____no_output_____" ], [ "Now one needs to cut the cell in half in the $[111]$ direction. We can achieve this in three steps:\n\n1. Remove the atoms located above $\\frac{1}{2}[111]$\n2. Double the positions of the remaining atoms in that direction\n3. 
Shrink the box affinely to half along that direction", "_____no_output_____" ] ], [ [ "H=np.array(sim.H);\ndef _(x):\n if x[2] > 0.5*H[2, 2] - 1.0e-8:\n return False;\n else:\n x[2]*=2.0;\nsim.do(_);\n\n_ = np.full((3,3), 0.0)\n_[2, 2] = - 0.5\nsim.strain(_)", "_____no_output_____" ] ], [ [ "### Readjust the positions", "_____no_output_____" ] ], [ [ "displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0]))", "_____no_output_____" ] ], [ [ "## Replicating the unit cell", "_____no_output_____" ] ], [ [ "max_natms=100000\nH=np.array(sim.H);\nn_per_area=sim.natms/(H[0,0] * H[1,1]);\n_ =np.sqrt(max_natms/n_per_area);\nN0 = np.array([\n np.around(_ / sim.H[0][0]),\n np.around(_ / sim.H[1][1]), \n 1], dtype=np.int32)\n\nsim *= N0;", "_____no_output_____" ], [ "H = np.array(sim.H);\nH_new = np.array(sim.H);\nH_new[1][1] += 50.0\nresize(sim, H_new, np.full((3),0.5) @ H)", "_____no_output_____" ], [ "C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);\nQ=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)])\nhirth = HirthScrew(rot(C_Fe,Q), rot(b*0.5*a,Q))", "_____no_output_____" ], [ "ctr = np.full((3),0.5) @ H_new;\ns_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])\n\ndef _(x,x_d,x_dof):\n sy=(x[1]-ctr[1])/H[1, 1];\n x0=(x-ctr)/H[0, 0];\n\n if sy>s_fxd or sy<=-s_fxd:\n x_dof[1]=x_dof[2]=False;\n x+=b_norm*hirth.ave_disp(x0)\n else:\n x+=b_norm*hirth.disp(x0)\n\nsim.do(_) ", "_____no_output_____" ], [ "H = np.array(sim.H);\nH_inv = np.array(sim.B);\nH_new = np.array(sim.H);\n\nH_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2)\nH_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0]\nH_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2)\nF = np.transpose(H_inv @ H_new);\nsim.strain(F - np.identity(3))", "_____no_output_____" ], [ "xprt(sim, \"dumps/screw.cfg\")", "_____no_output_____" ] ], [ [ "## putting it all together", "_____no_output_____" ] ], [ [ "def make_scrw(nlyrs_fxd,nlyrs_vel,vel):\n #this is for 0K\n #c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608);\n #this is for 300K\n c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);\n \n #N0=np.array([80,46,5],dtype=np.int32)\n\n sim=md.atoms.import_cfg('configs/Fe_300K.cfg');\n a=sim.H[0][0];\n b_norm=0.5*a*np.sqrt(3.0);\n\n b=np.array([1.0,1.0,1.0])\n s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)\n Q=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)])\n c0=rot(c_Fe,Q)\n \n hirth = HirthScrew(rot(c_Fe,Q),np.dot(Q,b)*0.5*a)\n\n\n sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]])\n displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0]))\n\n max_natms=1000000\n n_per_vol=sim.natms/sim.vol;\n _=np.power(max_natms/n_per_vol,1.0/3.0);\n N1=np.full((3),0,dtype=np.int32);\n for i in range(0,3):\n N1[i]=int(np.around(_/sim.H[i][i]));\n\n N0=np.array([N1[0],N1[1],1],dtype=np.int32);\n sim*=N0;\n\n sim.kB=8.617330350e-5\n sim.create_temp(300.0,8569643);\n\n H=np.array(sim.H);\n H_new=np.array(sim.H);\n H_new[1][1]+=50.0\n resize(sim, H_new, np.full((3),0.5) @ H)\n ctr=np.dot(np.full((3),0.5),H_new);\n\n\n s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])\n s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1])\n\n def _(x,x_d,x_dof):\n sy=(x[1]-ctr[1])/H[1][1];\n x0=(x-ctr)/H[0][0];\n \n if sy>s_fxd or sy<=-s_fxd:\n x_d[1]=0.0;\n x_dof[1]=x_dof[2]=False;\n x+=b_norm*hirth.ave_disp(x0)\n else:\n x+=b_norm*hirth.disp(x0)\n \n if sy<=-s_vel or sy>s_vel:\n x_d[2]=2.0*sy*vel;\n\n sim.do(_) \n H = np.array(sim.H);\n H_inv = np.array(sim.B);\n H_new = np.array(sim.H);\n\n\n 
H_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2)\n H_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0]\n H_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2)\n F = np.transpose(H_inv @ H_new);\n sim.strain(F - np.identity(3))\n return N1[2],sim;", "_____no_output_____" ] ], [ [ "# Edge dislocation", "_____no_output_____" ] ], [ [ "sim=md.atoms.import_cfg('configs/Fe_300K.cfg');\nnlyrs_fxd=2\na=sim.H[0][0];\nb_norm=0.5*a*np.sqrt(3.0);\n\nb=np.array([1.0,1.0,1.0])\ns=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)", "_____no_output_____" ], [ "sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]])\nH=np.array(sim.H);\n\ndef _(x):\n if x[0] > 0.5*H[0, 0] - 1.0e-8:\n return False;\n else:\n x[0]*=2.0;\nsim.do(_);\n_ = np.full((3,3), 0.0)\n_[0,0] = - 0.5\nsim.strain(_)\ndisplace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0]))", "_____no_output_____" ], [ "max_natms=100000\nH=np.array(sim.H);\nn_per_area=sim.natms/(H[0, 0] * H[1, 1]);\n_ =np.sqrt(max_natms/n_per_area);\nN0 = np.array([\n np.around(_ / sim.H[0, 0]),\n np.around(_ / sim.H[1, 1]), \n 1], dtype=np.int32)\n\nsim *= N0;", "_____no_output_____" ], [ "# remove one layer along ... direction\nH=np.array(sim.H);\nfrac=H[0,0] /N0[0]\ndef _(x):\n if x[0] < H[0, 0] /N0[0] and x[1] >0.5*H[1, 1]:\n return False;\n\nsim.do(_)", "_____no_output_____" ], [ "H = np.array(sim.H);\nH_new = np.array(sim.H);\nH_new[1][1] += 50.0\nresize(sim, H_new, np.full((3),0.5) @ H)", "_____no_output_____" ], [ "C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);\n_ = np.cross(b,s)\nQ = np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)])\nhirth = HirthEdge(rot(C_Fe,Q), rot(b*0.5*a,Q))", "_____no_output_____" ], [ "_ = (1.0+0.5*(N0[0]-1.0))/N0[0];\nctr = np.array([_,0.5,0.5]) @ H_new;\nfrac = H[0][0]/N0[0]\n\ns_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])\n\ndef _(x,x_d,x_dof):\n sy=(x[1]-ctr[1])/H[1, 1];\n x0=(x-ctr);\n if(x0[1]>0.0):\n x0/=(H[0, 0]-frac)\n else:\n x0/= H[0, 0]\n\n\n if sy>s_fxd or sy<=-s_fxd:\n x+=b_norm*hirth.ave_disp(x0);\n x_dof[0]=x_dof[1]=False;\n else:\n x+=b_norm*hirth.disp(x0);\n\n x[0]-=0.25*b_norm;\n\nsim.do(_)", "_____no_output_____" ], [ "H = np.array(sim.H)\nH_new = np.array(sim.H);\nH_new[0, 0] -= 0.5*b_norm;\nresize(sim, H_new, np.full((3),0.5) @ H)", "_____no_output_____" ], [ "xprt(sim, \"dumps/edge.cfg\")", "_____no_output_____" ] ], [ [ "## putting it all together", "_____no_output_____" ] ], [ [ "def make_edge(nlyrs_fxd,nlyrs_vel,vel):\n #this is for 0K\n #c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608);\n #this is for 300K\n c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241);\n \n #N0=np.array([80,46,5],dtype=np.int32)\n\n sim=md.atoms.import_cfg('configs/Fe_300K.cfg');\n a=sim.H[0][0];\n b_norm=0.5*a*np.sqrt(3.0);\n\n b=np.array([1.0,1.0,1.0])\n s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0)\n\n # create rotation matrix\n _ = np.cross(b,s)\n Q=np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)])\n hirth = HirthEdge(rot(c_Fe,Q),np.dot(Q,b)*0.5*a)\n\n # create a unit cell \n sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]])\n H=np.array(sim.H);\n def f0(x):\n if x[0]>0.5*H[0][0]-1.0e-8:\n return False;\n else:\n x[0]*=2.0;\n sim.do(f0);\n _ = np.full((3,3), 0.0)\n _[0,0] = - 0.5\n sim.strain(_)\n displace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0]))\n\n max_natms=1000000\n n_per_vol=sim.natms/sim.vol;\n _=np.power(max_natms/n_per_vol,1.0/3.0);\n N1=np.full((3),0,dtype=np.int32);\n for i in range(0,3):\n N1[i]=int(np.around(_/sim.H[i][i]));\n\n N0=np.array([N1[0],N1[1],1],dtype=np.int32);\n N0[0]+=1;\n 
sim*=N0;\n\n\n # remove one layer along ... direction\n H=np.array(sim.H);\n frac=H[0][0]/N0[0]\n def _(x):\n if x[0] < H[0][0]/N0[0] and x[1]>0.5*H[1][1]:\n return False;\n\n sim.do(_)\n \n \n\n sim.kB=8.617330350e-5\n sim.create_temp(300.0,8569643);\n\n\n H = np.array(sim.H);\n H_new = np.array(sim.H);\n H_new[1][1] += 50.0\n ctr=np.dot(np.full((3),0.5),H);\n resize(sim,H_new, np.full((3),0.5) @ H)\n l=(1.0+0.5*(N0[0]-1.0))/N0[0];\n ctr=np.dot(np.array([l,0.5,0.5]),H_new);\n frac=H[0][0]/N0[0]\n\n s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1])\n s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1])\n\n def f(x,x_d,x_dof):\n sy=(x[1]-ctr[1])/H[1][1];\n x0=(x-ctr);\n if(x0[1]>0.0):\n x0/=(H[0][0]-frac)\n else:\n x0/= H[0][0]\n\n\n if sy>s_fxd or sy<=-s_fxd:\n x_d[1]=0.0;\n x_dof[0]=x_dof[1]=False;\n x+=b_norm*hirth.ave_disp(x0);\n else:\n x+=b_norm*hirth.disp(x0);\n \n if sy<=-s_vel or sy>s_vel:\n x_d[0]=2.0*sy*vel;\n x[0]-=0.25*b_norm;\n\n sim.do(f)\n H = np.array(sim.H)\n H_new = np.array(sim.H);\n H_new[0, 0] -= 0.5*b_norm;\n resize(sim, H_new, np.full((3),0.5) @ H)\n return N1[2], sim;", "_____no_output_____" ], [ "nlyrs_fxd=2\nnlyrs_vel=7;\nvel=-0.004;\nN,sim=make_edge(nlyrs_fxd,nlyrs_vel,vel)", "_____no_output_____" ], [ "xprt(sim, \"dumps/edge.cfg\")", "_____no_output_____" ], [ "_ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float);\nQ = np.linalg.inv(np.sqrt(_ @ _.T)) @ _;", "_____no_output_____" ], [ "C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q)", "_____no_output_____" ], [ "B = np.linalg.inv(\n np.array([\n [C[0, 0, 0, 0], C[0, 0, 1, 1], C[0, 0, 0, 1]],\n [C[0, 0, 1, 1], C[1, 1, 1, 1], C[1, 1, 0, 1]],\n [C[0, 0, 0, 1], C[1, 1, 0, 1], C[0, 1, 0, 1]]\n ]\n))", "_____no_output_____" ], [ "_ = np.roots([B[0, 0], -2.0*B[0, 2],2.0*B[0, 1]+B[2, 2], -2.0*B[1, 2], B[1, 1]])\n\nmu = np.array([_[0],0.0]);\n\nif np.absolute(np.conjugate(mu[0]) - _[1]) > 1.0e-12:\n mu[1] = _[1];\nelse:\n mu[1] = _[2]\n\nalpha = np.real(mu);\nbeta = np.imag(mu);\n\np = B[0,0] * mu**2 - B[0,2] * mu + B[0, 1]\nq = B[0,1] * mu - B[0, 2] + B[1, 1]/ mu\n\nK = np.stack([p, q]) * np.array(mu[1], mu[0]) /(mu[1] - mu[0])\n\nK_r = np.real(K)\nK_i = np.imag(K)", "_____no_output_____" ], [ "Tr = np.stack([\n np.array(np.array([[1.0, alpha[0]], [0.0, beta[0]]])), \n np.array([[1.0, alpha[1]], [0.0, beta[1]]])\n], axis=1)\n\n\ndef u_f0(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) + x[0])\ndef u_f1(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) - x[0]) * np.sign(x[1]) \n\n\ndef disp(x): \n _ = Tr @ x\n return K_r @ u_f0(_) + K_i @ u_f1(_)", "_____no_output_____" ] ], [ [ "## Putting it all together ", "_____no_output_____" ] ], [ [ "_ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float);\nQ = np.linalg.inv(np.sqrt(_ @ _.T)) @ _;\nC = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q)\ndisp = crack(C)", "_____no_output_____" ], [ "n = 300;\nr = 10;\ndisp_scale = 0.3;\n\nn0 = int(np.round(n/ (1 +np.pi), ))\nn1 = n - n0\n\nxs = np.concatenate((\n np.stack([np.linspace(0, -r , n0), np.full((n0,), -1.e-8)]),\n r * np.stack([np.cos(np.linspace(-np.pi, np.pi , n1)),np.sin(np.linspace(-np.pi, np.pi , n1))]), \n np.stack([np.linspace(-r, 0 , n0), np.full((n0,), 1.e-8)]),\n ), axis =1)\n\nxs_def = xs + disp_scale * disp(xs)\n\nfig, ax = plt.subplots(figsize=(10.5,5), ncols = 2)\nax[0].plot(xs[0], xs[1], \"b-\", label=\"non-deformed\");\nax[1].plot(xs_def[0], xs_def[1], \"r-.\", label=\"deformed\");", "_____no_output_____" ] ] ]
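One loose end in the cells above: `crack` is not among the names imported from `lib.elasticity`, so `disp = crack(C)` relies on a helper that is not shown. A minimal sketch of what such a factory could look like, simply bundling the `B`, `mu`, `K`, and `Tr` steps worked out cell by cell earlier (our reconstruction, not the library's actual code):

```python
import numpy as np

def crack(C):
    # Reduced compliance-like matrix from the rotated stiffness tensor C.
    B = np.linalg.inv(np.array([
        [C[0, 0, 0, 0], C[0, 0, 1, 1], C[0, 0, 0, 1]],
        [C[0, 0, 1, 1], C[1, 1, 1, 1], C[1, 1, 0, 1]],
        [C[0, 0, 0, 1], C[1, 1, 0, 1], C[0, 1, 0, 1]]]))
    # Roots of the characteristic quartic; keep one root from each conjugate pair.
    r = np.roots([B[0, 0], -2.0 * B[0, 2], 2.0 * B[0, 1] + B[2, 2], -2.0 * B[1, 2], B[1, 1]])
    mu = np.array([r[0], r[1] if np.absolute(np.conjugate(r[0]) - r[1]) > 1.0e-12 else r[2]])
    alpha, beta = np.real(mu), np.imag(mu)
    p = B[0, 0] * mu**2 - B[0, 2] * mu + B[0, 1]
    q = B[0, 1] * mu - B[0, 2] + B[1, 1] / mu
    K = np.stack([p, q]) * np.array([mu[1], mu[0]]) / (mu[1] - mu[0])
    K_r, K_i = np.real(K), np.imag(K)
    Tr = np.stack([np.array([[1.0, alpha[0]], [0.0, beta[0]]]),
                   np.array([[1.0, alpha[1]], [0.0, beta[1]]])], axis=1)

    def disp(x):
        z = Tr @ x
        f0 = np.sqrt(np.sqrt(z[0] * z[0] + z[1] * z[1]) + z[0])
        f1 = np.sqrt(np.sqrt(z[0] * z[0] + z[1] * z[1]) - z[0]) * np.sign(z[1])
        return K_r @ f0 + K_i @ f1

    return disp
```

Note this also sidesteps a small bug in the step-by-step cells: `np.array(mu[1], mu[0])` passes `mu[0]` as a dtype argument, whereas the element-wise scaling intended here needs `np.array([mu[1], mu[0]])`. If a real `lib.elasticity.crack` exists, prefer it over this sketch.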
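Separately, the boxed closed form from the Theoretical section is easy to spot-check numerically (our own sketch; the truncation order and test points are arbitrary, and the $m\!!$ in the formula is read as the factorial $m!$ it renders to):

```python
import numpy as np
from math import lgamma, log, pi

def partial_sum(z, m):
    # Truncated regularized sum: log(z - n) for n = -m..m, minus the
    # discontinuity correction and the -2*log(m!/sqrt(pi)) constant.
    n = np.arange(-m, m + 1)
    s = np.sum(np.log(z - n))
    s -= np.sign(z.imag) * m * pi * 1j
    s -= 2.0 * (lgamma(m + 1.0) - 0.5 * log(pi))
    return s

def closed_form(z):
    # log(sin(pi z)) plus the branch correction, with a = 1.
    return np.log(np.sin(pi * z)) - np.sign(z.imag) * np.ceil(z.real / 2.0 - 0.75) * 2.0 * pi * 1j

for z in (0.3 + 0.2j, -0.1 - 0.4j):
    print(z, partial_sum(z, 5000), closed_form(z))  # agreement improves like 1/m
```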
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6f5367477176fd4ab37b6fa577bd180c8f4576
14,543
ipynb
Jupyter Notebook
notebooks/rolling_updates.ipynb
xaniasd/seldon-core
309d3e1cbbe63b0b2c2a00cd0b04241832c5e50e
[ "Apache-2.0" ]
null
null
null
notebooks/rolling_updates.ipynb
xaniasd/seldon-core
309d3e1cbbe63b0b2c2a00cd0b04241832c5e50e
[ "Apache-2.0" ]
120
2020-04-27T09:48:02.000Z
2021-07-26T06:26:10.000Z
notebooks/rolling_updates.ipynb
xaniasd/seldon-core
309d3e1cbbe63b0b2c2a00cd0b04241832c5e50e
[ "Apache-2.0" ]
null
null
null
26.346014
191
0.516125
[ [ [ "# Rolling Update Tests\n\nCheck rolling updates function as expected.", "_____no_output_____" ] ], [ [ "import json\nimport time", "_____no_output_____" ], [ "!kubectl create namespace seldon", "_____no_output_____" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon", "_____no_output_____" ] ], [ [ "## Change Image", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/fixed_v1.yaml", "_____no_output_____" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \\\n -o jsonpath='{.items[0].metadata.name}')", "_____no_output_____" ], [ "for i in range(60):\n state=!kubectl get sdep fixed -o jsonpath='{.status.state}'\n state=state[0]\n print(state)\n if state==\"Available\":\n break\n time.sleep(1)\nassert(state==\"Available\")", "_____no_output_____" ], [ "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"", "_____no_output_____" ], [ "!kubectl apply -f resources/fixed_v2.yaml", "_____no_output_____" ], [ "time.sleep(5) # To allow operator to start the update\nfor i in range(120):\n responseRaw=!curl -s -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H \"Content-Type: application/json\"\n try:\n response = json.loads(responseRaw[0])\n except:\n print(\"Failed to parse json\",responseRaw)\n continue\n assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)\n jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json\n data=\"\".join(jsonRaw)\n resources = json.loads(data)\n numReplicas = int(resources[\"items\"][0][\"status\"][\"replicas\"])\n if numReplicas == 3:\n break\n time.sleep(1)\nprint(\"Rollout Success\")", "_____no_output_____" ], [ "!kubectl delete -f resources/fixed_v1.yaml", "_____no_output_____" ] ], [ [ "## Separate Service Orchestrator", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/fixed_v1_sep.yaml", "_____no_output_____" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \\\n -o jsonpath='{.items[0].metadata.name}')", "_____no_output_____" ], [ "for i in range(60):\n state=!kubectl get sdep fixed -o jsonpath='{.status.state}'\n state=state[0]\n print(state)\n if state==\"Available\":\n break\n time.sleep(1)\nassert(state==\"Available\")", "_____no_output_____" ], [ "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"", "_____no_output_____" ], [ "!kubectl apply -f resources/fixed_v2_sep.yaml", "_____no_output_____" ], [ "time.sleep(5) # To allow operator to start the update\nfor i in range(120):\n responseRaw=!curl -s -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H \"Content-Type: application/json\"\n try:\n response = json.loads(responseRaw[0])\n except:\n print(\"Failed to parse json\",responseRaw)\n continue \n assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)\n jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json\n data=\"\".join(jsonRaw)\n resources = json.loads(data)\n numReplicas = int(resources[\"items\"][0][\"status\"][\"replicas\"])\n if numReplicas == 1:\n break\n time.sleep(1)\nprint(\"Rollout Success\")", "_____no_output_____" ], [ "!kubectl delete -f 
resources/fixed_v1_sep.yaml", "_____no_output_____" ] ], [ [ "## Two PodSpecs", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/fixed_v1_2podspecs.yaml", "_____no_output_____" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \\\n -o jsonpath='{.items[0].metadata.name}')", "_____no_output_____" ], [ "for i in range(60):\n state=!kubectl get sdep fixed -o jsonpath='{.status.state}'\n state=state[0]\n print(state)\n if state==\"Available\":\n break\n time.sleep(1)\nassert(state==\"Available\")", "_____no_output_____" ], [ "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"", "_____no_output_____" ], [ "!kubectl apply -f resources/fixed_v2_2podspecs.yaml", "_____no_output_____" ], [ "time.sleep(5) # To allow operator to start the update\nfor i in range(120):\n responseRaw=!curl -s -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H \"Content-Type: application/json\"\n try:\n response = json.loads(responseRaw[0])\n except:\n print(\"Failed to parse json\",responseRaw)\n continue\n assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)\n jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json\n data=\"\".join(jsonRaw)\n resources = json.loads(data)\n numReplicas = int(resources[\"items\"][0][\"status\"][\"replicas\"])\n if numReplicas == 1:\n break\n time.sleep(1)\nprint(\"Rollout Success\")", "_____no_output_____" ], [ "!kubectl delete -f resources/fixed_v1_2podspecs.yaml", "_____no_output_____" ] ], [ [ "## Two Models", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/fixed_v1_2models.yaml", "_____no_output_____" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \\\n -o jsonpath='{.items[0].metadata.name}')", "_____no_output_____" ], [ "for i in range(60):\n state=!kubectl get sdep fixed -o jsonpath='{.status.state}'\n state=state[0]\n print(state)\n if state==\"Available\":\n break\n time.sleep(1)\nassert(state==\"Available\")", "_____no_output_____" ], [ "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"", "_____no_output_____" ], [ "!kubectl apply -f resources/fixed_v2_2models.yaml", "_____no_output_____" ], [ "time.sleep(5) # To allow operator to start the update\nfor i in range(120):\n responseRaw=!curl -s -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H \"Content-Type: application/json\"\n try:\n response = json.loads(responseRaw[0])\n except:\n print(\"Failed to parse json\",responseRaw)\n continue\n assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)\n jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json\n data=\"\".join(jsonRaw)\n resources = json.loads(data)\n numReplicas = int(resources[\"items\"][0][\"status\"][\"replicas\"])\n if numReplicas == 3:\n break\n time.sleep(1)\nprint(\"Rollout Success\")", "_____no_output_____" ], [ "!kubectl delete -f resources/fixed_v2_2models.yaml", "_____no_output_____" ] ], [ [ "## Model name changes\n\nThis will not do a rolling update but create a new deployment.", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/fixed_v1.yaml", "_____no_output_____" ], [ "!kubectl rollout status 
deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \\\n -o jsonpath='{.items[0].metadata.name}')", "_____no_output_____" ], [ "for i in range(60):\n state=!kubectl get sdep fixed -o jsonpath='{.status.state}'\n state=state[0]\n print(state)\n if state==\"Available\":\n break\n time.sleep(1)\nassert(state==\"Available\")", "_____no_output_____" ], [ "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"", "_____no_output_____" ], [ "!kubectl apply -f resources/fixed_v2_new_name.yaml", "_____no_output_____" ], [ "time.sleep(5)\nfor i in range(120):\n responseRaw=!curl -s -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H \"Content-Type: application/json\"\n try:\n response = json.loads(responseRaw[0])\n except:\n print(\"Failed to parse json\",responseRaw)\n continue\n assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)\n jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json\n data=\"\".join(jsonRaw)\n resources = json.loads(data)\n numItems = len(resources[\"items\"])\n if numItems == 1:\n break\n time.sleep(1)\nprint(\"Rollout Success\")", "_____no_output_____" ], [ "!kubectl delete -f resources/fixed_v2_new_name.yaml", "_____no_output_____" ] ] ]
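The update cells in the record above repeat the same poll-and-check pattern five times; a reusable helper could look like the following sketch (a hypothetical function, not part of the notebook, relying only on the `kubectl get deploy -l ... -o json` call the notebook already uses):

```python
import json
import subprocess
import time

def wait_for_replicas(label, expected, attempts=120, delay=1):
    """Poll `kubectl get deploy` until the labelled deployment reports
    the expected replica count, or give up after `attempts` tries."""
    for _ in range(attempts):
        raw = subprocess.check_output(
            ["kubectl", "get", "deploy", "-l", label, "-o", "json"])
        items = json.loads(raw)["items"]
        if items and int(items[0]["status"].get("replicas", 0)) == expected:
            return True
        time.sleep(delay)
    return False

# e.g. wait_for_replicas("seldon-deployment-id=fixed", 3)
```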
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a6f6579d3d602b1fb11fc7527bc81c635c7abbf
132
ipynb
Jupyter Notebook
01-Lesson-Plans/11-Classification/2/Activities/04-Stu_Predicting_Fraud/Solved/preventing-fraud.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
1
2021-04-13T07:14:34.000Z
2021-04-13T07:14:34.000Z
01-Lesson-Plans/11-Classification/2/Activities/04-Stu_Predicting_Fraud/Solved/preventing-fraud.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
2
2021-06-02T03:14:19.000Z
2022-02-11T23:21:24.000Z
01-Lesson-Plans/11-Classification/2/Activities/04-Stu_Predicting_Fraud/Solved/preventing-fraud.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
1
2021-05-07T13:26:50.000Z
2021-05-07T13:26:50.000Z
33
75
0.886364
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a6f759028ca5c6fa199b24e16011d707a37165c
19,452
ipynb
Jupyter Notebook
datasciencester-chp7.ipynb
jonathanelbailey/data-science
e9d6a59614b5439bc02bae0f6b41101cc2246335
[ "MIT" ]
null
null
null
datasciencester-chp7.ipynb
jonathanelbailey/data-science
e9d6a59614b5439bc02bae0f6b41101cc2246335
[ "MIT" ]
null
null
null
datasciencester-chp7.ipynb
jonathanelbailey/data-science
e9d6a59614b5439bc02bae0f6b41101cc2246335
[ "MIT" ]
null
null
null
27.167598
509
0.567191
[ [ [ "# Hypothesis and Inference", "_____no_output_____" ], [ "In this chapter, we test hypotheses. First, let's test the hypothesis that a series of coin flips comes from a fair coin. It also builds upon previous functions found in earlier chapters.\n\n### Assumptions:\n\n1. each flip is a Bernoulli trial, meaning that `X` is a binomial `(n,p)` random variable.\n2. `X` can be approximated using the normal distribution.\n3. Normal CDF is the probability that a var is below a threshold.\n4. anything not below the threshold is considered to be above the threshold.\n5. A var that's less than `hi` but not less than `lo` is considered to be between the thresholds.\n6. A var that is not between is considered outside.", "_____no_output_____" ] ], [ [ "import math\n\n# Bernoulli trial #1\ndef normal_approximation_to_binomial(n, p):\n    mu = p * n\n    sigma = math.sqrt(p * (1 - p) * n)\n    return mu, sigma\n\n# normal distribution function that determines a value below threshold. #2,#3\ndef normal_cdf(x, mu=0, sigma=1):\n    return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2\n\nnormal_probability_below = normal_cdf\n\n# normal distribution that determines a value above threshold #4\ndef normal_probability_above(lo, mu=0, sigma=1):\n    return 1 - normal_cdf(lo, mu, sigma)\n\n# normal distribution function that determines a value between #5\ndef normal_probability_between(lo, hi, mu=0, sigma=1):\n    return normal_cdf(hi, mu, sigma) - normal_cdf(lo, mu, sigma)\n\n# normal distribution function that determines a value outside #6\ndef normal_probability_outside(lo, hi, mu=0, sigma=1):\n    return 1 - normal_probability_between(lo, hi, mu, sigma)", "_____no_output_____" ] ], [ [ "We can also do the reverse of the above: find the nontail region around the center that accounts for a given probability, using `inverse_normal_cdf`:", "_____no_output_____" ] ], [ [ "def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):\n    if mu != 0 or sigma != 1:\n        return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)\n    low_z, low_p = -10.0, 0\n    hi_z, hi_p = 10.0, 1\n    while hi_z - low_z > tolerance:\n        mid_z = (low_z + hi_z) / 2\n        mid_p = normal_cdf(mid_z)\n        if mid_p < p:\n            low_z, low_p = mid_z, mid_p\n        elif mid_p > p:\n            hi_z, hi_p = mid_z, mid_p\n        else:\n            break\n    return mid_z\n\ndef normal_upper_bound(probability, mu=0, sigma=1):\n    return inverse_normal_cdf(probability, mu, sigma)\n\ndef normal_lower_bound(probability, mu=0, sigma=1):\n    return inverse_normal_cdf(1 - probability, mu, sigma)\n\ndef normal_two_sided_bounds(probability, mu=0, sigma=1):\n    tail_probability = (1 - probability) / 2\n    upper_bound = normal_lower_bound(tail_probability, mu, sigma)\n    lower_bound = normal_upper_bound(tail_probability, mu, sigma)\n    return lower_bound, upper_bound", "_____no_output_____" ] ], [ [ "Since we've created our functions, let's begin testing. Let `n=1000` where `n` is the number of coin flips that will populate our event data. If our hypothesis is true, `X` should have a mean close to 500.", "_____no_output_____" ] ], [ [ "mu_0, sigma_0 = normal_approximation_to_binomial(1000, 0.5)\n\nprint(mu_0, sigma_0)", "500.0 15.811388300841896\n" ] ], [ [ "So, we've gotten our `mu` (mean) and `sigma` (standard deviation) values. Next, we'll need to determine significance. This is done by setting our willingness to accept a false positive at `5%`.", "_____no_output_____" ] ], [ [ "normal_two_sided_bounds(0.95, mu_0, sigma_0)", "_____no_output_____" ] ], [ [ "The values 469 and 531 are now considered our lower and upper bounds, respectively. 
If `Hsub0` (our hypothesis that the coin is fair) is true and `p=0.5`, then our test will give the correct result roughly 19 times out of 20; it will falsely reject only 1 time in 20.\n\nOur next goal is to determine the *power* of our test. While determining significance allows us to limit type 1 errors (false positives), power measures our ability to avoid type 2 errors (a failure to reject `Hsub0` even though it is false). To determine this, we check what happens if `p` takes some particular alternative value; in this instance, we'll check `p=0.55`, a coin slightly biased toward heads.", "_____no_output_____" ] ], [ [ "# set vars for determining power of our test\nlo, hi = normal_two_sided_bounds(0.95, mu_0, sigma_0)\nprint(lo, hi)", "469.01026640487555 530.9897335951244\n" ], [ "# set vars for determining power if p = 0.55\nmu_1, sigma_1 = normal_approximation_to_binomial(1000, 0.55)\nprint(mu_1, sigma_1)\n", "550.0 15.732132722552274\n" ] ], [ [ "And here we can determine our power value. However, the two-sided test wastes some power: it also rejects `Hsub0` when `X` falls below the lower bound of 469, which is very unlikely to happen if the true mean is 550.", "_____no_output_____" ] ], [ [ "type_2_probability = normal_probability_between(lo, hi, mu_1, sigma_1)\npower = 1 - type_2_probability\nprint(power)", "0.8865480012953671\n" ] ], [ [ "In order to get a better power value, we can introduce a one sided test that rejects `Hsub0` when `X` is much larger than 500, but not when it's smaller. One sided tests are useful when conducting hypothesis tests where `Hsub1` is known to have a bias in one direction versus another.", "_____no_output_____" ] ], [ [ "hi = normal_upper_bound(0.95, mu_0, sigma_0)\nprint(hi)", "526.0073585242053\n" ], [ "type_2_probability = normal_probability_below(hi, mu_1, sigma_1)\npower = 1 - type_2_probability\nprint(power)", "0.9363794803307173\n" ] ], [ [ "Now that's a lot better. Compared with the two-sided test, this one no longer rejects `Hsub0` when `X` falls below 469 (very unlikely if `Hsub1` is true), and it additionally rejects when `X` is between 526 (the new `hi`) and 531 (the old upper bound), which is fairly likely if `Hsub1` is true.\n\nAn alternative to choosing bounds in advance is to compute *p-values*: the probability, assuming `Hsub0` is true, of seeing a value at least as extreme as the one actually observed.", "_____no_output_____" ] ], [ [ "def two_sided_p_value(x, mu=0, sigma=1):\n    if x >= mu:\n        return 2 * normal_probability_above(x, mu, sigma)\n    else:\n        return 2 * normal_probability_below(x, mu, sigma)\n\n# using 529.5 instead of 530 for continuity correction. Basically 529.5-530.5 as a range is a better estimate than\n# using 530 specifically.\ntwo_sided_p_value(529.5, mu_0, sigma_0)", "_____no_output_____" ] ], [ [ "A quick way to confirm that the continuity correction (529.5) represents \"at least 530 heads\" better than 530 itself is to run a quick simulation:", "_____no_output_____" ] ], [ [ "import random\nextreme_value_count = 0\n\nfor _ in range(100000):\n    num_heads = sum(1 if random.random() < 0.5 else 0\n                    for _ in range(1000))\n    if num_heads >=530 or num_heads <=470:\n        extreme_value_count += 1\n\nprint(extreme_value_count / 100000)", "0.06181\n" ] ], [ [ "So what does this value mean? Since it's larger than 5%, we don't reject the null hypothesis. 
If it were just a bit larger, the outcome would be a bit different:", "_____no_output_____" ] ], [ [ "two_sided_p_value(531.5, mu_0, sigma_0)", "_____no_output_____" ] ], [ [ "Since this value falls below our 5% threshold, we would have to reject the null hypothesis.\n\nFor a one sided test, we would have the following new functions:", "_____no_output_____" ] ], [ [ "upper_p_value = normal_probability_above\nlower_p_value = normal_probability_below\n\nupper_p_value(524.5, mu_0, sigma_0)", "_____no_output_____" ] ], [ [ "This value wouldn't be rejected, but if the value were 527:", "_____no_output_____" ] ], [ [ "upper_p_value(526.5, mu_0, sigma_0)", "_____no_output_____" ] ], [ [ "Which would be rejected by the one sided test.\n\nA third approach is to construct a *confidence interval* around the observed value of the parameter. By the central limit theorem, the average of the Bernoulli variables should be approximately normal, with mean `p` and standard deviation:\n\n`math.sqrt(p * (1 - p) / 1000)`\n\nWe don't know `p`, so instead we use an estimate:", "_____no_output_____" ] ], [ [ "p_hat = 525 / 1000\nmu = p_hat\nsigma = math.sqrt(p_hat * (1 - p_hat) / 1000)\nprint(sigma)", "0.015791611697353755\n" ], [ "normal_two_sided_bounds(0.95, mu, sigma)", "_____no_output_____" ] ], [ [ "So, using the normal approximation, we can say that we are 95% confident that this interval contains the true `p`. Since 0.5 lies inside the interval, a fair coin is consistent with this result.\n\nAlternatively, here is a result whose interval would exclude a fair coin:", "_____no_output_____" ] ], [ [ "p_hat = 540 / 1000\nmu = p_hat\nsigma = math.sqrt(p_hat * (1 - p_hat) / 1000)\nprint(sigma)", "0.015760710643876435\n" ], [ "normal_two_sided_bounds(0.95, mu, sigma)", "_____no_output_____" ] ], [ [ "This time 0.5 falls outside the interval, so the \"fair coin\" hypothesis `Hsub0` fails the test.\n\nA practice to watch out for is *p-hacking*: testing many variations of a hypothesis, or trimming just enough outliers, until some p-value falls below `0.05`. Because a 5% significance procedure will, by construction, erroneously reject a true null hypothesis 5% of the time, this is not a valid way of establishing results. A good data scientist should have a hypothesis developed prior to reviewing data, and should clean the data without the hypothesis in mind. Additionally, p-values shouldn't be a substitute for common sense.\n\nWhen attempting to compare two sets of data, it may be appropriate to use an *A/B test*. In this example, we'll say that we are testing the popularity of two ads A and B. If `NsubA` people see ad A and `nsubA` of them have clicked it, and `NsubB` people see ad B and `nsubB` of them have clicked it, we know that `nsubA / NsubA` is approximately a normal random variable.", "_____no_output_____" ] ], [ [ "def estimated_parameters(N, n):\n    p = n / N\n    sigma = math.sqrt(p * (1 - p) / N)\n    return p, sigma\n\ndef a_b_test_statistic(N_A, n_A, N_B, n_B):\n    p_A, sigma_A = estimated_parameters(N_A, n_A)\n    p_B, sigma_B = estimated_parameters(N_B, n_B)\n    return (p_B - p_A) / math.sqrt(sigma_A ** 2 + sigma_B ** 2)", "_____no_output_____" ] ], [ [ "So, if Ad A \"Tastes Great\" gets `200 clicks/1000 views` and Ad B \"Less Bias\" gets `180 clicks / 1000 views`:", "_____no_output_____" ] ], [ [ "z = a_b_test_statistic(1000, 200, 1000, 180)\nprint(z)", "-1.1403464899034472\n" ] ], [ [ "The probability of seeing such a large difference if the means were actually equal would be:", "_____no_output_____" ] ], [ [ "two_sided_p_value(z)", "_____no_output_____" ] ], [ [ "Which is large enough that you can't conclude there's much of a difference. 
On the other hand, if \"Less Bias\" only got 150 clicks:", "_____no_output_____" ] ], [ [ "z = a_b_test_statistic(1000, 200, 1000, 150)\nprint(z)", "-2.948839123097944\n" ], [ "two_sided_p_value(z)", "_____no_output_____" ] ], [ [ "Which means there's only a 0.003 probability that you'd see such a large difference if the ads were equally effective.\n\nA final method of determining the validity of a hypothesis is by treating the unknown parameters themselves as random variables. By using a *prior distribution* for the parameters and then using the observed data and *Bayes's Theorem* to get an updated *posterior distribution* for the parameters, you can make probability judgements about the parameters themselves rather than about the tests.\n\nFor example, when the unknown parameter is a probability, as in the coin-flipping example, we often use a prior from the *Beta distribution*, which puts all its probability between 0 and 1:", "_____no_output_____" ] ], [ [ "def B(alpha, beta):\n    # normalizing constant so that the density integrates to 1\n    return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)\n\ndef beta_pdf(x, alpha, beta):\n    if x < 0 or x > 1:\n        return 0\n    return x ** (alpha - 1) * (1 - x) ** (beta - 1) / B(alpha, beta)\n\n", "_____no_output_____" ] ] ]
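The Beta prior described at the end of the record above is conjugate to the binomial likelihood, so the posterior update has a closed form. A minimal sketch (not from the original chapter) applied to the 525-heads example:

```python
def posterior_params(alpha, beta, heads, flips):
    # Beta(alpha, beta) prior + h heads in n flips -> Beta(alpha + h, beta + n - h)
    return alpha + heads, beta + (flips - heads)

alpha, beta = 1, 1  # uniform prior
a_post, b_post = posterior_params(alpha, beta, heads=525, flips=1000)
print(a_post, b_post, a_post / (a_post + b_post))  # 526 476, posterior mean ~0.525
```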
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6f7ebec639e885cc17849615f85f5268396d4b
33,738
ipynb
Jupyter Notebook
Homework/HW8/HW8-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
Homework/HW8/HW8-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
Homework/HW8/HW8-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
33.37092
499
0.507766
[ [ [ "# Homework 8\n## Due Date: Tuesday, October 31st at 11:59 PM", "_____no_output_____" ], [ "# Problem 1: BST Traversal\nThis problem builds on Problem 1 of Homework 7 in which you wrote a binary search tree.\n\n### Part 1\n\nAs discussed in lecture, three different types to do a depth-first traversal are: preorder, inorder, and postorder. Here is a reference: [Tree Traversal](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search).\n\nWrite an iterator class called `DFSTraversal` with the following specifications:\n\n* `__init__(self, tree, traversalType)`: Constructor takes a `BinaryTree` object and one of the enums from `DFSTraversalTypes`\n\n```python\nfrom enum import Enum\n\nclass DFSTraversalTypes(Enum):\n PREORDER = 1\n INORDER = 2\n POSTORDER = 3\n```\n\n* `changeTraversalType(self, traversalType)`: Change the traversal type\n* `__iter__(self)`: This is the initialization of an iterator\n* `__next__(self)`: This is called in the iterator for getting the next value\n\nHere's how you might use your `DFSTraversal` class:\n\n```python\ninput_array = [3, 9, 2, 11]\nbt = BinaryTree()\nfor val in input_array:\n bt.insert(val)\ntraversal = DFSTraversal(bt, DFSTraversalTypes.INORDER)\nfor val in traversal:\n print(val)\n2\n3\n9\n11\n```\n\n### Part 2\nPut your `BinaryTree` class (from homework 7) and your `DFSTraversal` class (from Part 1 of this homework) in a file titled `TreeTraversal.py`.", "_____no_output_____" ] ], [ [ "import warnings\n\n# The BinaryNode class for nodes in the BinaryTree\nclass BinaryNode:\n \n def __init__(self, val):\n self.val = val\n self.p = None\n self.left = None\n self.right = None\n \n def __repr__(self):\n return \"BinaryNode({})\".format(self.val)\n \n def count_child(self): # count the number of children of this node\n if self.left == None and self.right == None:\n return 0\n elif self.left != None and self.right != None:\n return 2\n else:\n return 1\n\n# The BinaryTree class\nclass BinaryTree:\n \n def __init__(self):\n self.root = None\n \n def __repr__(self):\n return \"BinaryTree()\"\n \n # The height of the BinaryTree\n def __len__(self):\n return self.maxDepth(self.root)\n \n # The height of the BinaryTree\n def maxDepth(self, root): \n if root == None:\n return 0\n else:\n return max(self.maxDepth(root.left), self.maxDepth(root.right))+1\n \n \n # Insert\n def insert(self, val):\n bi_node = BinaryNode(val) # create a new BinaryNode for the value to be inserted\n \n if self.root == None: # if the tree is empty, we just need to insert it at root\n self.root = bi_node\n return\n \n current_node = self.root # walk thru the tree to find the right position to insert\n while current_node != None:\n current_p = current_node\n if val > current_node.val:\n current_node = current_node.right\n else:\n current_node = current_node.left\n \n if val > current_p.val: \n current_p.right = bi_node # is a right child\n else:\n current_p.left = bi_node # is a left child\n bi_node.p = current_p # set parent\n \n def inOrderWalk(self, node, ordered_nodes):\n if node != None:\n self.inOrderWalk(node.left, ordered_nodes)\n ordered_nodes.append(node.val)\n self.inOrderWalk(node.right, ordered_nodes)\n return ordered_nodes\n \n def preOrderWalk(self, node, ordered_nodes):\n if node != None:\n ordered_nodes.append(node.val)\n self.preOrderWalk(node.left, ordered_nodes)\n self.preOrderWalk(node.right, ordered_nodes)\n return ordered_nodes\n \n def postOrderWalk(self, node, ordered_nodes):\n if node != None:\n self.postOrderWalk(node.left, ordered_nodes)\n 
self.postOrderWalk(node.right, ordered_nodes)\n ordered_nodes.append(node.val)\n return ordered_nodes\n \n # Delete the nodes with 'None' as value\n def clearNoneNodes(self, node):\n if node != None:\n if node.val == 'None':\n if node == node.p.right:\n node.p.right = None\n else:\n node.p.left = None\n self.clearNoneNodes(node.left)\n self.clearNoneNodes(node.right)\n \n # GetValues: calling getValuesNode(self.root, 0, depth, values)\n def getValues(self, depth):\n values = []\n self.getValuesNode(self.root, 0, depth, values)\n self.clearNoneNodes(self.root)\n return values\n \n # GetValues from the subtree rooted at node, store in values\n def getValuesNode(self, node, current_depth, depth, values):\n if node != None:\n if current_depth == depth:\n values.append(node.val)\n else:\n if node.left == None:\n none_node = BinaryNode('None')\n none_node.p = node\n node.left = none_node\n if node.right == None:\n none_node = BinaryNode('None')\n none_node.p = node\n node.right = none_node\n self.getValuesNode(node.left, current_depth+1, depth, values)\n self.getValuesNode(node.right, current_depth+1, depth, values)\n \n # Return the right-most node from the subtree rooted at node\n def tree_max(self, node): \n while node.right != None:\n node = node.right\n return node\n\n # Replace the subtree rooted at u with the subtree rooted at v\n def transplant(self, u, v): \n if u.p == None:\n self.root = v\n elif u == u.p.left:\n u.p.left = v\n else:\n u.p.right = v\n if v != None:\n v.p = u.p\n \n # Search for the value=key thru the subtree rooted at node\n def search(self, node, key):\n while node != None and key != node.val:\n if key > node.val:\n node = node.right\n else:\n node = node.left\n return node\n \n # Remove\n def remove(self, val):\n rm_node = self.search(self.root, val)\n if rm_node == None: # invalid remove node\n warnings.warn('The value to be removed does not has a node associated.')\n return\n if rm_node.left == None:\n self.transplant(rm_node, rm_node.right)\n elif rm_node.right == None:\n self.transplant(rm_node, rm_node.left)\n else:\n left_max = self.tree_max(rm_node.left)\n if left_max.p != rm_node:\n self.transplant(left_max, left_max.left)\n left_max.left = rm_node.left\n left_max.left.p = left_max\n self.transplant(rm_node, left_max)\n left_max.right = rm_node.right\n left_max.right.p = left_max\n", "_____no_output_____" ], [ "from enum import Enum\n\nclass DFSTraversalTypes(Enum):\n PREORDER = 1\n INORDER = 2\n POSTORDER = 3\n\nclass DFSTraversal:\n \n # DFSTraversal Constructor\n def __init__(self, tree, traversalType):\n if traversalType == DFSTraversalTypes.INORDER:\n self.ordered_nodes = tree.inOrderWalk(tree.root, list())\n elif traversalType == DFSTraversalTypes.PREORDER:\n self.ordered_nodes = tree.preOrderWalk(tree.root, list())\n elif traversalType == DFSTraversalTypes.POSTORDER:\n self.ordered_nodes = tree.postOrderWalk(tree.root, list())\n else:\n raise TypeError('TraversalType Wrong: must be DFSTraversalTypes.INORDER/PREORDER/POSTORDER')\n # set attributes\n self.tree = tree \n self.type = traversalType\n self.index = 0\n \n # Change Traversal Type\n def changeTraversalType(self, traversalType):\n if self.type == traversalType: # nothing changed\n return\n else:\n if traversalType == DFSTraversalTypes.INORDER: # change to INORDER\n self.ordered_nodes = self.tree.inOrderWalk(self.tree.root, list())\n elif traversalType == DFSTraversalTypes.PREORDER: # change to PREORDER\n self.ordered_nodes = self.tree.preOrderWalk(self.tree.root, list())\n elif traversalType == 
DFSTraversalTypes.POSTORDER: # change to POSTORDER\n                self.ordered_nodes = self.tree.postOrderWalk(self.tree.root, list())\n            else:\n                raise TypeError('TraversalType Wrong: must be DFSTraversalTypes.INORDER/PREORDER/POSTORDER')\n            print('Changed traversalType to be {}'.format(traversalType))\n            self.type = traversalType\n            self.index = 0\n    \n    # Initialize the iterator\n    def __iter__(self):\n        return self\n    \n    # Called by __iter__ to get the next value\n    def __next__(self):\n        try:\n            node = self.ordered_nodes[self.index] \n        except IndexError:\n            raise StopIteration() \n        self.index += 1\n        return node \n    ", "_____no_output_____" ] ], [ [ "### Using code from the imported module `TreeTraversal.py`", "_____no_output_____" ] ], [ [ "# Using code from the imported module\nfrom TreeTraversal import *\n\ntree1 = BinaryTree()\narr1 = [20, 10, 17, 14, 3, 0]\nfor a1 in arr1:\n    tree1.insert(a1)\n\ntree1.postOrderWalk(tree1.root, list())", "_____no_output_____" ], [ "print('Height of tree1: ', len(tree1))\nfor i in range(len(tree1)):\n    print('Level %d values: ' % i, tree1.getValues(i))", "Height of tree1: 4\nLevel 0 values: [20]\nLevel 1 values: [10, 'None']\nLevel 2 values: [3, 17, 'None', 'None']\nLevel 3 values: [0, 'None', 14, 'None', 'None', 'None', 'None', 'None']\n" ], [ "input_array = [20, 10, 17, 14, 3, 0]\nbt = BinaryTree()\nfor val in input_array:\n    bt.insert(val)\ntraversal = DFSTraversal(bt, DFSTraversalTypes.INORDER)\n\nfor val in traversal:\n    print(val)", "0\n3\n10\n14\n17\n20\n" ], [ "traversal.changeTraversalType(DFSTraversalTypes.PREORDER)\nfor val in traversal:\n    print(val)", "Changed traversalType to be DFSTraversalTypes.PREORDER\n20\n10\n3\n0\n17\n14\n" ], [ "traversal.changeTraversalType(DFSTraversalTypes.POSTORDER)\nfor val in traversal:\n    print(val)", "Changed traversalType to be DFSTraversalTypes.POSTORDER\n0\n3\n14\n17\n10\n20\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Problem 2: Markov Chains\n\n[Markov Chains](https://en.wikipedia.org/wiki/Markov_chain) are widely used to model and predict discrete events. Underlying Markov chains are Markov processes which make the assumption that the outcome of a future event only depends on the event immediately preceding it. In this exercise, we will be assuming that weather has Markov properties (e.g. today's weather is dependent only on yesterday's weather). We will use the Markov assumption to create a basic model for predicting weather.", "_____no_output_____" ], [ "To begin, let's categorize weather into six types: ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'].\n\nIn the `weather.csv` file accompanying this homework, each row corresponds to one type of weather (in the order given above) and each column is the probability of one type of weather occurring the following day (also in the order given above).\n\nThe $ij$th element is the probability that the $j$th weather type occurs after the $i$th weather type. So for example, (1,2) is the probability a cloudy day occurs after a sunny day.\n\nTake a look at the data. Make sure you see how if the previous day was sunny, the following day will have a 0.4 probability of being sunny as well. 
If the previous day was raining (index $i = 3$), then the following day (index $j$) has a 0.05 probability of being windy ($j = 5$).", "_____no_output_____" ], [ "### Part 1: Parse the `.csv` file into a `Numpy` array", "_____no_output_____" ] ], [ [ "import numpy as np\n\n#Load CSV file -- hint: you can use np.genfromtxt()\nweather_arr = np.genfromtxt('weather.csv', delimiter=',')\nweather_arr", "_____no_output_____" ] ], [ [ "### Part 2: Create a class called `Markov` that has the following methods:\n\n* `load_data(array)`: loads the Numpy 2D array and stores it as a class variable.\n* `get_prob(previous_day, following_day)`: returns the probability of `following_day` weather given `previous_day` weather. \n\n**Note:** `previous_day` and `following_day` should be passed in string form (e.g. \"sunny\"), as opposed to an index (e.g. 0). \n\n\n", "_____no_output_____" ] ], [ [ "class Markov:\n    \n    def __init__(self, state0='sunny'): # Initial state default to sunny\n        self.data = None\n        self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']\n        self.weather_dict = {t : i for i, t in enumerate(self.weather_types)}\n        self.index = self.weather_dict[state0]\n    \n    def load_data(self, array):\n        self.data = array\n    \n    def get_prob(self, previous_day, following_day):\n        try:\n            p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day]\n            return float(\"{0:.4f}\".format(self.data[p_i, f_i]))\n        except KeyError as e:\n            print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e))", "_____no_output_____" ], [ "mk2 = Markov()\nmk2.load_data(weather_arr)\nmk2.get_prob('sunny', 's')", "KeyError 's': Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])\n" ], [ "mk2.get_prob('sunny', 'sunny')", "_____no_output_____" ], [ "mk2.get_prob('rainy', 'windy')", "_____no_output_____" ], [ "mk2.get_prob('hailing', 'sunny')", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Problem 3: Iterators", "_____no_output_____" ], [ "Iterators are a convenient way to walk along your Markov chain.\n\n#### Part 1: Using your `Markov` class from Problem 2, write `Markov` as an iterator by implementing the `__iter__()` and `__next__()` methods.\n\nRemember: \n* `__iter__()` should return the iterator object and should be implicitly called when the loop begins\n* The `__next__()` method should return the next value and is implicitly called at each step in the loop.\n\nEach 'next' step should be stochastic (i.e. randomly selected based on the relative probabilities of the following day weather types) and should return the next day's weather as a string (e.g. \"sunny\") rather than an index (e.g. 
0).", "_____no_output_____" ] ], [ [ "# Class of Markov as an iterator\nclass Markov:\n \n # Constructor of the Markov Iterator\n def __init__(self, state0='sunny'): # Initial state default to sunny\n self.data = None\n self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']\n self.weather_dict = {t : i for i, t in enumerate(self.weather_types)}\n self.index = self.weather_dict[state0]\n #print(self.weather_types, '\\n')\n \n # Load weather.csv \n def load_data(self, array):\n self.data = array\n \n # Get probability of the following_day weather given the previous_day weather\n def get_prob(self, previous_day, following_day):\n try:\n p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day]\n return float(\"{0:.4f}\".format(self.data[p_i, f_i]))\n except KeyError as e:\n print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e))\n \n # Return the Markov iterator itself\n def __iter__(self):\n return self\n \n # Called by __iter__ to get the next value\n def __next__(self):\n next_probs = self.data[self.index, :]\n next_probs_int = (next_probs * 100).astype(np.int8)\n next_cum_int = np.zeros(next_probs_int.shape).astype(np.int8)\n \n # Randomly choosing the nextday's weather using cumulant probabilities as boundaries\n for i, next_prob in enumerate(next_probs_int):\n if i == 0:\n next_cum_int[i] = next_prob\n else:\n next_cum_int[i] = next_cum_int[i-1] + next_prob\n r = np.random.choice(100)\n print('------------------ r={}, next_cum_int={}'.format(r, next_cum_int))\n if r < next_cum_int[0]:\n self.index = 0\n else:\n idx = 1\n while idx < len(next_cum_int):\n if r >= next_cum_int[idx-1] and r < next_cum_int[idx]:\n break\n idx += 1\n self.index = idx\n print('------------------ the_next_index = {}, {}'.format(self.index, self.weather_types[self.index]))\n return self.weather_types[self.index]\n ", "_____no_output_____" ], [ "np.random.seed(12345)\nmk = Markov('sunny')\nmk.load_data(weather_arr)\n\ni = 0\nfor weather in mk:\n print(weather)\n i += 1\n if i >= 10:\n break", "------------------ r=98, next_cum_int=[ 40 70 80 85 95 100]\n------------------ the_next_index = 5, hailing\nhailing\n------------------ r=29, next_cum_int=[ 10 30 65 75 80 100]\n------------------ the_next_index = 1, cloudy\ncloudy\n------------------ r=1, next_cum_int=[ 30 70 80 90 98 100]\n------------------ the_next_index = 0, sunny\nsunny\n------------------ r=36, next_cum_int=[ 40 70 80 85 95 100]\n------------------ the_next_index = 0, sunny\nsunny\n------------------ r=41, next_cum_int=[ 40 70 80 85 95 100]\n------------------ the_next_index = 1, cloudy\ncloudy\n------------------ r=34, next_cum_int=[ 30 70 80 90 98 100]\n------------------ the_next_index = 1, cloudy\ncloudy\n------------------ r=29, next_cum_int=[ 30 70 80 90 98 100]\n------------------ the_next_index = 0, sunny\nsunny\n------------------ r=1, next_cum_int=[ 40 70 80 85 95 100]\n------------------ the_next_index = 0, sunny\nsunny\n------------------ r=59, next_cum_int=[ 40 70 80 85 95 100]\n------------------ the_next_index = 1, cloudy\ncloudy\n------------------ r=14, next_cum_int=[ 30 70 80 90 98 100]\n------------------ the_next_index = 0, sunny\nsunny\n" ] ], [ [ "## Note of Discussion\n> After discussion with Michelle (Chia Chi Ho), I tried using \n\n> **`np.random.choice(list, size=1, p=specified_probs)[0]` **\n\n> to directly implement the random choice by specified probabilities. 
The code gets shorter and cleaner.\n\n> Code below this part uses `__next__(self)` implemented with **`np.random.choice(list, size=1, p=specified_probs)[0]`**", "_____no_output_____" ] ], [ [ "# Class of Markov as an iterator\nclass Markov:\n    \n    # Constructor of the Markov Iterator\n    def __init__(self, state0='sunny'): # Initial state default to sunny\n        self.data = None\n        self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']\n        self.weather_dict = {t : i for i, t in enumerate(self.weather_types)}\n        self.index = self.weather_dict[state0]\n        #print(self.weather_types, '\\n')\n    \n    # Load weather.csv \n    def load_data(self, array):\n        self.data = array\n    \n    # Get probability of the following_day weather given the previous_day weather\n    def get_prob(self, previous_day, following_day):\n        try:\n            p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day]\n            return float(\"{0:.4f}\".format(self.data[p_i, f_i]))\n        except KeyError as e:\n            print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e))\n    \n    # Return the Markov iterator itself\n    def __iter__(self):\n        return self\n    \n    # Called by __iter__ to get the next value, using np.random.choice\n    def __next__(self):\n        next_probs = self.data[self.index, :]\n        next_weather = np.random.choice(self.weather_types, size=1, p=next_probs)[0]\n        self.index = self.weather_dict[next_weather]\n        return next_weather\n    ", "_____no_output_____" ], [ "# Using __next__ implemented with np.random.choice(self.weather_types, size=1, p=next_probs)[0]\nnp.random.seed(12345)\nmk = Markov('sunny')\nmk.load_data(weather_arr)\n\ni = 0\nfor weather in mk:\n    print(weather)\n    i += 1\n    if i >= 10:\n        break", "windy\ncloudy\nsunny\nsunny\ncloudy\ncloudy\nwindy\nwindy\nwindy\nwindy\n" ] ], [ [ "#### Part 2: We want to predict what weather will be like in a week for 7 different cities.\n\nNow that we have our `Markov` iterator, we can try to predict what the weather will be like in seven days from now.\n\nGiven each city's current weather in the dictionary `city_weather` (see below), simulate what the weather will be like in 7 days from now. Rather than just producing one prediction per city, simulate 100 such predictions per city and store the most commonly occurring prediction.\n\nIn your submission, print a dictionary `city_weather_predictions` that has each city as a key and the most commonly predicted weather as the corresponding value.\n\n**Note**: Don't worry if your values don't seem to make intuitive sense. 
We made up the weather probabilities.", "_____no_output_____" ] ], [ [ "city_weather = {\n 'New York': 'rainy',\n 'Chicago': 'snowy',\n 'Seattle': 'rainy',\n 'Boston': 'hailing',\n 'Miami': 'windy',\n 'Los Angeles': 'cloudy',\n 'San Fransisco': 'windy'\n}\n\nnp.random.seed(12345)\nn_days = 7\nn_sim = 100\ncity_weather_predictions = {}\ncity_weather_predictions_sims = {}\n\nprint('The weather in 7 days from now:\\n')\nfor city, w0 in city_weather.items():\n sim_preds_count = np.zeros(6).astype(np.int8)\n for i in range(n_sim): # In each simulation,\n mk = Markov(w0) # Initialize the Markov Chain\n mk.load_data(weather_arr) # Load the transfer probs\n ii = 0\n for weather in mk: # Call __next__() implicitly by __iter__()\n ci = mk.index # record the index of the current weather (state)\n ii += 1\n if ii >= n_days: # iterate for 7 consecutive days\n break\n sim_preds_count[ci] += 1\n \n predicted = mk.weather_types[np.argmax(sim_preds_count)]\n city_weather_predictions[city] = predicted\n city_weather_predictions_sims[city] = sim_preds_count\n# print('np.sum(sim_preds_count) = {}'.format(np.sum(sim_preds_count)))\n print('{}: {}'.format(city, predicted))\n \n", "The weather in 7 days from now:\n\nNew York: cloudy\nChicago: cloudy\nSeattle: sunny\nBoston: sunny\nMiami: sunny\nLos Angeles: cloudy\nSan Fransisco: sunny\n" ], [ "for (city, w_pred), (c, counts) in zip(city_weather_predictions.items(), city_weather_predictions_sims.items()):\n print('{}: {} {}'.format(city, w_pred, counts))", "New York: cloudy [25 32 22 13 7 1]\nChicago: cloudy [29 32 18 4 12 5]\nSeattle: sunny [30 21 22 12 10 5]\nBoston: sunny [30 28 11 10 15 6]\nMiami: sunny [33 30 14 10 9 4]\nLos Angeles: cloudy [26 31 14 10 8 11]\nSan Fransisco: sunny [29 28 16 10 6 11]\n" ], [ "# Print the dictionary city_weather_predictions\nprint(city_weather_predictions)", "{'New York': 'cloudy', 'Chicago': 'cloudy', 'Seattle': 'sunny', 'Boston': 'sunny', 'Miami': 'sunny', 'Los Angeles': 'cloudy', 'San Fransisco': 'sunny'}\n" ] ] ]
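Beyond seven-day simulations, the long-run behaviour of such a chain is captured by its stationary distribution, the left eigenvector of the transition matrix for eigenvalue 1. A hedged sketch (not part of the homework; the 2x2 matrix is a hypothetical stand-in for `weather_arr`):

```python
import numpy as np

def stationary_distribution(P):
    # left eigenvector of a row-stochastic matrix for eigenvalue 1,
    # normalized so the probabilities sum to 1
    vals, vecs = np.linalg.eig(P.T)
    v = np.real(vecs[:, np.argmin(np.abs(vals - 1.0))])
    return v / v.sum()

P = np.array([[0.9, 0.1],
              [0.5, 0.5]])
print(stationary_distribution(P))  # -> [0.8333..., 0.1666...]
```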
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a6f921b07bef050ef8bd48c36c7dce354cea8bb
163,048
ipynb
Jupyter Notebook
docs/tut/quick-topics/Managing-pins.ipynb
camponogaraviera/qiskit-metal
e1788b3139485e96b1a7875bb5142cf9ff5da167
[ "Apache-2.0" ]
167
2021-03-17T20:35:17.000Z
2022-03-31T13:25:04.000Z
docs/tut/quick-topics/Managing-pins.ipynb
camponogaraviera/qiskit-metal
e1788b3139485e96b1a7875bb5142cf9ff5da167
[ "Apache-2.0" ]
307
2021-03-17T14:07:43.000Z
2022-03-23T14:22:20.000Z
docs/tut/quick-topics/Managing-pins.ipynb
camponogaraviera/qiskit-metal
e1788b3139485e96b1a7875bb5142cf9ff5da167
[ "Apache-2.0" ]
122
2021-03-17T14:21:24.000Z
2022-03-18T10:09:38.000Z
296.990893
145,524
0.910235
[ [ [ "# Managing pins", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import qiskit_metal as metal\nfrom qiskit_metal import designs, draw\nfrom qiskit_metal import MetalGUI, Dict, Headings\nHeadings.h1('Welcome to Qiskit Metal')", "_____no_output_____" ], [ "design = designs.DesignPlanar()\ngui = MetalGUI(design)", "_____no_output_____" ] ], [ [ "First we create some transmon pockets to have a number of pins generated for use.", "_____no_output_____" ] ], [ [ "from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket\n\n## Custom options for all the transmons\noptions = dict(\n    # Some options we want to modify from the defaults\n    # (see below for defaults)\n    pad_width = '425 um', \n    pocket_height = '650um',\n    # Adding 4 connectors (see below for defaults)\n    connection_pads=dict(  \n        a = dict(loc_W=+1,loc_H=+1), \n        b = dict(loc_W=-1,loc_H=+1, pad_height='30um'),\n        c = dict(loc_W=+1,loc_H=-1, pad_width='200um'),\n        d = dict(loc_W=-1,loc_H=-1, pad_height='50um')\n    )\n)\n\n## Create 4 transmons\n\nq1 = TransmonPocket(design, 'Q1', options = dict(\n    pos_x='+2.4mm', pos_y='+0.0mm', **options))\nq2 = TransmonPocket(design, 'Q2', options = dict(\n    pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', **options))\nq3 = TransmonPocket(design, 'Q3', options = dict(\n    pos_x='-2.4mm', pos_y='+0.0mm', **options))\nq4 = TransmonPocket(design, 'Q4', options = dict(\n    pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', **options))\n\n## Rebuild the design\ngui.rebuild()\ngui.autoscale()", "_____no_output_____" ] ], [ [ "Selecting the different components via the GUI shows the pins each component has. You can also see this via:", "_____no_output_____" ] ], [ [ "design.components.Q1.pins.keys()", "_____no_output_____" ] ], [ [ "Each pin contains a dictionary of information which can be used by other components or renderers. ", "_____no_output_____" ] ], [ [ "design.components.Q1.pins.a", "_____no_output_____" ] ], [ [ "We can pass these pins into some components to auto-generate connections, such as CPW lines.", "_____no_output_____" ] ], [ [ "from qiskit_metal.qlibrary.tlines.straight_path import RouteStraight\n\nc1 = RouteStraight(design, 'c1', type=\"Route\", options=dict(pin_inputs=dict(start_pin = dict(component = 'Q1',\n                                                                        pin = 'd'),\n                                                            end_pin=dict(component = 'Q2',\n                                                                        pin = 'c'))))\ngui.rebuild()\ngui.autoscale()", "_____no_output_____" ] ], [ [ "The example CPW also automatically generates its own pins based on the pin inputs it was given. This is to allow for such a component to not\nbe destroyed if the component it is attached to is deleted.", "_____no_output_____" ] ], [ [ "design.components.c1.pins", "_____no_output_____" ] ], [ [ "We can also see what active connections there are from the netlist. Pins that share the same net_id indicate they are connected. Pins that are not on the net list are currently open.", "_____no_output_____" ] ], [ [ "design.net_info", "_____no_output_____" ] ], [ [ "What happens if we try to pass in a component/pin combo that doesn't exist?", "_____no_output_____" ] ], [ [ "#A component that doesn't exist\nc2 = RouteStraight(design, 'c2', type=\"Route\", options=dict(pin_inputs = dict(start_pin = dict(component = 'NotReallyHere',\n                                                                        pin = 'd'),\n                                                            end_pin =dict(component = 'Q2',\n                                                                         pin = 'a'))))", "04:25PM 48s WARNING [__init__]: Component NotReallyHere does not exist. c2 has not been built. 
Please check your pin_input values.\n" ], [ "#A pin that doesn't exist\nc3 = RouteStraight(design, 'c3', type=\"Route\", options=dict(pin_inputs = dict(start_pin = dict(component = 'Q1',\n                                                                        pin = 'NotReallyHere'),\n                                                            end_pin =dict(component = 'Q2',\n                                                                         pin = 'a'))))", "04:25PM 48s WARNING [__init__]: Pin NotReallyHere does not exist in component Q1. c3 has not been built. Please check your pin_input values.\n" ] ], [ [ "Or if we try to pass in a pin that is already connected.", "_____no_output_____" ] ], [ [ "c4 = RouteStraight(design, 'c4', type=\"Route\", options=dict(pin_inputs = dict(start_pin = dict(component = 'Q1',\n                                                                        pin = 'b'),\n                                                            end_pin =dict(component = 'Q2',\n                                                                         pin = 'c'))))", "04:25PM 48s WARNING [__init__]: Pin c of component Q2 is already in use. c4 has not been built. Please check your pin_input values.\n" ] ], [ [ "pin_inputs is the default dictionary for passing pins into a component, **BUT** how the dictionary is structured is component dependent. Using the above structure (e.g. start_pin, end_pin) is suggested for any two-port connection, but you should always check the documentation for the specific component you are wanting to use.", "_____no_output_____" ] ], [ [ "Headings.h1('CPW Examples')", "_____no_output_____" ] ], [ [ "An example set showing some currently functional CPW components, including both simple auto-routing and meandering.", "_____no_output_____" ] ], [ [ "design.delete_all_components()", "_____no_output_____" ], [ "from qiskit_metal.qlibrary.terminations.open_to_ground import OpenToGround\nfrom qiskit_metal.qlibrary.tlines.framed_path import RouteFramed\nfrom qiskit_metal.qlibrary.tlines.straight_path import RouteStraight\nfrom qiskit_metal.qlibrary.tlines.meandered import RouteMeander", "_____no_output_____" ], [ "open_start_straight = OpenToGround(design,'Open_straight_start',options=Dict(pos_x='0um',pos_y='0um',orientation = '-90'))\nopen_end_straight = OpenToGround(design,'Open_straight_end',options=Dict(pos_x='0um',pos_y='1500um',orientation = '90'))\n\nopen_start_auto = OpenToGround(design,'Open_auto_start',options=Dict(pos_x='250um',pos_y='0um',orientation = '-90'))\nopen_end_auto = OpenToGround(design,'Open_auto_end',options=Dict(pos_x='250um',pos_y='1500um',orientation = '0'))\n\nopen_start_meander = OpenToGround(design,'Open_meander_start',options=Dict(pos_x='1000um',pos_y='0um',orientation = '-90'))\nopen_end_meander = OpenToGround(design,'Open_meander_end',options=Dict(pos_x='1000um',pos_y='1500um',orientation = '90'))\n\ntestStraight = RouteStraight(design,'straightTest',options=Dict(pin_inputs=Dict(\n                                        start_pin=Dict(\n                                            component = 'Open_straight_start',\n                                            pin = 'open'),\n                                        end_pin=Dict(\n                                            component = 'Open_straight_end',\n                                            pin = 'open')\n                                    )))\n\ntestAuto = RouteFramed(design,'autoTest',options=Dict(pin_inputs=Dict(\n                                        start_pin=Dict(\n                                            component = 'Open_auto_start',\n                                            pin = 'open'),\n                                        end_pin=Dict(\n                                            component = 'Open_auto_end',\n                                            pin = 'open')\n                                    )))\n\ntestMeander = RouteMeander(design,'meanderTest',options=Dict(pin_inputs=Dict(\n                                        start_pin=Dict(\n                                            component = 'Open_meander_start',\n                                            pin = 'open'),\n                                        end_pin=Dict(\n                                            component = 'Open_meander_end',\n                                            pin = 'open')\n                                    )))\ngui.rebuild()\ngui.autoscale()", "_____no_output_____" ], [ "gui.screenshot()", "_____no_output_____" ] ] ]
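To see why routes generating their own pins matters, one could run the following check right after the `c1` cell earlier in this record (a hedged sketch: `delete_component` is assumed from the qiskit-metal design API and is not used in the notebook itself; everything else reuses objects created above):

```python
# Assumed API: design.delete_component removes a single named component.
design.delete_component('Q1')     # remove one endpoint of route c1
gui.rebuild()
print(design.components.c1.pins)  # c1 survives and still carries its own pins
design.net_info                   # nets that referenced Q1 drop off the list
```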
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a6fa092a6168f34674213d3034eac7556c241a0
283,360
ipynb
Jupyter Notebook
Week 10 Computer Vision/Deep Learning.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
29
2018-04-18T07:43:27.000Z
2021-12-13T17:24:24.000Z
Week 10 Computer Vision/Deep Learning.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
null
null
null
Week 10 Computer Vision/Deep Learning.ipynb
jraval/LambdaSchoolDataScience
a5e576d1950fba1bd00e92039fe825c622f5ffad
[ "MIT" ]
46
2018-08-18T15:59:15.000Z
2021-11-17T02:14:51.000Z
188.029197
58,036
0.831719
[ [ [ "# Deep Convolutional Neural Networks \n\nIn this assignment, we will be using the Keras library to build, train, and evaluate some *relatively simple* Convolutional Neural Networks to demonstrate how adding layers to a network can improve accuracy while being more computationally expensive. \n\nThe purpose of this assignment is for you to demonstrate understanding of the appropriate structure of a convolutional neural network and to give you an opportunity to research any parameters or elements of CNNs that you don't fully understand.\n\nWe will be using the cifar10 dataset for this assignment; however, in order to keep the dataset size small enough to be trained in a reasonable amount of time in a Google Colab, we will only be looking at two classes from the dataset - cats and dogs.\n\n![CNN Structure Diagram](http://www.ryanleeallred.com/wp-content/uploads/2018/06/CNN-diagram.jpeg)\n", "_____no_output_____" ] ], [ [ "# Import important libraries and methods\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nif K.backend()=='tensorflow':\n    K.set_image_dim_ordering(\"th\")\n\n# input image dimensions\nimg_rows, img_cols = 32, 32 \n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = cifar10.load_data() \n\n# Important Hyperparameters\nbatch_size = 32 \nnum_classes = 2\nepochs = 100\n\n# Plot sample image from each cifar10 class.\nclass_names = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']\nfig = plt.figure(figsize=(8,3))\nfor i in range(10):\n    ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])\n    idx = np.where(y_train[:]==i)[0]\n    features_idx = x_train[idx,::]\n    img_num = np.random.randint(features_idx.shape[0])\n    im = np.transpose(features_idx[img_num,::],(1,2,0))\n    ax.set_title(class_names[i])\n    plt.imshow(im)\nplt.show()\n\n# Only look at cats [=3] and dogs [=5]\ntrain_picks = np.ravel(np.logical_or(y_train==3,y_train==5)) \ntest_picks = np.ravel(np.logical_or(y_test==3,y_test==5))  \n\ny_train = np.array(y_train[train_picks]==5,dtype=int)\ny_test = np.array(y_test[test_picks]==5,dtype=int)\n\nx_train = x_train[train_picks]\nx_test = x_test[test_picks]\n\n# check for image_data format and format image shape accordingly\nif K.image_data_format() == 'channels_first':\n    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)\n    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\n    input_shape = (3, img_rows, img_cols)\nelse:\n    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)\n    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)\n    input_shape = (img_rows, img_cols, 3)\n\n# Normalize pixel values between 0 and 1\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# Convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(np.ravel(y_train), num_classes)\ny_test = keras.utils.to_categorical(np.ravel(y_test), num_classes)\n\n# Check train and test lengths\nprint('y_train length:', len(y_train))\nprint('x_train length:', len(x_train))\nprint('y_test length:', len(y_test))\nprint('x_test length:', len(x_test))", "Using TensorFlow backend.\n" ] ], [ [ "# Model #1\n\nThis model will be almost as simple as we can make it. 
It should look something like:\n\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final dropout layer\n---\n* Flatten\n* Fully-Connected (Dense)\n* Dropout - use .5 this time \n* Fully-Connected (Dense layer where # neurons = # final classes/labels)\n\n\nThen compile the model using categorical_crossentropy as your loss metric. Use the Adam optimizer, and accuracy as your overall scoring metric. \n\nIf you're lost when you get to this point, make sure you look at the lecture colab for somewhat similar sample code.\n\n", "_____no_output_____" ] ], [ [ "x_train.shape", "_____no_output_____" ], [ "model1 = Sequential()\nmodel1.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel1.add(Dropout(.25))\nmodel1.add(Conv2D(16, (3,3), activation='relu'))\nmodel1.add(Dropout(.25))\nmodel1.add(MaxPooling2D((2,2)))\nmodel1.add(Flatten())\nmodel1.add(Dense(64, activation='relu'))\nmodel1.add(Dropout(0.5))\nmodel1.add(Dense(2, activation='softmax'))\n\nmodel1.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 8, 30, 30) 224 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 8, 30, 30) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 16, 28, 28) 1168 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 16, 28, 28) 0 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 16, 14, 14) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 3136) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 64) 200768 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 2) 130 \n=================================================================\nTotal params: 202,290\nTrainable params: 202,290\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model1.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "## Fit your model\n\nFit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies by epoch.", "_____no_output_____" ] ], [ [ "model1_training = model1.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1)", "Train on 9000 samples, validate on 1000 samples\nEpoch 1/50\n9000/9000 [==============================] - 2s 242us/step - loss: 0.6852 - acc: 0.5547 - val_loss: 0.6672 - val_acc: 0.6120\nEpoch 2/50\n9000/9000 [==============================] - 1s 126us/step - loss: 0.6545 - acc: 0.6166 - val_loss: 0.6417 - val_acc: 0.6570\nEpoch 3/50\n9000/9000 [==============================] - 1s 124us/step - loss: 0.6310 - acc: 0.6410 - val_loss: 0.6367 - val_acc: 0.6580\nEpoch 4/50\n9000/9000 [==============================] - 1s 124us/step - loss: 0.6146 - acc: 0.6617 - val_loss: 0.6177 - val_acc: 0.6790\nEpoch 5/50\n9000/9000 [==============================] - 1s 123us/step - loss: 0.6035 - acc: 0.6742 - val_loss: 
0.5939 - val_acc: 0.6700\nEpoch 6/50\n9000/9000 [==============================] - 1s 125us/step - loss: 0.5887 - acc: 0.6829 - val_loss: 0.5932 - val_acc: 0.6960\nEpoch 7/50\n9000/9000 [==============================] - 1s 125us/step - loss: 0.5758 - acc: 0.6964 - val_loss: 0.5873 - val_acc: 0.6940\nEpoch 8/50\n9000/9000 [==============================] - 1s 125us/step - loss: 0.5660 - acc: 0.7074 - val_loss: 0.5856 - val_acc: 0.6770\nEpoch 9/50\n 128/9000 [..............................] - ETA: 1s - loss: 0.5445 - acc: 0.7500" ] ], [ [ "## Plot Training and Validation Accuracies\n\nUse your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title.", "_____no_output_____" ] ], [ [ "def train_val_metrics(epochs, model_training):\n epochs = range(1, epochs+1)\n metrics = model_training.history\n train_loss = metrics['loss']\n train_acc = metrics['acc']\n val_loss = metrics['val_loss']\n val_acc = metrics['val_acc']\n \n ax = plt.subplot(211)\n train, = ax.plot(epochs, train_loss)\n val, = ax.plot(epochs, val_loss)\n ax.legend([train, val], ['training', 'validation'])\n ax.set(xlabel='epochs', ylabel='categorical cross-entropy loss')\n\n ax2 = plt.subplot(212)\n train2, = ax2.plot(epochs, train_acc)\n val2, = ax2.plot(epochs, val_acc)\n ax2.legend([train2, val2], ['training', 'validation'])\n ax2.set(xlabel='epochs', ylabel='accuracy')", "_____no_output_____" ], [ "train_val_metrics(50, model1_training)", "_____no_output_____" ] ], [ [ "The model begins to overfit around epoch 20 or so. Early stopping would be useful here.", "_____no_output_____" ], [ "![something a little deeper](http://www.ryanleeallred.com/wp-content/uploads/2018/06/a-little-deeper.gif)", "_____no_output_____" ], [ "# Model #2\n\nLets add an additional set of convolutional->activation->pooling to this model:\n\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final layer\n---\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final layer\n---\n* Flatten\n* Fully-Connected (Dense)\n* Dropout - use .5 this time \n* Fully-Connected (Dense layer where # neurons = # final classes/labels)\n\nAgain, compile the model using categorical_crossentropy as your loss metric and use the Adam optimizer, and accuracy as your overall scoring metric. 
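The early-stopping remark above can be made concrete. A minimal sketch, assuming the `model1`, `x_train`, and `y_train` defined earlier; the `patience` value is an illustrative choice, and `restore_best_weights` requires a reasonably recent Keras release:

```python
from keras.callbacks import EarlyStopping

# Stop once val_loss has failed to improve for 5 consecutive epochs,
# then roll back to the best weights observed during training.
early_stop = EarlyStopping(monitor='val_loss', patience=5,
                           restore_best_weights=True)

model1_training = model1.fit(x_train, y_train,
                             epochs=50, batch_size=128,
                             validation_split=0.1,
                             callbacks=[early_stop])
```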
", "_____no_output_____" ] ], [ [ "model2 = Sequential()\n\nmodel2.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel2.add(Dropout(.25))\nmodel2.add(Conv2D(16, (3,3), activation='relu'))\nmodel2.add(Dropout(.25))\nmodel2.add(MaxPooling2D((2,2)))\n\nmodel2.add(Conv2D(16, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel2.add(Dropout(.25))\nmodel2.add(Conv2D(32, (3,3), activation='relu'))\nmodel2.add(Dropout(.25))\nmodel2.add(MaxPooling2D((2,2)))\n\nmodel2.add(Flatten())\nmodel2.add(Dense(64, activation='relu'))\nmodel2.add(Dropout(0.5))\nmodel2.add(Dense(2, activation='softmax'))\n\nmodel2.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel2.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_3 (Conv2D) (None, 8, 30, 30) 224 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 8, 30, 30) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 16, 28, 28) 1168 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 16, 28, 28) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 16, 14, 14) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 16, 12, 12) 2320 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 16, 12, 12) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 32, 10, 10) 4640 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 32, 10, 10) 0 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 32, 5, 5) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 800) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 64) 51264 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 2) 130 \n=================================================================\nTotal params: 59,746\nTrainable params: 59,746\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Fit your model\n\nFit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies by epoch.", "_____no_output_____" ] ], [ [ "model2_training = model2.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1)", "Train on 9000 samples, validate on 1000 samples\nEpoch 1/50\n9000/9000 [==============================] - 2s 217us/step - loss: 0.6927 - acc: 0.5264 - val_loss: 0.6904 - val_acc: 0.5500\nEpoch 2/50\n9000/9000 [==============================] - 1s 157us/step - loss: 0.6847 - acc: 0.5568 - val_loss: 0.6871 - val_acc: 0.5400\nEpoch 3/50\n9000/9000 [==============================] - 1s 157us/step - loss: 0.6723 - acc: 0.5896 - val_loss: 0.6825 - val_acc: 0.5470\nEpoch 4/50\n9000/9000 [==============================] - 1s 157us/step - loss: 0.6583 - acc: 0.6091 - val_loss: 0.6738 - val_acc: 0.5680\nEpoch 5/50\n9000/9000 [==============================] - 1s 
157us/step - loss: 0.6440 - acc: 0.6291 - val_loss: 0.6653 - val_acc: 0.5830\nEpoch 6/50\n9000/9000 [==============================] - 1s 157us/step - loss: 0.6270 - acc: 0.6439 - val_loss: 0.6216 - val_acc: 0.6670\nEpoch 7/50\n9000/9000 [==============================] - 1s 159us/step - loss: 0.6081 - acc: 0.6673 - val_loss: 0.6066 - val_acc: 0.6770\nEpoch 8/50\n 128/9000 [..............................] - ETA: 1s - loss: 0.5885 - acc: 0.6719" ] ], [ [ "## Plot Training and Validation Accuracies\n\nUse your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title.", "_____no_output_____" ] ], [ [ "train_val_metrics(50, model2_training)", "_____no_output_____" ] ], [ [ "The model continues to find loss and accuracy improvements, suggesting that it could be trained for more epochs.", "_____no_output_____" ], [ "![We Need To Go Deeper](http://www.ryanleeallred.com/wp-content/uploads/2018/06/go-deeper.gif)", "_____no_output_____" ], [ "# Model #3\n\nFinally, one more set of convolutional/activation/pooling:\n\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final layer\n---\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final layer\n---\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Conv2D - kernel_size = (3,3)\n* Relu Activation\n* Max Pooling - pool_size = (2,2)\n* Dropout - use .25 for all layers but the final layer\n---\n\n* Flatten\n* Fully-Connected (Dense)\n* Dropout - use .5 this time \n* Fully-Connected (Dense layer where # neurons = # final classes/labels)\n\nAgain, compile the model using categorical_crossentropy as your loss metric and use the Adam optimizer, and accuracy as your overall scoring metric. 
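As an aside on the observation above that Model #2 had not converged after 50 epochs: a second call to `fit` resumes from the current weights rather than restarting, so training can simply be extended. A minimal sketch under that assumption:

```python
# Continues from the weights learned in the first 50 epochs.
extra_training = model2.fit(x_train, y_train,
                            epochs=20, batch_size=128,
                            validation_split=0.1)

# Concatenate the two histories to plot one continuous 70-epoch curve.
full_val_acc = (model2_training.history['val_acc']
                + extra_training.history['val_acc'])
print(len(full_val_acc))  # 70
```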
", "_____no_output_____" ] ], [ [ "model3 = Sequential()\n\nmodel3.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel3.add(Dropout(.25))\nmodel3.add(Conv2D(16, (3,3), activation='relu'))\nmodel3.add(Dropout(.25))\nmodel3.add(MaxPooling2D((2,2), strides=1))\n\nmodel3.add(Conv2D(16, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel3.add(Dropout(.25))\nmodel3.add(Conv2D(32, (3,3), activation='relu'))\nmodel3.add(Dropout(.25))\nmodel3.add(MaxPooling2D((2,2), strides=1))\n\nmodel3.add(Conv2D(32, (3,3), activation='relu', input_shape=(3, 32, 32)))\nmodel3.add(Dropout(.25))\nmodel3.add(Conv2D(64, (3,3), activation='relu'))\nmodel3.add(Dropout(.25))\nmodel3.add(MaxPooling2D(2,2))\n\nmodel3.add(Flatten())\nmodel3.add(Dense(128, activation='relu'))\nmodel3.add(Dropout(0.5))\nmodel3.add(Dense(2, activation='softmax'))\n\nmodel3.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel3.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_7 (Conv2D) (None, 8, 30, 30) 224 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 8, 30, 30) 0 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 16, 28, 28) 1168 \n_________________________________________________________________\ndropout_10 (Dropout) (None, 16, 28, 28) 0 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 16, 27, 27) 0 \n_________________________________________________________________\nconv2d_9 (Conv2D) (None, 16, 25, 25) 2320 \n_________________________________________________________________\ndropout_11 (Dropout) (None, 16, 25, 25) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 32, 23, 23) 4640 \n_________________________________________________________________\ndropout_12 (Dropout) (None, 32, 23, 23) 0 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 32, 22, 22) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 32, 20, 20) 9248 \n_________________________________________________________________\ndropout_13 (Dropout) (None, 32, 20, 20) 0 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 64, 18, 18) 18496 \n_________________________________________________________________\ndropout_14 (Dropout) (None, 64, 18, 18) 0 \n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 64, 9, 9) 0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 5184) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 128) 663680 \n_________________________________________________________________\ndropout_15 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_6 (Dense) (None, 2) 258 \n=================================================================\nTotal params: 700,034\nTrainable params: 700,034\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Fit your model\n\nFit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies 
by epoch.", "_____no_output_____" ] ], [ [ "model3_training = model3.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1)", "Train on 9000 samples, validate on 1000 samples\nEpoch 1/50\n9000/9000 [==============================] - 4s 405us/step - loss: 0.6998 - acc: 0.5192 - val_loss: 0.6933 - val_acc: 0.4940\nEpoch 2/50\n9000/9000 [==============================] - 3s 301us/step - loss: 0.6862 - acc: 0.5566 - val_loss: 0.6935 - val_acc: 0.4970\nEpoch 3/50\n9000/9000 [==============================] - 3s 304us/step - loss: 0.6779 - acc: 0.5866 - val_loss: 0.6897 - val_acc: 0.5170\nEpoch 4/50\n9000/9000 [==============================] - 3s 301us/step - loss: 0.6667 - acc: 0.6040 - val_loss: 0.6867 - val_acc: 0.5400\nEpoch 5/50\n9000/9000 [==============================] - 3s 300us/step - loss: 0.6582 - acc: 0.6148 - val_loss: 0.6790 - val_acc: 0.5610\nEpoch 6/50\n7808/9000 [=========================>....] - ETA: 0s - loss: 0.6455 - acc: 0.6297" ] ], [ [ "## Plot Training and Validation Accuracies\n\nUse your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title.", "_____no_output_____" ] ], [ [ "train_val_metrics(50, model3_training)", "_____no_output_____" ] ], [ [ "# Stretch Goal:\n\n## Use other classes from Cifar10\nTry using different classes from the Cifar10 dataset or use all 10. You might need to sample the training data or limit the number of epochs if you decide to use the entire dataset due to processing constraints.\n\n## Hyperparameter Tune Your Model\nIf you have successfully complete shown how increasing the depth of a neural network can improve its accuracy, and you feel like you have a solid understanding of all of the different parts of CNNs, try hyperparameter tuning your strongest model to see how much additional accuracy you can squeeze out of it. This will also give you a chance to research the different hyperparameters as well as their significance/purpose. (There are lots and lots)\n\n---\n\nHere's a helpful article that will show you how to get started using GridSearch to hyperaparameter tune your CNN. (should you desire to use that method):\n\n[Grid Search Hyperparameters for Deep Learning Models in Python With Keras](https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6fbfe02d9a6bacab53d626bc0ce1c565b7d43f
7,197
ipynb
Jupyter Notebook
nb_ex11_1_qai_qc.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
4
2021-09-21T15:35:04.000Z
2021-12-14T12:14:44.000Z
nb_ex11_1_qai_qc.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
null
null
null
nb_ex11_1_qai_qc.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
null
null
null
23.366883
64
0.494095
[ [ [ "# 11장. 케라스로 구현하는 QAI(양자인공지능)\n## 11.2 양자컴퓨팅 알고리즘 구현", "_____no_output_____" ], [ "- Cirq 라이브러리를 파이썬 환경으로 불러오기", "_____no_output_____" ] ], [ [ "import cirq", "_____no_output_____" ] ], [ [ "### 11.2.1 기본 양자 회로 만들기 ", "_____no_output_____" ] ], [ [ "# 양자비트와 양자회로 만들기\nq = cirq.NamedQubit('My Qubit')\ncircuit = cirq.Circuit(cirq.measure(q))\nprint(circuit)\n\n# 만들어진 양자회로를 시뮬레이션을 통해 어떤 결과가 만들어지는지 확인\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=3)\nprint(m_outputs.measurements)", "My Qubit: ───M───\n{'My Qubit': array([[0],\n [0],\n [0]], dtype=int8)}\n" ] ], [ [ "### 11.2.2 입력을 반전시키는 양자 회로", "_____no_output_____" ] ], [ [ "q = cirq.NamedQubit('My Qubit')\ncircuit = cirq.Circuit(cirq.X(q), cirq.measure(q))\nprint(circuit)\n\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=10)\nprint(m_outputs.measurements['My Qubit'][:,0])", "My Qubit: ───X───M───\n[1 1 1 1 1 1 1 1 1 1]\n" ] ], [ [ "### 11.2.3 두 상태를 중첩하는 양자회로", "_____no_output_____" ] ], [ [ "import numpy as np\n\nq = cirq.NamedQubit('My Qubit')\ncircuit = cirq.Circuit(cirq.H(q), cirq.measure(q))\nprint(circuit)\n\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=10)\nresults = m_outputs.measurements['My Qubit'][:,0]\nprint('Results=',results,' Average=',np.mean(results))\n\n# 충분히 반복하게되면 평균이 0.5에 더 가까워지는지 확인하기 위해 1000번 측정\nm_outputs = simulator.run(circuit, repetitions=1000)\nresults = m_outputs.measurements['My Qubit'][:,0]\nprint('Average for 100 measurements=',np.mean(results))", "My Qubit: ───H───M───\nResults= [0 1 1 0 1 0 1 1 1 1] Average= 0.7\nAverage for 100 measurements= 0.507\n" ] ], [ [ "### 11.2.4 두 개 양자비트를 위한 계산 예: CNOT 연산", "_____no_output_____" ], [ "- 두 양자비트의 초기 상태가 |00>인 경우", "_____no_output_____" ] ], [ [ "q = [cirq.GridQubit(i, 0) for i in range(2)]\nprint(q[0], q[1])\n\ncircuit = cirq.Circuit()\ncircuit.append(cirq.CNOT(q[0], q[1]))\nprint(circuit)\ncircuit.append([cirq.measure(q[0]),cirq.measure(q[1])])\nprint(circuit)\n\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=10)\nprint(m_outputs)", "(0, 0) (1, 0)\n(0, 0): ───@───\n │\n(1, 0): ───X───\n(0, 0): ───@───M───\n │\n(1, 0): ───X───M───\n(0, 0)=0000000000\n(1, 0)=0000000000\n" ] ], [ [ "- 두 양자비트의 초기 상태가 |10>인 경우", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit(cirq.X(q[0]))\ncircuit.append(cirq.CNOT(q[0], q[1]))\ncircuit.append([cirq.measure(q[0]),cirq.measure(q[1])])\nprint(circuit)\n\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=10)\nprint(m_outputs)", "(0, 0): ───X───@───M───\n │\n(1, 0): ───────X───M───\n(0, 0)=1111111111\n(1, 0)=1111111111\n" ] ], [ [ "### 11.2.5 벨 상태 만들기", "_____no_output_____" ] ], [ [ "q = [cirq.GridQubit(i, 0) for i in range(2)]\ncircuit = cirq.Circuit()\ncircuit.append(cirq.H(q[0]))\nprint(circuit)\ncircuit.append(cirq.CNOT(q[0], q[1]))\nprint(circuit)\ncircuit.append([cirq.measure(q[0]),cirq.measure(q[1])])\nprint(circuit)\n\nsimulator = cirq.Simulator()\nm_outputs = simulator.run(circuit, repetitions=10)\nprint(m_outputs)", "(0, 0): ───H───\n(0, 0): ───H───@───\n │\n(1, 0): ───────X───\n(0, 0): ───H───@───M───\n │\n(1, 0): ───────X───M───\n(0, 0)=0111101100\n(1, 0)=0111101100\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6fc5fd09fe8ae85bdbacb42631f72ee54b3a5c
389,542
ipynb
Jupyter Notebook
site/en/r2/guide/keras/functional.ipynb
shawnkoon/docs
c13cd44cfab572fe5a7111afd60bb0bfd9596039
[ "Apache-2.0" ]
9
2019-04-07T05:14:52.000Z
2020-02-10T15:33:21.000Z
site/en/r2/guide/keras/functional.ipynb
shawnkoon/docs
c13cd44cfab572fe5a7111afd60bb0bfd9596039
[ "Apache-2.0" ]
null
null
null
site/en/r2/guide/keras/functional.ipynb
shawnkoon/docs
c13cd44cfab572fe5a7111afd60bb0bfd9596039
[ "Apache-2.0" ]
3
2019-07-06T07:41:57.000Z
2019-11-13T05:57:20.000Z
199.050588
197,102
0.870165
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# The Keras Functional API in TensorFlow", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/alpha/guide/keras/functional\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/keras/functional.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/keras/functional.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "!pip install pydot\n!apt-get install graphviz", "Requirement already satisfied: pydot in /usr/local/lib/python3.6/dist-packages (1.3.0)\nRequirement already satisfied: pyparsing>=2.1.4 in /usr/local/lib/python3.6/dist-packages (from pydot) (2.3.1)\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\ngraphviz is already the newest version (2.40.1-2).\n0 upgraded, 0 newly installed, 0 to remove and 10 not upgraded.\n" ], [ "from __future__ import absolute_import, division, print_function\n\n!pip install tensorflow-gpu==2.0.0-alpha0\nimport tensorflow as tf\n\ntf.keras.backend.clear_session()  # For easy reset of notebook state.", "_____no_output_____" ] ], [ [ "\n## Introduction\n\nYou're already familiar with the use of `keras.Sequential()` to create models.\nThe Functional API is a way to create models that is more flexible than `Sequential`:\nit can handle models with non-linear topology, models with shared layers,\nand models with multiple inputs or outputs.\n\nIt's based on the idea that a deep learning model\nis usually a directed acyclic graph (DAG) of layers.\nThe Functional API is a set of tools for **building graphs of layers**.\n\nConsider the following model: \n\n```\n(input: 784-dimensional vectors)\n       ↧\n[Dense (64 units, relu activation)]\n       ↧\n[Dense (64 units, relu activation)]\n       ↧\n[Dense (10 units, softmax activation)]\n       ↧\n(output: probability distribution over 10 classes)\n```\n\nIt's a simple graph of 3 layers.\n\nTo build this model with the functional API,\nyou would start by creating an input node:", "_____no_output_____" ] ], [ [ "from tensorflow import keras\n\ninputs = keras.Input(shape=(784,))", "_____no_output_____" ] ], [ [ "Here we just specify the shape of our data: 784-dimensional vectors.\nNote that the batch size is always omitted; we only specify the shape of each sample.\nFor an input meant for images of shape `(32, 32, 3)`, we would have used:", "_____no_output_____" ] ], [ [ "img_inputs = keras.Input(shape=(32, 32, 3))", "_____no_output_____" ] ], [ [ "What gets returned, `inputs`, contains information about the shape and dtype of the\ninput data that you expect to feed to your model:", "_____no_output_____" ] ], [ [ "inputs.shape", "_____no_output_____" ], [ "inputs.dtype", "_____no_output_____" ] ], [ [ "You create a new node in the graph of layers by calling a layer on this `inputs` object:", "_____no_output_____" ] ], [ [ "from tensorflow.keras import layers\n\ndense = layers.Dense(64, activation='relu')\nx = dense(inputs)", "_____no_output_____" ] ], [ [ "The \"layer call\" action is like drawing an arrow from \"inputs\" to this layer we created.\nWe're \"passing\" the inputs to the `dense` layer, and out we get `x`.\n\nLet's add a few more layers to our graph of layers:", "_____no_output_____" ] ], [ [ "x = layers.Dense(64, activation='relu')(x)\noutputs = layers.Dense(10, activation='softmax')(x)", "_____no_output_____" ] ], [ [ "At this point, we can create a `Model` by specifying its inputs and outputs in the graph of layers:", "_____no_output_____" ] ], [ [ "model = keras.Model(inputs=inputs, outputs=outputs)", "_____no_output_____" ] ], [ [ "To recap, here is our full model definition process:", "_____no_output_____" ] ], [ [ "inputs = keras.Input(shape=(784,), name='img')\nx = layers.Dense(64, activation='relu')(inputs)\nx = layers.Dense(64, activation='relu')(x)\noutputs = layers.Dense(10, activation='softmax')(x)\n\nmodel = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')", "_____no_output_____" ] ], [ [ "Let's check out what the model summary looks like:", "_____no_output_____" ] ], [ [ "model.summary()", "Model: \"mnist_model\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nimg (InputLayer)             
[(None, 784)] 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 64) 50240 \n_________________________________________________________________\ndense_4 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_5 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 55,050\nTrainable params: 55,050\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "We can also plot the model as a graph:", "_____no_output_____" ] ], [ [ "keras.utils.plot_model(model, 'my_first_model.png')", "_____no_output_____" ] ], [ [ "And optionally display the input and output shapes of each layer in the plotted graph:", "_____no_output_____" ] ], [ [ "keras.utils.plot_model(model, 'my_first_model_with_shape_info.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "\nThis figure and the code we wrote are virtually identical. In the code version,\nthe connection arrows are simply replaced by the call operation.\n\nA \"graph of layers\" is a very intuitive mental image for a deep learning model,\nand the functional API is a way to create models that closely mirrors this mental image.", "_____no_output_____" ], [ "\n\n## Training, evaluation, and inference\n\nTraining, evaluation, and inference work exactly in the same way for models built\nusing the Functional API as for Sequential models.\n\nHere is a quick demonstration.\n\nHere we load MNIST image data, reshape it into vectors,\nfit the model on the data (while monitoring performance on a validation split),\nand finally we evaluate our model on the test data:", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\nx_train = x_train.reshape(60000, 784).astype('float32') / 255\nx_test = x_test.reshape(10000, 784).astype('float32') / 255\n\nmodel.compile(loss='sparse_categorical_crossentropy',\n optimizer=keras.optimizers.RMSprop(),\n metrics=['accuracy'])\nhistory = model.fit(x_train, y_train,\n batch_size=64,\n epochs=5,\n validation_split=0.2)\ntest_scores = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', test_scores[0])\nprint('Test accuracy:', test_scores[1])", "Train on 48000 samples, validate on 12000 samples\nEpoch 1/5\n48000/48000 [==============================] - 3s 64us/sample - loss: 0.3414 - accuracy: 0.9016 - val_loss: 0.1719 - val_accuracy: 0.9501\nEpoch 2/5\n48000/48000 [==============================] - 3s 57us/sample - loss: 0.1568 - accuracy: 0.9526 - val_loss: 0.1365 - val_accuracy: 0.9605\nEpoch 3/5\n48000/48000 [==============================] - 3s 58us/sample - loss: 0.1144 - accuracy: 0.9660 - val_loss: 0.1262 - val_accuracy: 0.9625\nEpoch 4/5\n48000/48000 [==============================] - 3s 54us/sample - loss: 0.0929 - accuracy: 0.9716 - val_loss: 0.1100 - val_accuracy: 0.9701\nEpoch 5/5\n48000/48000 [==============================] - 3s 55us/sample - loss: 0.0759 - accuracy: 0.9770 - val_loss: 0.1139 - val_accuracy: 0.9670\nTest loss: 0.100577776569454\nTest accuracy: 0.9696\n" ] ], [ [ "For a complete guide about model training and evaluation, see [Guide to Training & Evaluation](./training_and_evaluation.ipynb).", "_____no_output_____" ], [ "## Saving and serialization\n\nSaving and serialization work exactly in the same way for models built\nusing the Functional API as for Sequential models.\n\nTo standard way to save a Functional model is to call `model.save()` to save the whole 
model into a single file.\nYou can later recreate the same model from this file, even if you no longer have access to the code\nthat created the model.\n\nThis file includes:\n- The model's architecture\n- The model's weight values (which were learned during training)\n- The model's training config (what you passed to `compile`), if any\n- The optimizer and its state, if any (this enables you to restart training where you left off)", "_____no_output_____" ] ], [ [ "model.save('path_to_my_model.h5')\ndel model\n# Recreate the exact same model purely from the file:\nmodel = keras.models.load_model('path_to_my_model.h5')", "_____no_output_____" ] ], [ [ "For a complete guide about model saving, see [Guide to Saving and Serializing Models](./saving_and_serializing.ipynb).", "_____no_output_____" ], [ "## Using the same graph of layers to define multiple models\n\n\nIn the functional API, models are created by specifying their inputs\nand outputs in a graph of layers. That means that a single graph of layers\ncan be used to generate multiple models.\n\nIn the example below, we use the same stack of layers to instantiate two models:\nan `encoder` model that turns image inputs into 16-dimensional vectors,\nand an end-to-end `autoencoder` model for training.\n\n", "_____no_output_____" ] ], [ [ "encoder_input = keras.Input(shape=(28, 28, 1), name='img')\nx = layers.Conv2D(16, 3, activation='relu')(encoder_input)\nx = layers.Conv2D(32, 3, activation='relu')(x)\nx = layers.MaxPooling2D(3)(x)\nx = layers.Conv2D(32, 3, activation='relu')(x)\nx = layers.Conv2D(16, 3, activation='relu')(x)\nencoder_output = layers.GlobalMaxPooling2D()(x)\n\nencoder = keras.Model(encoder_input, encoder_output, name='encoder')\nencoder.summary()\n\nx = layers.Reshape((4, 4, 1))(encoder_output)\nx = layers.Conv2DTranspose(16, 3, activation='relu')(x)\nx = layers.Conv2DTranspose(32, 3, activation='relu')(x)\nx = layers.UpSampling2D(3)(x)\nx = layers.Conv2DTranspose(16, 3, activation='relu')(x)\ndecoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)\n\nautoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')\nautoencoder.summary()", "Model: \"encoder\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nimg (InputLayer) [(None, 28, 28, 1)] 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 26, 26, 16) 160 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 24, 24, 32) 4640 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 8, 8, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 6, 6, 32) 9248 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 4, 4, 16) 4624 \n_________________________________________________________________\nglobal_max_pooling2d (Global (None, 16) 0 \n=================================================================\nTotal params: 18,672\nTrainable params: 18,672\nNon-trainable params: 0\n_________________________________________________________________\nModel: \"autoencoder\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nimg (InputLayer) [(None, 28, 28, 1)] 0 
\n_________________________________________________________________\nconv2d (Conv2D) (None, 26, 26, 16) 160 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 24, 24, 32) 4640 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 8, 8, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 6, 6, 32) 9248 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 4, 4, 16) 4624 \n_________________________________________________________________\nglobal_max_pooling2d (Global (None, 16) 0 \n_________________________________________________________________\nreshape (Reshape) (None, 4, 4, 1) 0 \n_________________________________________________________________\nconv2d_transpose (Conv2DTran (None, 6, 6, 16) 160 \n_________________________________________________________________\nconv2d_transpose_1 (Conv2DTr (None, 8, 8, 32) 4640 \n_________________________________________________________________\nup_sampling2d (UpSampling2D) (None, 24, 24, 32) 0 \n_________________________________________________________________\nconv2d_transpose_2 (Conv2DTr (None, 26, 26, 16) 4624 \n_________________________________________________________________\nconv2d_transpose_3 (Conv2DTr (None, 28, 28, 1) 145 \n=================================================================\nTotal params: 28,241\nTrainable params: 28,241\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "Note that we make the decoding architecture strictly symmetrical to the encoding architecture,\nso that we get an output shape that is the same as the input shape `(28, 28, 1)`.\nThe reverse of a `Conv2D` layer is a `Conv2DTranspose` layer, and the reverse of a `MaxPooling2D`\nlayer is an `UpSampling2D` layer.", "_____no_output_____" ], [ "\n## All models are callable, just like layers\n\nYou can treat any model as if it were a layer, by calling it on an `Input` or on the output of another layer.\nNote that by calling a model you aren't just reusing the architecture of the model, you're also reusing its weights.\n\nLet's see this in action. 
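(A brief aside before that example: the symmetry claim above can be verified shape-by-shape. A minimal sketch, using the same `keras`/`layers` imports, showing that `Conv2DTranspose` undoes the spatial shrinkage of a `Conv2D` with the same kernel size, while pooling is only exactly invertible when the dimensions divide evenly:)

```python
x = keras.Input(shape=(28, 28, 1))
down = layers.Conv2D(16, 3)(x)             # (28, 28) -> (26, 26)
up = layers.Conv2DTranspose(1, 3)(down)    # (26, 26) -> (28, 28)
print(keras.Model(x, up).output_shape)     # (None, 28, 28, 1)

pooled = layers.MaxPooling2D(3)(down)      # (26, 26) -> (8, 8)  (floor division)
unpooled = layers.UpSampling2D(3)(pooled)  # (8, 8)   -> (24, 24), not 26
print(keras.Model(x, unpooled).output_shape)  # (None, 24, 24, 16)
```

This is why the encoder applies `MaxPooling2D(3)` only once the feature map has already shrunk to 24 × 24, a multiple of 3 that the decoder's `UpSampling2D(3)` can restore exactly.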
Here's a different take on the autoencoder example that creates an encoder model, a decoder model,\nand chain them in two calls to obtain the autoencoder model:", "_____no_output_____" ] ], [ [ "encoder_input = keras.Input(shape=(28, 28, 1), name='original_img')\nx = layers.Conv2D(16, 3, activation='relu')(encoder_input)\nx = layers.Conv2D(32, 3, activation='relu')(x)\nx = layers.MaxPooling2D(3)(x)\nx = layers.Conv2D(32, 3, activation='relu')(x)\nx = layers.Conv2D(16, 3, activation='relu')(x)\nencoder_output = layers.GlobalMaxPooling2D()(x)\n\nencoder = keras.Model(encoder_input, encoder_output, name='encoder')\nencoder.summary()\n\ndecoder_input = keras.Input(shape=(16,), name='encoded_img')\nx = layers.Reshape((4, 4, 1))(decoder_input)\nx = layers.Conv2DTranspose(16, 3, activation='relu')(x)\nx = layers.Conv2DTranspose(32, 3, activation='relu')(x)\nx = layers.UpSampling2D(3)(x)\nx = layers.Conv2DTranspose(16, 3, activation='relu')(x)\ndecoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)\n\ndecoder = keras.Model(decoder_input, decoder_output, name='decoder')\ndecoder.summary()\n\nautoencoder_input = keras.Input(shape=(28, 28, 1), name='img')\nencoded_img = encoder(autoencoder_input)\ndecoded_img = decoder(encoded_img)\nautoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder')\nautoencoder.summary()", "Model: \"encoder\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\noriginal_img (InputLayer) [(None, 28, 28, 1)] 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 26, 26, 16) 160 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 24, 24, 32) 4640 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 8, 8, 32) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 6, 6, 32) 9248 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 4, 4, 16) 4624 \n_________________________________________________________________\nglobal_max_pooling2d_1 (Glob (None, 16) 0 \n=================================================================\nTotal params: 18,672\nTrainable params: 18,672\nNon-trainable params: 0\n_________________________________________________________________\nModel: \"decoder\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nencoded_img (InputLayer) [(None, 16)] 0 \n_________________________________________________________________\nreshape_1 (Reshape) (None, 4, 4, 1) 0 \n_________________________________________________________________\nconv2d_transpose_4 (Conv2DTr (None, 6, 6, 16) 160 \n_________________________________________________________________\nconv2d_transpose_5 (Conv2DTr (None, 8, 8, 32) 4640 \n_________________________________________________________________\nup_sampling2d_1 (UpSampling2 (None, 24, 24, 32) 0 \n_________________________________________________________________\nconv2d_transpose_6 (Conv2DTr (None, 26, 26, 16) 4624 \n_________________________________________________________________\nconv2d_transpose_7 (Conv2DTr (None, 28, 28, 1) 145 \n=================================================================\nTotal params: 9,569\nTrainable params: 9,569\nNon-trainable params: 
0\n_________________________________________________________________\nModel: \"autoencoder\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nimg (InputLayer)             [(None, 28, 28, 1)]       0         \n_________________________________________________________________\nencoder (Model)              (None, 16)                18672     \n_________________________________________________________________\ndecoder (Model)              (None, 28, 28, 1)         9569      \n=================================================================\nTotal params: 28,241\nTrainable params: 28,241\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "As you can see, models can be nested: a model can contain submodels (since a model is just like a layer).\n\nA common use case for model nesting is *ensembling*.\nAs an example, here's how to ensemble a set of models into a single model that averages their predictions:", "_____no_output_____" ] ], [ [ "def get_model():\n  inputs = keras.Input(shape=(128,))\n  outputs = layers.Dense(1, activation='sigmoid')(inputs)\n  return keras.Model(inputs, outputs)\n\nmodel1 = get_model()\nmodel2 = get_model()\nmodel3 = get_model()\n\ninputs = keras.Input(shape=(128,))\ny1 = model1(inputs)\ny2 = model2(inputs)\ny3 = model3(inputs)\noutputs = layers.average([y1, y2, y3])\nensemble_model = keras.Model(inputs=inputs, outputs=outputs)", "_____no_output_____" ] ], [ [ "## Manipulating complex graph topologies\n\n\n### Models with multiple inputs and outputs\n\nThe functional API makes it easy to manipulate multiple inputs and outputs.\nThis cannot be handled with the Sequential API.\n\nHere's a simple example.\n\nLet's say you're building a system for ranking customer issue tickets by priority and routing them to the right department.\n\nYour model will have 3 inputs:\n\n- Title of the ticket (text input)\n- Text body of the ticket (text input)\n- Any tags added by the user (categorical input)\n\nIt will have two outputs:\n\n- Priority score between 0 and 1 (scalar sigmoid output)\n- The department that should handle the ticket (softmax output over the set of departments)\n\nLet's build this model in a few lines with the Functional API.", "_____no_output_____" ] ], [ [ "num_tags = 12  # Number of unique issue tags\nnum_words = 10000  # Size of vocabulary obtained when preprocessing text data\nnum_departments = 4  # Number of departments for predictions\n\ntitle_input = keras.Input(shape=(None,), name='title')  # Variable-length sequence of ints\nbody_input = keras.Input(shape=(None,), name='body')  # Variable-length sequence of ints\ntags_input = keras.Input(shape=(num_tags,), name='tags')  # Binary vectors of size `num_tags`\n\n# Embed each word in the title into a 64-dimensional vector\ntitle_features = layers.Embedding(num_words, 64)(title_input)\n# Embed each word in the text into a 64-dimensional vector\nbody_features = layers.Embedding(num_words, 64)(body_input)\n\n# Reduce sequence of embedded words in the title into a single 128-dimensional vector\ntitle_features = layers.LSTM(128)(title_features)\n# Reduce sequence of embedded words in the body into a single 32-dimensional vector\nbody_features = layers.LSTM(32)(body_features)\n\n# Merge all available features into a single large vector via concatenation\nx = layers.concatenate([title_features, body_features, tags_input])\n\n# Stick a logistic regression for priority prediction on top of the features\npriority_pred = layers.Dense(1, 
activation='sigmoid', name='priority')(x)\n# Stick a department classifier on top of the features\ndepartment_pred = layers.Dense(num_departments, activation='softmax', name='department')(x)\n\n# Instantiate an end-to-end model predicting both priority and department\nmodel = keras.Model(inputs=[title_input, body_input, tags_input],\n outputs=[priority_pred, department_pred])", "_____no_output_____" ] ], [ [ "Let's plot the model:", "_____no_output_____" ] ], [ [ "keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "When compiling this model, we can assign different losses to each output.\nYou can even assign different weights to each loss, to modulate their\ncontribution to the total training loss.", "_____no_output_____" ] ], [ [ "model.compile(optimizer=keras.optimizers.RMSprop(1e-3),\n loss=['binary_crossentropy', 'categorical_crossentropy'],\n loss_weights=[1., 0.2])", "_____no_output_____" ] ], [ [ "Since we gave names to our output layers, we could also specify the loss like this:", "_____no_output_____" ] ], [ [ "model.compile(optimizer=keras.optimizers.RMSprop(1e-3),\n loss={'priority': 'binary_crossentropy',\n 'department': 'categorical_crossentropy'},\n loss_weights=[1., 0.2])", "_____no_output_____" ] ], [ [ "We can train the model by passing lists of Numpy arrays of inputs and targets:", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# Dummy input data\ntitle_data = np.random.randint(num_words, size=(1280, 10))\nbody_data = np.random.randint(num_words, size=(1280, 100))\ntags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')\n# Dummy target data\npriority_targets = np.random.random(size=(1280, 1))\ndept_targets = np.random.randint(2, size=(1280, num_departments))\n\nmodel.fit({'title': title_data, 'body': body_data, 'tags': tags_data},\n {'priority': priority_targets, 'department': dept_targets},\n epochs=2,\n batch_size=32)", "Epoch 1/2\n1280/1280 [==============================] - 11s 9ms/sample - loss: 1.2694 - priority_loss: 0.6984 - department_loss: 2.8547\nEpoch 2/2\n1280/1280 [==============================] - 11s 9ms/sample - loss: 1.2137 - priority_loss: 0.6489 - department_loss: 2.8242\n" ] ], [ [ "When calling fit with a `Dataset` object, it should yield either a\ntuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`\nor a tuple of dictionaries like\n`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.\n\nFor more detailed explanation, refer to the complete guide [Guide to Training & Evaluation](./training_and_evaluation.ipynb).", "_____no_output_____" ], [ "### A toy resnet model\n\nIn addition to models with multiple inputs and outputs,\nthe Functional API makes it easy to manipulate non-linear connectivity topologies,\nthat is to say, models where layers are not connected sequentially.\nThis also cannot be handled with the Sequential API (as the name indicates).\n\nA common use case for this is residual connections.\n\nLet's build a toy ResNet model for CIFAR10 to demonstrate this.", "_____no_output_____" ] ], [ [ "inputs = keras.Input(shape=(32, 32, 3), name='img')\nx = layers.Conv2D(32, 3, activation='relu')(inputs)\nx = layers.Conv2D(64, 3, activation='relu')(x)\nblock_1_output = layers.MaxPooling2D(3)(x)\n\nx = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)\nx = layers.Conv2D(64, 3, activation='relu', padding='same')(x)\nblock_2_output 
= layers.add([x, block_1_output])\n\nx = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)\nx = layers.Conv2D(64, 3, activation='relu', padding='same')(x)\nblock_3_output = layers.add([x, block_2_output])\n\nx = layers.Conv2D(64, 3, activation='relu')(block_3_output)\nx = layers.GlobalAveragePooling2D()(x)\nx = layers.Dense(256, activation='relu')(x)\nx = layers.Dropout(0.5)(x)\noutputs = layers.Dense(10, activation='softmax')(x)\n\nmodel = keras.Model(inputs, outputs, name='toy_resnet')\nmodel.summary()", "Model: \"toy_resnet\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nimg (InputLayer) [(None, 32, 32, 3)] 0 \n__________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 30, 30, 32) 896 img[0][0] \n__________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 28, 28, 64) 18496 conv2d_8[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 9, 9, 64) 0 conv2d_9[0][0] \n__________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 9, 9, 64) 36928 max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 9, 9, 64) 36928 conv2d_10[0][0] \n__________________________________________________________________________________________________\nadd (Add) (None, 9, 9, 64) 0 conv2d_11[0][0] \n max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 9, 9, 64) 36928 add[0][0] \n__________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 9, 9, 64) 36928 conv2d_12[0][0] \n__________________________________________________________________________________________________\nadd_1 (Add) (None, 9, 9, 64) 0 conv2d_13[0][0] \n add[0][0] \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 7, 7, 64) 36928 add_1[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d (Globa (None, 64) 0 conv2d_14[0][0] \n__________________________________________________________________________________________________\ndense_9 (Dense) (None, 256) 16640 global_average_pooling2d[0][0] \n__________________________________________________________________________________________________\ndropout (Dropout) (None, 256) 0 dense_9[0][0] \n__________________________________________________________________________________________________\ndense_10 (Dense) (None, 10) 2570 dropout[0][0] \n==================================================================================================\nTotal params: 223,242\nTrainable params: 223,242\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ] ], [ [ "Let's plot the model:", "_____no_output_____" ] ], [ [ "keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True)", "_____no_output_____" ] ], [ [ "Let's train it:", 
"_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\ny_train = keras.utils.to_categorical(y_train, 10)\ny_test = keras.utils.to_categorical(y_test, 10)\n\nmodel.compile(optimizer=keras.optimizers.RMSprop(1e-3),\n loss='categorical_crossentropy',\n metrics=['acc'])\nmodel.fit(x_train, y_train,\n batch_size=64,\n epochs=1,\n validation_split=0.2)", "Train on 40000 samples, validate on 10000 samples\n40000/40000 [==============================] - 318s 8ms/sample - loss: 1.9034 - acc: 0.2767 - val_loss: 1.6173 - val_acc: 0.3870\n" ] ], [ [ "## Sharing layers\n\nAnother good use for the functional API are models that use shared layers. Shared layers are layer instances that get reused multiple times in a same model: they learn features that correspond to multiple paths in the graph-of-layers.\n\nShared layers are often used to encode inputs that come from similar spaces (say, two different pieces of text that feature similar vocabulary), since they enable sharing of information across these different inputs, and they make it possible to train such a model on less data. If a given word is seen in one of the inputs, that will benefit the processing of all inputs that go through the shared layer.\n\nTo share a layer in the Functional API, just call the same layer instance multiple times. For instance, here's an `Embedding` layer shared across two different text inputs:", "_____no_output_____" ] ], [ [ "# Embedding for 1000 unique words mapped to 128-dimensional vectors\nshared_embedding = layers.Embedding(1000, 128)\n\n# Variable-length sequence of integers\ntext_input_a = keras.Input(shape=(None,), dtype='int32')\n\n# Variable-length sequence of integers\ntext_input_b = keras.Input(shape=(None,), dtype='int32')\n\n# We reuse the same layer to encode both inputs\nencoded_input_a = shared_embedding(text_input_a)\nencoded_input_b = shared_embedding(text_input_b)", "_____no_output_____" ] ], [ [ "## Extracting and reusing nodes in the graph of layers", "_____no_output_____" ], [ "Because the graph of layers you are manipulating in the Functional API is a static datastructure, it can be accessed and inspected. This is how we are able to plot Functional models as images, for instance.\n\nThis also means that we can access the activations of intermediate layers (\"nodes\" in the graph) and reuse them elsewhere. This is extremely useful for feature extraction, for example!\n\nLet's look at an example. 
This is a VGG19 model with weights pre-trained on ImageNet:", "_____no_output_____" ] ], [ [ "from tensorflow.keras.applications import VGG19\n\nvgg19 = VGG19()", "Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5\n574717952/574710816 [==============================] - 6s 0us/step\n" ] ], [ [ "And these are the intermediate activations of the model, obtained by querying the graph datastructure:", "_____no_output_____" ] ], [ [ "features_list = [layer.output for layer in vgg19.layers]", "_____no_output_____" ] ], [ [ "We can use these features to create a new feature-extraction model, that returns the values of the intermediate layer activations -- and we can do all of this in 3 lines.", "_____no_output_____" ] ], [ [ "feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)\n\nimg = np.random.random((1, 224, 224, 3)).astype('float32')\nextracted_features = feat_extraction_model(img)", "_____no_output_____" ] ], [ [ "This comes in handy when [implementing neural style transfer](https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398), among other things.", "_____no_output_____" ], [ "## Extending the API by writing custom layers\n\ntf.keras has a wide range of built-in layers. Here are a few examples:\n\n- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`, etc.\n- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`, etc.\n- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`, etc.\n- `BatchNormalization`, `Dropout`, `Embedding`, etc.\n\nIf you don't find what you need, it's easy to extend the API by creating your own layers.\n\nAll layers subclass the `Layer` class and implement:\n- A `call` method, that specifies the computation done by the layer.\n- A `build` method, that creates the weights of the layer (note that this is just a style convention; you could create weights in `__init__` as well).\n\nTo learn more about creating layers from scratch, check out the guide [Guide to writing layers and models from scratch](./custom_layers_and_models.ipynb).\n\nHere's a simple implementation of a `Dense` layer:", "_____no_output_____" ] ], [ [ "class CustomDense(layers.Layer):\n\n def __init__(self, units=32):\n super(CustomDense, self).__init__()\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n\n def call(self, inputs):\n return tf.matmul(inputs, self.w) + self.b\n \ninputs = keras.Input((4,))\noutputs = CustomDense(10)(inputs)\n\nmodel = keras.Model(inputs, outputs)", "_____no_output_____" ] ], [ [ "If you want your custom layer to support serialization, you should also define a `get_config` method,\nthat returns the constructor arguments of the layer instance:", "_____no_output_____" ] ], [ [ "class CustomDense(layers.Layer):\n\n def __init__(self, units=32):\n super(CustomDense, self).__init__()\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n\n def call(self, inputs):\n return tf.matmul(inputs, self.w) + self.b\n\n def get_config(self):\n 
return {'units': self.units}\n \n \ninputs = keras.Input((4,))\noutputs = CustomDense(10)(inputs)\n\nmodel = keras.Model(inputs, outputs)\nconfig = model.get_config()\n\nnew_model = keras.Model.from_config(\n config, custom_objects={'CustomDense': CustomDense})", "_____no_output_____" ] ], [ [ "Optionally, you could also implement the classmethod `from_config(cls, config)`, which is in charge of recreating a layer instance given its config dictionary. The default implementation of `from_config` is:\n\n```python\ndef from_config(cls, config):\n return cls(**config)\n```", "_____no_output_____" ], [ "## When to use the Functional API\n\nHow to decide whether to use the Functional API to create a new model, or just subclass the `Model` class directly?\n\nIn general, the Functional API is higher-level, easier & safer to use, and has a number of features that subclassed Models do not support.\n\nHowever, Model subclassing gives you greater flexibility when creating models that are not easily expressible as directed acyclic graphs of layers (for instance, you could not implement a Tree-RNN with the Functional API, you would have to subclass `Model` directly).\n\n\n### Here are the strengths of the Functional API:\n\nThe properties listed below are all true for Sequential models as well (which are also data structures), but they aren't true for subclassed models (which are Python bytecode, not data structures).\n\n\n#### It is less verbose.\n\nNo `super(MyClass, self).__init__(...)`, no `def call(self, ...):`, etc.\n\nCompare:\n\n```python\ninputs = keras.Input(shape=(32,))\nx = layers.Dense(64, activation='relu')(inputs)\noutputs = layers.Dense(10)(x)\nmlp = keras.Model(inputs, outputs)\n```\n\nWith the subclassed version:\n\n```python\nclass MLP(keras.Model):\n \n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.dense_1 = layers.Dense(64, activation='relu')\n self.dense_2 = layers.Dense(10)\n \n def call(self, inputs):\n x = self.dense_1(inputs)\n return self.dense_2(x)\n \n# Instantiate the model.\nmlp = MLP()\n# Necessary to create the model's state.\n# The model doesn't have a state until it's called at least once.\n_ = mlp(tf.zeros((1, 32)))\n```\n\n\n#### It validates your model while you're defining it.\n\nIn the Functional API, your input specification (shape and dtype) is created in advance (via `Input`), and every time you call a layer, the layer checks that the specification passed to it matches its assumptions, and it will raise a helpful error message if not.\n\nThis guarantees that any model you can build with the Functional API will run. All debugging (other than convergence-related debugging) will happen statically during the model construction, and not at execution time. This is similar to typechecking in a compiler.\n\n\n#### Your Functional model is plottable and inspectable.\n\nYou can plot the model as a graph, and you can easily access intermediate nodes in this graph -- for instance, to extract and reuse the activations of intermediate layers, as we saw in a previous example:\n\n```python\nfeatures_list = [layer.output for layer in vgg19.layers]\nfeat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)\n```\n\n\n#### Your Functional model can be serialized or cloned.\n\nBecause a Functional model is a data structure rather than a piece of code, it is safely serializable and can be saved as a single file that allows you to recreate the exact same model without having access to any of the original code. 
See our [saving and serialization guide](./saving_and_serializing.ipynb) for more details.\n\n\n### Here are the weaknesses of the Functional API:\n\n\n#### It does not support dynamic architectures.\n\nThe Functional API treats models as DAGs of layers. This is true for most deep learning architectures, but not all: for instance, recursive networks or Tree RNNs do not follow this assumption and cannot be implemented in the Functional API.\n\n\n#### Sometimes, you just need to write everything from scratch.\n\nWhen writing advanced architectures, you may want to do things that are outside the scope of \"defining a DAG of layers\": for instance, you may want to expose multiple custom training and inference methods on your model instance. This requires subclassing.\n\n\n---\n\n\nTo dive more in-depth into the differences between the Functional API and Model subclassing, you can read [What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021).", "_____no_output_____" ], [ "## Mix-and-matching different API styles\n\nImportantly, choosing between the Functional API or Model subclassing isn't a binary decision that restricts you to one category of models. All models in the tf.keras API can interact with each other, whether they're Sequential models, Functional models, or subclassed Models/Layers written from scratch.\n\nYou can always use a Functional model or Sequential model as part of a subclassed Model/Layer:", "_____no_output_____" ] ], [ [ "units = 32\ntimesteps = 10\ninput_dim = 5\n\n# Define a Functional model\ninputs = keras.Input((None, units))\nx = layers.GlobalAveragePooling1D()(inputs)\noutputs = layers.Dense(1, activation='sigmoid')(x)\nmodel = keras.Model(inputs, outputs)\n\n\nclass CustomRNN(layers.Layer):\n\n def __init__(self):\n super(CustomRNN, self).__init__()\n self.units = units\n self.projection_1 = layers.Dense(units=units, activation='tanh')\n self.projection_2 = layers.Dense(units=units, activation='tanh')\n # Our previously-defined Functional model\n self.classifier = model\n\n def call(self, inputs):\n outputs = []\n state = tf.zeros(shape=(inputs.shape[0], self.units))\n for t in range(inputs.shape[1]):\n x = inputs[:, t, :]\n h = self.projection_1(x)\n y = h + self.projection_2(state)\n state = y\n outputs.append(y)\n features = tf.stack(outputs, axis=1)\n print(features.shape)\n return self.classifier(features)\n\nrnn_model = CustomRNN()\n_ = rnn_model(tf.zeros((1, timesteps, input_dim)))", "(1, 10, 32)\n" ] ], [ [ "Inversely, you can use any subclassed Layer or Model in the Functional API as long as it implements a `call` method that follows one of the following patterns:\n\n- `call(self, inputs, **kwargs)` where `inputs` is a tensor or a nested structure of tensors (e.g. 
a list of tensors), and where `**kwargs` are non-tensor arguments (non-inputs).\n- `call(self, inputs, training=None, **kwargs)` where `training` is a boolean indicating whether the layer should behave in training mode or inference mode.\n- `call(self, inputs, mask=None, **kwargs)` where `mask` is a boolean mask tensor (useful for RNNs, for instance).\n- `call(self, inputs, training=None, mask=None, **kwargs)` -- of course you can have both masking and training-specific behavior at the same time.\n\nIn addition, if you implement the `get_config` method on your custom Layer or Model, the Functional models you create with it will still be serializable and clonable.\n\nHere's a quick example where we use a custom RNN written from scratch in a Functional model:", "_____no_output_____" ] ], [ [ "units = 32\ntimesteps = 10\ninput_dim = 5\nbatch_size = 16\n\n\nclass CustomRNN(layers.Layer):\n\n def __init__(self):\n super(CustomRNN, self).__init__()\n self.units = units\n self.projection_1 = layers.Dense(units=units, activation='tanh')\n self.projection_2 = layers.Dense(units=units, activation='tanh')\n self.classifier = layers.Dense(1, activation='sigmoid')\n\n def call(self, inputs):\n outputs = []\n state = tf.zeros(shape=(inputs.shape[0], self.units))\n for t in range(inputs.shape[1]):\n x = inputs[:, t, :]\n h = self.projection_1(x)\n y = h + self.projection_2(state)\n state = y\n outputs.append(y)\n features = tf.stack(outputs, axis=1)\n return self.classifier(features)\n\n# Note that we specify a static batch size for the inputs with the `batch_shape`\n# arg, because the inner computation of `CustomRNN` requires a static batch size\n# (when we create the `state` zeros tensor).\ninputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))\nx = layers.Conv1D(32, 3)(inputs)\noutputs = CustomRNN()(x)\n\nmodel = keras.Model(inputs, outputs)\n\nrnn_model = CustomRNN()\n_ = rnn_model(tf.zeros((1, 10, 5)))", "_____no_output_____" ] ], [ [ "This concludes our guide on the Functional API!\n\nNow you have at your fingertips a powerful set of tools for building deep learning models.", "_____no_output_____" ] ] ]
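The serialization strength discussed above can be exercised in a short round trip. This is a minimal sketch, assuming a Functional `model` built only from built-in layers is in scope and that the file name is arbitrary; a model containing a custom layer such as `CustomRNN` would additionally need `custom_objects` passed at load time:

```python
# Save architecture + weights + training config to a single file, then
# rebuild the exact same model from that file alone.
model.save('functional_model.h5')
reloaded_model = keras.models.load_model('functional_model.h5')
```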
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6fd529fa06290e2b5514bc343a7397af92c8fb
40,238
ipynb
Jupyter Notebook
CRIM_Metadata_Finder_8_2020.ipynb
RichardFreedman/CRIM_Metadata_Notebooks
05254b711a3ba2399bf4d6e80c31347fe80ef0ae
[ "Apache-2.0" ]
1
2021-06-03T20:36:16.000Z
2021-06-03T20:36:16.000Z
CRIM_Metadata_Finder_8_2020.ipynb
RichardFreedman/CRIM_Metadata_Notebooks
05254b711a3ba2399bf4d6e80c31347fe80ef0ae
[ "Apache-2.0" ]
null
null
null
CRIM_Metadata_Finder_8_2020.ipynb
RichardFreedman/CRIM_Metadata_Notebooks
05254b711a3ba2399bf4d6e80c31347fe80ef0ae
[ "Apache-2.0" ]
1
2021-06-19T07:48:42.000Z
2021-06-19T07:48:42.000Z
30.786534
1,339
0.462772
[ [ [ "# Various Routines to Harvest CRIM Metadata from Production Server\n\n### Just the basics here, allowing interaction with \"request\" as a way to retrieve individual Observations and Relationships", "_____no_output_____" ] ], [ [ "import requests\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Variables\nNow we can set a variable, in this case the URL of a single Observation in CRIM ", "_____no_output_____" ] ], [ [ "Obs_url = \"https://crimproject.org/data/observations/2/\"", "_____no_output_____" ] ], [ [ "And if we call for that variable, it will tell us what it is:", "_____no_output_____" ] ], [ [ "Obs_url", "_____no_output_____" ] ], [ [ "# Requests\nNow defining a new variable, which itself is a \"get request\" for our first variable:", "_____no_output_____" ] ], [ [ "response = requests.get(Obs_url)", "_____no_output_____" ], [ "type(response)", "_____no_output_____" ] ], [ [ "And now the json representation of that variable:", "_____no_output_____" ] ], [ [ "Obs_json = response.json()", "_____no_output_____" ], [ "Obs_json", "_____no_output_____" ] ], [ [ "# Json, Dictionaries, Keys and Values\nJson is in fact an elaborate dictionary, with items nested in an order.", "_____no_output_____" ] ], [ [ "type(Obs_json)", "_____no_output_____" ] ], [ [ "We can list the fixed \"keys\" for that JSON, which are in turned paired with \"values\".", "_____no_output_____" ] ], [ [ "Obs_json.keys()", "_____no_output_____" ] ], [ [ "And here we are after the value of just ONE key", "_____no_output_____" ] ], [ [ "Obs_ema = Obs_json[\"ema\"]", "_____no_output_____" ], [ "Obs_ema", "_____no_output_____" ] ], [ [ "It has a data type: string", "_____no_output_____" ] ], [ [ "type(Obs_ema)", "_____no_output_____" ] ], [ [ "Now calling for various other values for other keys:", "_____no_output_____" ] ], [ [ "Obs_json[\"musical_type\"]", "_____no_output_____" ], [ "Obs_mt = Obs_json[\"musical_type\"]", "_____no_output_____" ], [ "Obs_mt", "_____no_output_____" ] ], [ [ "The piece key actually is a dictionary within a dictionary, so it has LOTS of keys and values within it.", "_____no_output_____" ] ], [ [ "Obs_piece = Obs_json[\"piece\"]", "_____no_output_____" ], [ "Obs_piece", "_____no_output_____" ] ], [ [ "And to interact with the items there, we need to call for a key *within* that key.", "_____no_output_____" ] ], [ [ "Obs_mei = Obs_piece[\"mei_links\"]", "_____no_output_____" ], [ "Obs_mei", "_____no_output_____" ] ], [ [ "Various ways of calling for items according to their position. 
Note: Zero-based indexing!", "_____no_output_____" ] ], [ [ "len(Obs_mei)", "_____no_output_____" ], [ "Obs_mei[0]", "_____no_output_____" ], [ "Obs_json[\"piece\"][\"mei_links\"][0]", "_____no_output_____" ], [ "Obs_json[\"ema\"]", "_____no_output_____" ], [ "def get_ema_for_observation_id(obs_id):\n # get Obs_url\n url = \"https://crimproject.org/data/observations/{}/\".format(obs_id)\n return url", "_____no_output_____" ], [ "def get_ema_for_observation_id(obs_id):\n # get Obs_ema\n my_ema_mei_dictionary = dict()\n url = \"https://crimproject.org/data/observations/{}/\".format(obs_id)\n response = requests.get(url)\n Obs_json = response.json()\n \n # Obs_ema = Obs_json[\"ema\"]\n \n my_ema_mei_dictionary[\"id\"]=Obs_json[\"id\"]\n my_ema_mei_dictionary[\"musical type\"]=Obs_json[\"musical_type\"]\n my_ema_mei_dictionary[\"int\"]=Obs_json[\"mt_fg_int\"]\n my_ema_mei_dictionary[\"tint\"]=Obs_json[\"mt_fg_tint\"]\n my_ema_mei_dictionary[\"ema\"]=Obs_json[\"ema\"]\n my_ema_mei_dictionary[\"mei\"]=Obs_json[\"piece\"][\"mei_links\"][0]\n my_ema_mei_dictionary[\"pdf\"]=Obs_json[\"piece\"][\"pdf_links\"][0]\n \n \n # Obs_piece = Obs_json[\"piece\"]\n # Obs_mei = Obs_piece[\"mei_links\"]\n \n print(f'Got: {obs_id}')\n \n # return {\"ema\":Obs_ema,\"mei\":Obs_mei}\n \n return my_ema_mei_dictionary\n", "_____no_output_____" ] ], [ [ "Now we get a _particular_ observation.", "_____no_output_____" ] ], [ [ "get_ema_for_observation_id(20)", "Got: 20\n" ] ], [ [ "A new variable that contains the \"get_ema\" routine. We will pass a series of numbers to it.", "_____no_output_____" ] ], [ [ "output = get_ema_for_observation_id(20)", "Got: 20\n" ], [ "# this holds the output as a LIST of DICTS\nobs_data_list = []", "_____no_output_____" ], [ "# this is the list of Observation IDs to call\n\nobs_call_list = [1,3,5,17,21]", "_____no_output_____" ], [ "# this is the LOOP that runs through the list above\n# for observ in obs_call_list:\n\nfor observ in range(1,11):\n call_list_output = get_ema_for_observation_id(observ)\n \n # the print command simply puts the output in the notebook terminal. \n # Later we will put it in the List of Dicts.\n \n # print(call_list_output)\n # The APPEND function adds one item after each loop.\n obs_data_list.append(call_list_output)\n ", "Got: 1\nGot: 2\nGot: 3\nGot: 4\nGot: 5\nGot: 6\nGot: 7\nGot: 8\nGot: 9\nGot: 10\n" ], [ "# list includes APPEND function that will allow us to add one item after each loop.\n# EX blank_list = [1,5,6] (note that these are in square brackets as LIST)\n# blank_list.append(89)\n# a range goes in parentheses, as in: range(1,11)\n# here we make a LIST object that contains the Range. 
\n# This allows it to iterate over the range\n# since the range could be HUGE. We can ONLY append a number to a LIST!\n\nObs_range = list(range(1,11))\n", "_____no_output_____" ] ], [ [ "Now we call up the list of observations we created above, after appending one at a time to the \"[]\"", "_____no_output_____" ] ], [ [ "obs_data_list", "_____no_output_____" ] ], [ [ "# Pandas as Data Frame or CSV", "_____no_output_____" ] ], [ [ "pd.Series(obs_data_list).to_csv(\"obs_data_list.csv\")", "/Users/rfreedma/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "# Pandas DataFrame interprets the series of items in each Dict \n# as separate 'cells' (a tabular structure)\nDF_output = pd.DataFrame(obs_data_list)", "_____no_output_____" ], [ "DF_output", "_____no_output_____" ], [ "DF_output.to_csv(\"obs_data_list.csv\")", "_____no_output_____" ], [ "# two \"==\" means check for equality\n# for 'contains' use str.contains(\"letter\")\n# can also use regex in this (for EMA range)\n# Filter_by_Type = (DF_output[\"musical type\"]==\"Fuga\") & (DF_output[\"id\"]==8)\nFilter_by_Type = DF_output[\"musical type\"].str.contains(\"Fuga\")\n\n# ", "_____no_output_____" ], [ "DF_output[Filter_by_Type]", "_____no_output_____" ] ] ]
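The comment above notes that `str.contains` also accepts a regular expression, which is handy for the EMA field. A minimal sketch assuming the `DF_output` table from the cells above; the pattern itself is illustrative, not a real EMA query:

```python
# Keep only observations whose EMA selection string begins with a measure number 1-9
ema_filter = DF_output["ema"].str.contains(r"^[1-9]", regex=True)
DF_output[ema_filter]
```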
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a6fdbdd3ddde359345aed67816ec7725045cd97
327,191
ipynb
Jupyter Notebook
Reproducible Data Analysis in Jupyter - Part 2.ipynb
argute16/jupyterWorkflow
0727c13f302ad4d0eee4392b31fbdd0c98b07d45
[ "MIT" ]
null
null
null
Reproducible Data Analysis in Jupyter - Part 2.ipynb
argute16/jupyterWorkflow
0727c13f302ad4d0eee4392b31fbdd0c98b07d45
[ "MIT" ]
null
null
null
Reproducible Data Analysis in Jupyter - Part 2.ipynb
argute16/jupyterWorkflow
0727c13f302ad4d0eee4392b31fbdd0c98b07d45
[ "MIT" ]
null
null
null
681.647917
73,324
0.952731
[ [ [ "# Jupyter Data Science Workflow - Part 2\n**From exploratory analysis to reproducible science** \nContinue from Reproducible Data Analysis in Jupyter_V3", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom jupyterworkflow.data import get_fremont_data\nfrom sklearn.decomposition import PCA\nfrom sklearn.mixture import GaussianMixture\nimport matplotlib.pyplot as plt\nimport pandas as pd ", "_____no_output_____" ] ], [ [ "## Get Data", "_____no_output_____" ] ], [ [ "data = get_fremont_data()\npivoted=data.pivot_table('Total', index=data.index.time, columns=data.index.date)\npivoted.plot(legend=False, alpha=0.01)", "_____no_output_____" ] ], [ [ "Look at the **Shape of the Pivot**: \n\nIn this example (3163, 24) equals 3163 days and each observation consists of 24 hours", "_____no_output_____" ] ], [ [ "X = pivoted.T.values\nX.shape", "_____no_output_____" ] ], [ [ "**Clean Up Data** - due to missing values", "_____no_output_____" ] ], [ [ "X = pivoted.fillna(0).T.values\nX.shape", "_____no_output_____" ] ], [ [ "## Principal component analysis", "_____no_output_____" ] ], [ [ "X2=PCA(2, svd_solver='full').fit_transform(X)\nX2.shape", "_____no_output_____" ] ], [ [ "**Scatterplot** \nShows two clusers", "_____no_output_____" ] ], [ [ "plt.scatter(X2[:,0], X2[:,1])", "_____no_output_____" ] ], [ [ "## Unsupervised Clustering\n**Gaussian Mixture** \nTo distinguish the identified clusters", "_____no_output_____" ] ], [ [ "gmm=GaussianMixture(2).fit(X)\n#if we want to dostinguish more clusters just change the 2 into the number of clusters\n\nlabels=gmm.predict(X)\n#the labels now show in which cluster the data point is (either 0, or 1)", "_____no_output_____" ], [ "plt.scatter(X2[:,0], X2[:,1], c=labels, cmap='rainbow')\nplt.colorbar()", "_____no_output_____" ] ], [ [ "**Plot Pivoted Table**, but depending on labels \nExamine what happens in each cluster", "_____no_output_____" ] ], [ [ "pivoted.T[labels==0].T.plot(legend=False, alpha=0.01)", "_____no_output_____" ], [ "pivoted.T[labels==1].T.plot(legend=False, alpha=0.01)", "_____no_output_____" ] ], [ [ "## Comparing with Day of Week", "_____no_output_____" ] ], [ [ "dayofweek=pd.DatetimeIndex(pivoted.columns).dayofweek", "_____no_output_____" ], [ "plt.scatter(X2[:,0], X2[:,1], c=dayofweek, cmap='rainbow')\nplt.colorbar()", "_____no_output_____" ] ], [ [ "## Analyzing Outliners", "_____no_output_____" ], [ "**Is Label 1 equal to weekends?** \n- Shows the cases of labels 1 (expected to be weekends) which where actualy during the week \n- This shows, that the Gaussian Mixture did not perform verry well here ", "_____no_output_____" ] ], [ [ "dates=pd.DatetimeIndex(pivoted.columns)\ndates[(labels==1)&(dayofweek<5)]", "_____no_output_____" ] ], [ [ "**Is Label 0 equal to weekdays?** \n- Shows the cases of labels 0 (expected to be weekdays) which where actualy during the weekend \n- Here we see less differences", "_____no_output_____" ] ], [ [ "dates=pd.DatetimeIndex(pivoted.columns)\ndates[(labels==0)&(dayofweek>4)]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6fe1afa49d9b969a28944563589e450e367f82
7,271
ipynb
Jupyter Notebook
notebooks/pyoperant.ipynb
fasciae/pyoperant
1eddfb104f0d692d6ecc3e2dd7cb8de05538523a
[ "BSD-3-Clause" ]
10
2015-02-21T22:58:43.000Z
2021-05-20T22:47:57.000Z
notebooks/pyoperant.ipynb
fasciae/pyoperant
1eddfb104f0d692d6ecc3e2dd7cb8de05538523a
[ "BSD-3-Clause" ]
31
2015-02-17T16:43:15.000Z
2020-03-06T23:09:48.000Z
notebooks/pyoperant.ipynb
fasciae/pyoperant
1eddfb104f0d692d6ecc3e2dd7cb8de05538523a
[ "BSD-3-Clause" ]
10
2015-07-02T18:55:02.000Z
2021-09-20T22:45:43.000Z
20.253482
620
0.501444
[ [ [ "import sys\nprint(sys.executable)\nimport numpy as np\nimport scipy as sp", "/usr/bin/python\n" ], [ "#import sys\n#sys.path.append('../')", "_____no_output_____" ], [ "from pyoperant.local import PANELS\nimport time", "_____no_output_____" ], [ "import pyoperant\nprint(pyoperant.__file__)", "/home/pi/pyoperant/pyoperant/__init__.pyc\n" ], [ "PANELS.keys()", "_____no_output_____" ], [ "box_num = 1\nbox = PANELS['%d' % (box_num)]()\n#box.test()", "_____no_output_____" ], [ "box.left.status()", "_____no_output_____" ], [ "box.speaker.queue('/home/bird/2sec_noise_65dbmean_48kHz.wav')\nbox.speaker.play()", "_____no_output_____" ], [ "box.speaker.stop()", "_____no_output_____" ], [ "box.speaker.interface._stop_wav()", "_____no_output_____" ], [ "# box_num += 1\nbox_num = 7\nprint box_num\nbox = PANELS['Zog%d' % (box_num)]()\n\ntry:\n box.reset()\nexcept:\n pass\nelse:\n print 'reset complete'\n# box.test()", "_____no_output_____" ], [ "box.house_light.off()", "_____no_output_____" ], [ "from pyoperant.components import RGBLight", "_____no_output_____" ], [ "cue_light = RGBLight(red=box.outputs[7],\n green=box.outputs[5],\n blue=box.outputs[6]\n )", "_____no_output_____" ], [ "cue_light._red.write(False)", "_____no_output_____" ], [ "box_num = 11\nprint box_num\nbox = PANELS['Zog%d' % (box_num)]()\n\ntry:\n box.reset()\nexcept:\n pass\nbox.speaker.queue('/home/bird/opdat/B982/stims/triples_abd_48kHz.wavv.wav')\nbox.speaker.play()\ntime.sleep(1.0)\nbox.speaker.stop()", "_____no_output_____" ], [ "box_num = 0", "_____no_output_____" ], [ "box_num = 3\nprint box_num\nbox = PANELS['Zog%d' % (box_num)]()\n\n# try:\n# box.reset()\n# except:\n# pass\n# box.speaker.queue('/home/bird/Desktop/test48k.wav')\nbox.speaker.queue('/home/bird/2sec_noise_65dbmean.wav')\nbox.speaker.play()\ntime.sleep(2.0)\nbox.speaker.stop()", "_____no_output_____" ] ], [ [ "Box 3 is very quiet\nBox 10, 14 are very loud\nBox 15 doesn't work", "_____no_output_____" ] ], [ [ "for ii in box.inputs:\n print ii.read()", "_____no_output_____" ], [ "for oo in box.outputs:\n print oo.write(False)", "_____no_output_____" ], [ "for ii in box_1.inputs:\n print ii.read()", "_____no_output_____" ], [ "iface = box.interfaces['comedi']", "_____no_output_____" ], [ "box = PANELS['Zog6']()\nbox.reset()\nbox.test()", "_____no_output_____" ], [ "box.reward()", "_____no_output_____" ] ] ]
[ "code", "raw", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a6fef8fa53950f56fb15b4397a0859062eafcdf
6,849
ipynb
Jupyter Notebook
colabs/sheets_copy.ipynb
Ressmann/starthinker
301c5cf17e382afee346871974ca2f4ae905a94a
[ "Apache-2.0" ]
138
2018-11-28T21:42:44.000Z
2022-03-30T17:26:35.000Z
colabs/sheets_copy.ipynb
Ressmann/starthinker
301c5cf17e382afee346871974ca2f4ae905a94a
[ "Apache-2.0" ]
36
2019-02-19T18:33:20.000Z
2022-01-24T18:02:44.000Z
colabs/sheets_copy.ipynb
Ressmann/starthinker
301c5cf17e382afee346871974ca2f4ae905a94a
[ "Apache-2.0" ]
54
2018-12-06T05:47:32.000Z
2022-02-21T22:01:01.000Z
32.459716
261
0.505767
[ [ [ "#Sheet Copy\nCopy tab from a sheet to a sheet.\n", "_____no_output_____" ], [ "#License\n\nCopyright 2020 Google LLC,\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n", "_____no_output_____" ], [ "#Disclaimer\nThis is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.\n\nThis code generated (see starthinker/scripts for possible source):\n - **Command**: \"python starthinker_ui/manage.py colab\"\n - **Command**: \"python starthinker/tools/colab.py [JSON RECIPE]\"\n\n", "_____no_output_____" ], [ "#1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "!pip install git+https://github.com/google/starthinker\n", "_____no_output_____" ] ], [ [ "#2. Set Configuration\n\nThis code is required to initialize the project. Fill in required fields and press play.\n\n1. If the recipe uses a Google Cloud Project:\n - Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md).\n\n1. If the recipe has **auth** set to **user**:\n - If you have user credentials:\n - Set the configuration **user** value to your user credentials JSON.\n - If you DO NOT have user credentials:\n - Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md).\n\n1. If the recipe has **auth** set to **service**:\n - Set the configuration **service** value to [downloaded service credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md).\n\n", "_____no_output_____" ] ], [ [ "from starthinker.util.configuration import Configuration\n\n\nCONFIG = Configuration(\n project=\"\",\n client={},\n service={},\n user=\"/content/user.json\",\n verbose=True\n)\n\n", "_____no_output_____" ] ], [ [ "#3. Enter Sheet Copy Recipe Parameters\n 1. Provide the full edit URL for both sheets.\n 1. Provide the tab name for both sheets.\n 1. The tab will only be copied if it does not already exist.\nModify the values below for your use case, can be done multiple times, then click play.\n", "_____no_output_____" ] ], [ [ "FIELDS = {\n 'auth_read': 'user', # Credentials used for reading data.\n 'from_sheet': '',\n 'from_tab': '',\n 'to_sheet': '',\n 'to_tab': '',\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n", "_____no_output_____" ] ], [ [ "#4. 
Execute Sheet Copy\nThis does NOT need to be modified unless you are changing the recipe, click play.\n", "_____no_output_____" ] ], [ [ "from starthinker.util.configuration import execute\nfrom starthinker.util.recipe import json_set_fields\n\nTASKS = [\n {\n 'sheets': {\n 'auth': 'user', \n 'template': {\n 'sheet': {'field': {'name': 'from_sheet', 'kind': 'string', 'order': 1, 'default': ''}}, \n 'tab': {'field': {'name': 'from_tab', 'kind': 'string', 'order': 2, 'default': ''}}\n }, \n 'sheet': {'field': {'name': 'to_sheet', 'kind': 'string', 'order': 3, 'default': ''}}, \n 'tab': {'field': {'name': 'to_tab', 'kind': 'string', 'order': 4, 'default': ''}}\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\n\nexecute(CONFIG, TASKS, force=True)\n", "_____no_output_____" ] ] ]
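If several tabs need to be copied, the recipe can simply be re-executed with different field values. A minimal sketch under stated assumptions: the tab names are placeholders, and the template copy must be captured before `json_set_fields` fills the original `TASKS` in place:

```python
import copy

TASK_TEMPLATE = copy.deepcopy(TASKS)  # capture the unfilled template first

for tab in ['Sheet1', 'Sheet2']:  # placeholder tab names
    fields = dict(FIELDS, from_tab=tab, to_tab=tab)
    tasks = copy.deepcopy(TASK_TEMPLATE)
    json_set_fields(tasks, fields)
    execute(CONFIG, tasks, force=True)
```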
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6ff931c7064114cf4543ffc700feeba19b75a2
39,399
ipynb
Jupyter Notebook
7.17.ipynb
LZ999/python.1
b63717e14728ad6e45c9f64a77a8715a9ad2c789
[ "Apache-2.0" ]
null
null
null
7.17.ipynb
LZ999/python.1
b63717e14728ad6e45c9f64a77a8715a9ad2c789
[ "Apache-2.0" ]
null
null
null
7.17.ipynb
LZ999/python.1
b63717e14728ad6e45c9f64a77a8715a9ad2c789
[ "Apache-2.0" ]
null
null
null
21.912681
1,310
0.478718
[ [ [ "# 数学函数、字符串和对象\n## 本章介绍Python函数来执行常见的数学运算\n- 函数是完成一个特殊任务的一组语句,可以理解为一个函数相当于一个小功能,但是在开发中,需要注意一个函数的长度最好不要超过一屏\n- Python中的内置函数是不需要Import导入的\n<img src=\"../Photo/15.png\"></img>", "_____no_output_____" ] ], [ [ "#绝对值\nprint(abs(-10))", "10\n" ], [ "#MAX \nmax(1,2,3)\n#max('abc')", "_____no_output_____" ], [ "#MIN\nmin(-1,0,1)", "_____no_output_____" ], [ "#POW(幂)\npow(5,8)", "_____no_output_____" ], [ "#ROUND(X)(返回与X最接近的整数)\nround(3.8)", "_____no_output_____" ], [ "#round(x,n) 保留浮点小数\nround(3.99999988875622332,8)", "_____no_output_____" ], [ "#赌博 \nimport random\nrandom.randint()", "_____no_output_____" ], [ "#石头 = 0 \n#剪刀 = 1\n#布 = 2\nimport random\nnumber = random.randint(0,2)\ncount = eval(input('please input count only 0、1、2:'))\nif abs(number - count) == 1:\n min(number,count)\n print('你赢啦!')\nelif abs(number - count) == 0:\n print('平局,再来一次吧!')\nelse:\n max(number,count)\n print('输了,再来一次吧!')", "please input count only 0、1、2:2\n输了,再来一次吧!\n" ], [ "import os\nimport random\na = eval(input('please input number'))\nb = ", "_____no_output_____" ], [ "a = -10\nprint(abs(a))", "_____no_output_____" ], [ "b = -10.1\nprint(abs(b))", "_____no_output_____" ], [ "c = 0\nprint(abs(c))", "_____no_output_____" ], [ "max(1, 2, 3, 4, 5)\n", "_____no_output_____" ], [ "min(1, 2, 3, 4, 5)", "_____no_output_____" ], [ "min(1, 2, 3, -4, 5)", "_____no_output_____" ], [ "for i in range(10):\n print(i)", "_____no_output_____" ], [ "pow(2, 4, 2) # 幂指数运算,第三个参数是取模运算", "_____no_output_____" ], [ "round(10.67, 1) # 一个参数就是四舍五入,保留小数位数", "_____no_output_____" ] ], [ [ "## 尝试练习Python内置函数", "_____no_output_____" ], [ "## Python中的math模块提供了许多数学函数\n<img src=\"../Photo/16.png\"></img>\n<img src=\"../Photo/17.png\"></img>\n", "_____no_output_____" ] ], [ [ "#圆的实现\n", "_____no_output_____" ], [ "import random\ny = random.randint(0,1)\na = random.randint(0,1)\nL = -(y*log(a)+(1-y)*(log(1-a)))", "_____no_output_____" ], [ "#sin()使用弧度\nimport math\nmath.sin(math.radians(90))", "_____no_output_____" ], [ "#FABS\nimport math\nmath.fabs(-4)", "_____no_output_____" ], [ "#ceil 向上取整\nimport math\nmath.ceil(-3.1)", "_____no_output_____" ], [ "import math\nmath.exp(100)\n", "_____no_output_____" ], [ "import time\nstart = time.time()#返回时间戳\nnum = 0\nfor i in range (1000000):\n num +=i\nend = time.time()\nprint(end - start)", "0.14760351181030273\n" ], [ "import math # 导入数学包\na1 = math.fabs(-2)\nprint(a1)\n\nprint(math.log(2.71828))\nprint(math.asin(1.0))", "_____no_output_____" ], [ "b1 = math.cos(math.radians(90)) # cos代入的是弧度值,very important!\nprint(b1)\nc1 = 3.1415926\nprint(math.degrees(c1))", "_____no_output_____" ], [ "math.sqrt(9)", "_____no_output_____" ], [ "math.sin(2 * math.pi)", "_____no_output_____" ], [ "math.cos(2 * math.pi)", "_____no_output_____" ], [ "min(2, 2, 1)", "_____no_output_____" ], [ "math.log(math.e ** 2)", "_____no_output_____" ], [ "math.exp(1)", "_____no_output_____" ], [ "max(2, 3, 4)", "_____no_output_____" ], [ "math.ceil(-2.5)", "_____no_output_____" ], [ "# 验证码系统\nfirst_num, second_num = 3, 4\nprint('验证码', first_num ,'+', second_num, '= ?')\nanswer = eval(input('写出结果: '))\n\nif answer == first_num + second_num:\n print('验证码正确')\nelse:\n print('验证码错误')\n", "_____no_output_____" ], [ "import random\nimport math\n\nfirst_num, second_num = 3, 4\nlist = ['+', '-', '*', '/']\nrandl = random.randint(0, 3)\nif list[randl]=='+':\n print('验证码', first_num ,'+', second_num, '= ?')\n right_answer = first_num + second_num\nelif list[randl]=='-':\n print('验证码', first_num ,'-', second_num, '= ?')\n right_answer = 
first_num - second_num\nelif list[randl]=='*':\n print('CAPTCHA', first_num ,'*', second_num, '= ?')\n right_answer = first_num * second_num\nelse:\n print('CAPTCHA', first_num ,'/', second_num, '= ?')\n right_answer = first_num / second_num\n \nanswer = eval(input('Write the result: '))\n\nif answer == right_answer:\n print('CAPTCHA correct')\nelse:\n print('CAPTCHA wrong')", "_____no_output_____" ], [ "# CAPTCHA system\nimport random\nfirst_num = random.randint(0, 9)\nsecond_num = random.randint(0, 9)\nfuhao = random.randint(0, 3)\n\nif fuhao==0:\n print('CAPTCHA', first_num ,'+', second_num, '= ?')\n right_answer = first_num + second_num\nelif fuhao==1:\n print('CAPTCHA', first_num ,'-', second_num, '= ?')\n right_answer = first_num - second_num\nelif fuhao==2:\n print('CAPTCHA', first_num ,'*', second_num, '= ?')\n right_answer = first_num * second_num\nelse:\n print('CAPTCHA', first_num ,'/', second_num, '= ?')\n right_answer = first_num / second_num\n \nanswer = eval(input('Write the result: '))\n\nif answer == right_answer:\n print('CAPTCHA correct')\nelse:\n print('CAPTCHA wrong')\n", "_____no_output_____" ], [ "import random\nlist = ['+', '-', '*', '/']\nc = random.sample(list, 1)\nprint(c)", "_____no_output_____" ], [ "import random\nimport math\n\nfirst_num = random.randint(0, 9)\nsecond_num = random.randint(0, 9)\nlist = ['+', '-', '*', '/']\nfuhao = random.sample(list, 1)[0] # sample returns a list; take its single element\nif fuhao=='+':\n print('CAPTCHA', first_num ,'+', second_num, '= ?')\n right_answer = first_num + second_num\nelif fuhao=='-':\n print('CAPTCHA', first_num ,'-', second_num, '= ?')\n right_answer = first_num - second_num\nelif fuhao=='*':\n print('CAPTCHA', first_num ,'*', second_num, '= ?')\n right_answer = first_num * second_num\nelse:\n print('CAPTCHA', first_num ,'/', second_num, '= ?')\n right_answer = first_num / second_num\n \nanswer = eval(input('Write the result: '))\n\nif answer == right_answer:\n print('CAPTCHA correct')\nelse:\n print('CAPTCHA wrong')", "_____no_output_____" ], [ "import PIL", "_____no_output_____" ] ], [ [ "## Two mathematical constants, PI and e, can be accessed via math.pi and math.e", "_____no_output_____" ] ], [ [ "import math\nprint(math.pi)\nprint(math.e)", "_____no_output_____" ] ], [ [ "## EP:\n- Using the math library, write a program that lets the user enter three vertices (x,y) and returns the three angles\n- Note: Python computes angles in radians, which need to be converted to degrees\n<img src=\"../Photo/18.png\">", "_____no_output_____" ] ], [ [ "import math\nx1,y1 = eval(input('Enter the coordinates of point A:'))\nx2,y2 = eval(input('Enter the coordinates of point B:'))\nx3,y3 = eval(input('Enter the coordinates of point C:'))\n\na = math.sqrt(pow((x2-x3),2)+pow((y2-y3),2))\nb = math.sqrt(pow((x3-x1),2)+pow((y3-y1),2))\nc = math.sqrt(pow((x2-x1),2)+pow((y2-y1),2))\n\nA = math.degrees(math.acos((a*a-b*b-c*c)/(-2*b*c)))\nB = math.degrees(math.acos((b*b-a*a-c*c)/(-2*a*c)))\nC = math.degrees(math.acos((c*c-b*b-a*a)/(-2*b*a)))\n\nprint('The three angles of the triangle are:', A, B, C)", "_____no_output_____" ], [ "c = '''她说:“你真是一个小天才”\n你真是一个小煞笔\n你真是一个大煞笔\n你真是一个小机灵鬼'''\nnum = 0\nfor i in c:\n if i=='\\n':\n print('ok')\n continue\n else: num+=1 \nprint(num)", "ok\nok\nok\n38\n" ], [ "c = '''她说:“你真是一个小天才”\n你真是一个小煞笔\n你真是一个大煞笔\n你真是一个小机灵鬼'''\nbytes(c.encode('utf-8'))\n", "_____no_output_____" ], [ "# count the characters of the string\nc = '''\n她说:“你真是一个小天才”\n你真是一个小煞笔\n你真是一个大煞笔\n你真是一个小机灵鬼\n'''\nnum = 0\nfor i in c:\n num +=1\n print(num)", "_____no_output_____" ], [ "import math\n\nx1, y1 = eval(input('Enter the coordinates of point A:'))\nx2, y2 = eval(input('Enter the coordinates of point B:'))\nx3, y3 = eval(input('Enter the coordinates of point C:'))\n\na = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\nb = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)\nc = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)\n\nA = math.degrees(math.acos((a * a - b * b - c * c) / (-2 * b * c)))\nB = math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c)))\nC = math.degrees(math.acos((c * c - b * b - a * a) / 
(-2 * a * b)))\n\nprint('The three angles of the triangle are', A, B, C)\n\n \n", "_____no_output_____" ] ] ], [ [ "## Strings and Characters\n- In Python, a string must be inside single or double quotes; for a string spanning multiple lines you can use triple quotes\n- When using triple quotes, assigning them to a variable makes a string; otherwise they act as a multi-line comment", "_____no_output_____" ] ], [ [ "# practice\na = 'a'\nb = 'b'\n'''\nyou really are\n'''", "_____no_output_____" ], [ "a = 'joker'\nb = \"Kate\"\nc = \"\"\"In Python, a string must be inside single or double quotes; for a string spanning multiple lines you can use triple quotes\nWhen using triple quotes, assigning them to a variable makes a string; otherwise they act as a multi-line comment\"\"\" # when a string has multiple lines, add three single or three double quotes\n\n\"\"\"In Python, a string must be inside single or double quotes; for a string spanning multiple lines you can use triple quotes\nWhen using triple quotes, assigning them to a variable makes a string; otherwise they act as a multi-line comment\"\"\" # triple quotes can serve as a multi-line comment\n# when the six quotes are not assigned to anything, they act as a comment\n# the six quotes' purpose: multi-line text\n\nprint(type(a), type(b), type(c))", "_____no_output_____" ] ], [ [ "## ASCII and Unicode\n- <img src=\"../Photo/19.png\"></img>\n- <img src=\"../Photo/20.png\"></img>\n- <img src=\"../Photo/21.png\"></img>", "_____no_output_____" ], [ "## Functions ord and chr\n- ord returns the ASCII code value\n- chr returns the character", "_____no_output_____" ] ], [ [ "ord('b')", "_____no_output_____" ], [ "chr(98)", "_____no_output_____" ], [ "# email\na = '[email protected]'\nfor i in a:\n #print(ord(i),end=' ')\n print(chr(ord(i)),end='')", "[email protected]" ], [ "import random\na = '[email protected]'\nb = random.randint(1,5)\n# apply a randomly chosen operation to each character code\nfor i in a:\n if b == 1:\n s = ord(i) + b\n elif b == 2:\n s = ord(i) - b\n elif b == 3:\n s = ord(i) * b\n elif b == 4:\n s = ord(i) / b\n else:\n s = ord(i) % b\n print(s)\n #print(chr(ord(i)),end='')\n ", "_____no_output_____" ], [ "joker = 'A'\nord(joker)\nprint(ord('q'), ord('Z'))", "_____no_output_____" ], [ "print(chr(65))\nprint(chr(90))", "_____no_output_____" ], [ "import numpy as np\nnp.nonzero(1)", "_____no_output_____" ] ], [ [ "## EP:\n- Use ord and chr for simple email encryption", "_____no_output_____" ] ], [ [ "email = '[email protected]' # email encryption process\nj = 0\nfor i in email:\n text = ord(i) + 1\n re_text = chr(text)\n print(re_text)", "_____no_output_____" ], [ "import hashlib\nstr1 = 'this is a test.'\nh1 = hashlib.md5()\nh1.update(str1.encode(encoding = 'utf-8'))\nprint('After MD5 encryption:', h1.hexdigest())", "_____no_output_____" ] ], [ [ "## Escape Sequence \\\n- a = \"He said,\"Johon's program is easy to read\"\"\n- It cancels the original meaning of a character\n- In general, escaping is only needed when a statement collides with the default syntax", "_____no_output_____" ] ], [ [ "#JOIN concatenates using ... as the glue\n%time ''.join(('a','b'))", "Wall time: 0 ns\n" ], [ "%time 'a'+'100'", "Wall time: 0 ns\n" ], [ "a = \"He said,\\\"Johon's program is easy to read\\\"\" # the escape character \\ is often used in regular expressions\nprint(a)", "_____no_output_____" ] ], [ [ "## Advanced print\n- Parameter end: how to end the printed output\n- By default it prints with a trailing newline", "_____no_output_____" ] ], [ [ "email = '[email protected]' # email encryption process\nj = 0\nfor i in email:\n text = ord(i) + 1\n re_text = chr(text)\n print(re_text, end = '')", "_____no_output_____" ] ], [ [ "## Function str\n- Forcibly converts a type to the string type\n- Some others will be learned later (list, set, tuple...)", "_____no_output_____" ] ], [ [ "a = 100.12\ntype(str(a))", "_____no_output_____" ] ], [ [ "## String Concatenation\n- Use \"+\" directly \n- The join() function ", "_____no_output_____" ] ], [ [ "a1 = 'www.baidu.com/image.page='\na2 = '1'\nfor i in range(0, 10):\n a2 = a1 + str(i)\n print(a2)", "_____no_output_____" ], [ "joint = '^'\n%time joint.join(('a', 'b', 'c', 'd')) # join's argument needs to be in a tuple", "_____no_output_____" ], [ "%time '*'.join(('a', 'b', 'c', 'd')) # join's argument needs to be in a tuple", "_____no_output_____" ], [ "%time 'A' + 'B' + 'C'", "_____no_output_____" ] ], [ [ "## EP:\n- Concatenate \"Welcome\" \"to\" \"Python\"\n- Concatenate the int 100 with \"joker is a bad man\"\n- Read a string from the console\n> Given a name as input, return a compliment for this person", "_____no_output_____" ] ], [ [ "' '.join(('Welcome','to','Python'))", "_____no_output_____" ], [ "''.join((str(100),'joker is a bad man'))", "_____no_output_____" ], [ "name = input('please input name:')\na = ''.join((name,',you are very 
beautiful!'))\nprint(a)", "please input name:lily\nlily,you are very beautiful!\n" ], [ "text1 = ' '.join(('Welcome', 'to', 'Python'))\ni = 100\ntext2 = str(i)\ntext3 = ' '.join((text2, 'Joker is a bad man'))\nprint(text1, '\\n', text2 ,'\\n', text3)", "_____no_output_____" ], [ "name = input('Enter a name:')\ntext = ' '.join((name, 'is a good boy.'))\nprint(text)", "Enter a name:1\n1 is a good boy.\n" ] ], [ [ "## Case Study: Minimum Number of Coins\n- Develop a program that lets the user enter a total amount, a floating-point value expressed in dollars and cents, and returns the number of dollars, quarters, dimes, nickels, and pennies\n<img src=\"../Photo/22.png\"></img>", "_____no_output_____" ] ], [ [ "amount = eval(input('Enter an amount, for example 11.56: '))\nfenshuAmount = int(amount * 100)\ndollorAmount = fenshuAmount // 100\nremainDollorAmount = fenshuAmount % 100\njiaoAmount = remainDollorAmount // 25\nremainJiaoAmount = remainDollorAmount % 25\nfenAmount = remainJiaoAmount // 10\nremainFenAmount = remainJiaoAmount % 10\nfenAmount2 = remainFenAmount // 5\nremainFenAmount2 = remainFenAmount % 5\nfenFinalAmount = remainFenAmount2 \nprint('Number of dollars:',dollorAmount,'\\n', 'Number of quarters:',\n jiaoAmount, '\\n','Number of dimes:', fenAmount, '\\n','Number of nickels:', fenAmount2,'\\n', 'Number of pennies:',fenFinalAmount)", "_____no_output_____" ], [ "amount = eval(input('Enter an amount, for example 11.56:'))\nremainingAmount = int(amount * 100)\nprint(remainingAmount)\nnumberOfOneDollars = remainingAmount //100\nremainingAmount = remainingAmount % 100\nnumberOfQuarters = remainingAmount // 25\nremainingAmount = remainingAmount % 25\nnumberOfDimes = remainingAmount // 10\nremainingAmount = remainingAmount % 10\nnumberOfNickels = remainingAmount // 5\nremainingAmount = remainingAmount % 5\nnumberOfPennies = remainingAmount\nprint(numberOfOneDollars,numberOfQuarters,numberOfDimes,numberOfNickels,numberOfPennies)", "_____no_output_____" ] ], [ [ "- A weak point of Python is that its handling of floating-point numbers is not very good, but when processing data, Numpy types are used instead\n<img src=\"../Photo/23.png\"></img>", "_____no_output_____" ] ], [ [ "remainingAmount = eval(input('Enter an amount, for example 11.56:'))\nprint(remainingAmount)\nnumberOfOneDollars = remainingAmount //100\nremainingAmount = remainingAmount % 100\nnumberOfQuarters = remainingAmount // 25\nremainingAmount = remainingAmount % 25\nnumberOfDimes = remainingAmount // 10\nremainingAmount = remainingAmount % 10\nnumberOfNickels = remainingAmount // 5\nremainingAmount = remainingAmount % 5\nnumberOfPennies = remainingAmount\nprint(numberOfOneDollars,numberOfQuarters,numberOfDimes,numberOfNickels,numberOfPennies)", "_____no_output_____" ] ], [ [ "## id and type\n- id shows the memory address; it will be used in conditional statements\n- type shows an element's type", "_____no_output_____" ] ], [ [ "#\nid(262) is id(262)", "_____no_output_____" ], [ "a = 100\nid(a)", "_____no_output_____" ], [ "id(True)", "_____no_output_____" ], [ "100 == 100", "_____no_output_____" ], [ "112345678800000000 is '112345678800000000'", "_____no_output_____" ], [ "112345678800000000 is 112345678800000000", "_____no_output_____" ], [ "a = True\nb = False\nprint(id(a), id(b))\na is b\n", "_____no_output_____" ] ], [ [ "## See the book for other formatting statements", "_____no_output_____" ], [ "# Homework\n- 1\n<img src=\"../Photo/24.png\"><img>\n<img src=\"../Photo/25.png\"><img>", "_____no_output_____" ] ], [ [ "import math \nr = eval(input('Enter the length from the center to a vertex:'))\ns = 2*r*math.sin(math.pi / 5)\narea = 5 * s * s / (4 * math.tan(math.pi / 5))\nprint('The area of the pentagon is %.2f' %(area) )", "Enter the length from the center to a vertex:5.5\nThe area of the pentagon is 71.92\n" ] ], [ [ "- 2\n<img src=\"../Photo/26.png\"><img>", "_____no_output_____" ] ], [ [ "import math\nx1,y1 = eval(input('Enter point 1 
(latitude and longitude) in degrees:'))\nx2,y2 = eval(input('Enter point 2 (latitude and longitude) in degrees:'))\nx1 = math.radians(x1)\nx2 = math.radians(x2)\ny1 = math.radians(y1)\ny2 = math.radians(y2)\nradius = 6371.01\nd = radius * math.acos(math.sin(x1) * math.sin(x2) + math.cos(x1) * math.cos(x2) * math.cos(y1-y2))\nprint('The distance between the two points is %f' %(d))", "Enter point 1 (latitude and longitude) in degrees:39.55,-116.25\nEnter point 2 (latitude and longitude) in degrees:41.5,87.37\nThe distance between the two points is 10691.791832\n" ] ], [ [ "- 3\n<img src=\"../Photo/27.png\"><img>", "_____no_output_____" ] ], [ [ "import math\ns = eval(input('Enter the side:'))\narea = 5 * s * s / (4 * math.tan(math.pi / 5))\nprint('The area of the pentagon is %.4f' %(area))", "Enter the side:5.5\nThe area of the pentagon is 52.0444\n" ] ], [ [ "- 4\n<img src=\"../Photo/28.png\"><img>", "_____no_output_____" ] ], [ [ "import math 
\nnumber = eval(input('Enter the number of sides:'))\ns = eval(input('Enter the sides:'))\narea = number * s * s /(4 * math.tan(math.pi / number)) \nprint('The area of the polygon is %f' %(area))", "Enter the number of sides:5\nEnter the sides:6.5\nThe area of the polygon is 72.690170\n" ] ], [ [ "- 5\n<img src=\"../Photo/29.png\"><img>\n<img src=\"../Photo/30.png\"><img>", "_____no_output_____" ] ], [ [ "number = eval(input('Enter an ASCII code :'))\na = chr(number)\nprint('The character is ', a)", "Enter an ASCII code :69\nThe character is E\n" ] ], [ [ "- 6\n<img src=\"../Photo/31.png\"><img>", "_____no_output_____" ] ], [ [ "name = input(\"Enter employee's name :\")\nweektime = eval(input('Enter number of hours worked in a week:'))\nhourly = eval(input('Enter hourly pay rate:'))\nfederal = eval(input('Enter federal tax withholding rate:'))\nstate = eval(input('Enter state tax withholding rate :'))\n\nGrossPay = weektime * hourly\nFederalWithholding = GrossPay * federal\nStateWithholding = GrossPay * state\nTotalDeduction = FederalWithholding + StateWithholding\nNetPay = GrossPay - TotalDeduction\nprint('Employee Name:', name ,'\\n',\n 'Hours Worked:', weektime ,\n '\\n','Pay rate: $', hourly ,\n '\\n','Gross pay: $',GrossPay , \n '\\n','Federal Withholding(20.0%): $',FederalWithholding , \n '\\n','State Withholding(9.0%): $',StateWithholding , \n '\\n','Total Deduction: $',TotalDeduction , \n '\\n','Net Pay:', NetPay )", "Enter employee's name :Smith\nEnter number of hours worked in a week:10\nEnter hourly pay rate:9.75\nEnter federal tax withholding rate:0.20\nEnter state tax withholding rate :0.09\nEmployee Name: Smith \n Hours Worked: 10 \n Pay rate: $ 9.75 \n Gross pay: $ 97.5 \n Federal Withholding(20.0%): $ 19.5 \n State Withholding(9.0%): $ 8.775 \n Total Deduction: $ 28.275 \n Net Pay: 69.225\n" ] ], [ [ "- 7\n<img src=\"../Photo/32.png\"><img>", "_____no_output_____" ] ], [ [ "num = eval(input('Enter an integer:') )\na = num // 1000\nb = num % 1000 // 100\nc = num %100 // 10\nd = num %10\nprint(d,end='')\nprint(c,end='')\nprint(b,end='')\nprint (a,end='')\n#print('The reversed number is %d' %(num))", "Enter an integer:3125\n5213" ] ], [ [ "- 8 Advanced:\n> Encrypt a string of text, and write the decrypted text to a local file", "_____no_output_____" ] ], [ [ "# email encryption\na = '[email protected]'\nres3 = ''\nfor i in a:\n res = ord(i) + 1\n res2 = chr(res)\n res3 = res3 + res2\nprint('Encrypted:',res3)\n\nres4 = ''\nfor i in res3:\n res = ord(i)-1\n res2 = chr(res)\n res4 = res4 + res2\nprint('Decrypted:',res4)\n", "Encrypted: 2686882714Arr/dpn\nDecrypted: [email 
protected]\n" ] ] ]
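Homework 8 also asks for the decrypted text to be written to a local file, which the cell above stops short of doing. A minimal sketch assuming `res3` and `res4` from that cell are in scope; the file name is arbitrary:

```python
# Write the encrypted and decrypted strings to a local text file
with open('cipher_output.txt', 'w', encoding='utf-8') as f:
    f.write('Encrypted: ' + res3 + '\n')
    f.write('Decrypted: ' + res4 + '\n')
```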
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a70008a3893b3bd6d02d0bfe37b9ee1acf2a3c1
254,534
ipynb
Jupyter Notebook
ML models/Models_Runners_Injury_Prediction.ipynb
spyrosviz/Injury_Prediction_MidLong_Distance_Runners
66a525513d223642b07faf377b164907771f64ac
[ "MIT" ]
1
2022-01-13T11:33:10.000Z
2022-01-13T11:33:10.000Z
ML models/Models_Runners_Injury_Prediction.ipynb
spyrosviz/Injury_Prediction_MidLong_Distance_Runners
66a525513d223642b07faf377b164907771f64ac
[ "MIT" ]
null
null
null
ML models/Models_Runners_Injury_Prediction.ipynb
spyrosviz/Injury_Prediction_MidLong_Distance_Runners
66a525513d223642b07faf377b164907771f64ac
[ "MIT" ]
null
null
null
165.604424
32,034
0.835959
[ [ [ "<a href=\"https://colab.research.google.com/github/spyrosviz/Injury_Prediction_MidLong_Distance_Runners/blob/main/ML%20models/Models_Runners_Injury_Prediction.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "# Import Libraries\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.ensemble import GradientBoostingClassifier, BaggingClassifier\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold\nfrom sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import MinMaxScaler\nimport itertools\nfrom collections import Counter\n!pip install imbalanced-learn\nfrom imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN\nfrom imblearn.under_sampling import RandomUnderSampler, TomekLinks\nimport tensorflow as tf", "Requirement already satisfied: imbalanced-learn in /usr/local/lib/python3.7/dist-packages (0.8.1)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from imbalanced-learn) (1.19.5)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from imbalanced-learn) (1.1.0)\nRequirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.7/dist-packages (from imbalanced-learn) (1.4.1)\nRequirement already satisfied: scikit-learn>=0.24 in /usr/local/lib/python3.7/dist-packages (from imbalanced-learn) (1.0.2)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.24->imbalanced-learn) (3.0.0)\n" ] ], [ [ "**Use the following split if you want to hold out a specified number of athletes for train and test set. The last 10 athletes instances were kept for test set.**", "_____no_output_____" ] ], [ [ "'''Import data and hold out a specified test set'''\n\n# Import data from excel, select the first 63 athletes events for train set and the last 10 athletes for test set\ndf = pd.read_excel(r'/content/drive/MyDrive/Runners_Injury_MLproject/Daily_Injury_Clean.xlsx',index_col = [0])\ndf_train = df[df['Athlete ID'] <= 63]\ndf_train.drop(['Date','Athlete ID'],axis=1,inplace=True)\ndf_test = df[df['Athlete ID'] > 63]\ndf_test.drop(['Date','Athlete ID'],axis=1,inplace=True)\n\n# Check if df_train has any equal instances with df_test. 
We expect to return an empty dataframe if they do not share common instances\nprint(df_train[df_test.eq(df_train).all(axis=1)==True])\n\n''' Set y '''\ny_train = df_train['injury'].values\ny_test = df_test['injury'].values\n\n''' Set all columns for X except injury which is the target'''\nX_train = df_train.drop(['injury'],axis=1).values\nX_test = df_test.drop(['injury'],axis=1).values\ncolumn_names = df_train.drop(['injury'],axis=1).columns\n\n#selected_features = ['Total Weekly Distance','Acute Load','Strain','Monotony','injury']\n\n''' Set X after dropping selected features '''\n#X_test = df_test.drop(selected_features,axis=1).values\n#X_train = df_train.drop(selected_features,axis=1).values\n#column_names = df_train.drop(selected_features,axis=1).columns\n\n''' Set selected features as X '''\n#X_train = df_train.loc[:,selected_features].values\n#X_test = df_test.loc[:,selected_features].values\n#column_names = df_train.loc[:,selected_features].columns\n\n# Print dataframes shapes and respective number of healthy and injury events\nprint(column_names)\nprint(Counter(df_train['injury'].values))\nprint(Counter(df_test['injury'].values))", "Empty DataFrame\nColumns: [nr. sessions, total km, km Z3-4, km Z5-T1-T2, km sprinting, strength training, hours alternative, perceived exertion, perceived trainingSuccess, perceived recovery, nr. sessions.1, total km.1, km Z3-4.1, km Z5-T1-T2.1, km sprinting.1, strength training.1, hours alternative.1, perceived exertion.1, perceived trainingSuccess.1, perceived recovery.1, nr. sessions.2, total km.2, km Z3-4.2, km Z5-T1-T2.2, km sprinting.2, strength training.2, hours alternative.2, perceived exertion.2, perceived trainingSuccess.2, perceived recovery.2, nr. sessions.3, total km.3, km Z3-4.3, km Z5-T1-T2.3, km sprinting.3, strength training.3, hours alternative.3, perceived exertion.3, perceived trainingSuccess.3, perceived recovery.3, nr. sessions.4, total km.4, km Z3-4.4, km Z5-T1-T2.4, km sprinting.4, strength training.4, hours alternative.4, perceived exertion.4, perceived trainingSuccess.4, perceived recovery.4, nr. sessions.5, total km.5, km Z3-4.5, km Z5-T1-T2.5, km sprinting.5, strength training.5, hours alternative.5, perceived exertion.5, perceived trainingSuccess.5, perceived recovery.5, nr. sessions.6, total km.6, km Z3-4.6, km Z5-T1-T2.6, km sprinting.6, strength training.6, hours alternative.6, perceived exertion.6, perceived trainingSuccess.6, perceived recovery.6, injury]\nIndex: []\nIndex(['nr. sessions', 'total km', 'km Z3-4', 'km Z5-T1-T2', 'km sprinting',\n 'strength training', 'hours alternative', 'perceived exertion',\n 'perceived trainingSuccess', 'perceived recovery', 'nr. sessions.1',\n 'total km.1', 'km Z3-4.1', 'km Z5-T1-T2.1', 'km sprinting.1',\n 'strength training.1', 'hours alternative.1', 'perceived exertion.1',\n 'perceived trainingSuccess.1', 'perceived recovery.1', 'nr. sessions.2',\n 'total km.2', 'km Z3-4.2', 'km Z5-T1-T2.2', 'km sprinting.2',\n 'strength training.2', 'hours alternative.2', 'perceived exertion.2',\n 'perceived trainingSuccess.2', 'perceived recovery.2', 'nr. sessions.3',\n 'total km.3', 'km Z3-4.3', 'km Z5-T1-T2.3', 'km sprinting.3',\n 'strength training.3', 'hours alternative.3', 'perceived exertion.3',\n 'perceived trainingSuccess.3', 'perceived recovery.3', 'nr. sessions.4',\n 'total km.4', 'km Z3-4.4', 'km Z5-T1-T2.4', 'km sprinting.4',\n 'strength training.4', 'hours alternative.4', 'perceived exertion.4',\n 'perceived trainingSuccess.4', 'perceived recovery.4', 'nr. 
sessions.5',\n 'total km.5', 'km Z3-4.5', 'km Z5-T1-T2.5', 'km sprinting.5',\n 'strength training.5', 'hours alternative.5', 'perceived exertion.5',\n 'perceived trainingSuccess.5', 'perceived recovery.5', 'nr. sessions.6',\n 'total km.6', 'km Z3-4.6', 'km Z5-T1-T2.6', 'km sprinting.6',\n 'strength training.6', 'hours alternative.6', 'perceived exertion.6',\n 'perceived trainingSuccess.6', 'perceived recovery.6'],\n dtype='object')\nCounter({0: 39189, 1: 533})\nCounter({0: 2994, 1: 50})\n" ] ], [ [ "**Use the following dataset split if you want to hold out 2000 random healthy instances and 50 random injury instances**", "_____no_output_____" ] ], [ [ "'''Import data and hold out a random test set'''\n\n# Import data from excel and drop the Date and Athlete ID columns\ndf = pd.read_excel(r'/content/drive/MyDrive/Runners_Injury_MLproject/run_injur_with_acuteloads.xlsx',index_col = [0])\n\n# Hold out a test set with 50 random injury events and 2000 random healthy events \ndf_copy = df.copy()\ndf_copy.drop(['Date','Athlete ID'],axis=1,inplace=True)\ndf_inj = df_copy[df_copy['injury']==1].sample(50,random_state=42)\ndf_uninj = df_copy[df_copy['injury']==0].sample(2000,random_state=42)\ndf_test = pd.concat([df_inj,df_uninj],ignore_index=True)\n\n# Drop the test set from the original dataframe\ndf_train = pd.concat([df_copy,df_test],ignore_index=True).drop_duplicates(keep=False)\n\n# Set X and y\ny_train = df_train['injury'].values\ny_test = df_test['injury'].values\nselected_features = ['Total Weekly Distance','Acute Load','Strain','Monotony','injury']\nX_test = df_test.drop(selected_features,axis=1).values\nX_train = df_train.drop(selected_features,axis=1).values\n#X_train = df_train.loc[:,selected_features].values\n#X_test = df_test.loc[:,selected_features].values\n\n# Check if df_train has any equal instances with df_test. We expect to return an empty dataframe if they do not share common instances\n# Print dataframe shapes and respective number of healthy and injury events\nprint(df_train[df_test.eq(df_train).all(axis=1)==True])\n#print(df_train.drop(['Acute Load','Total Weekly Distance','Monotony','Strain','injury'],axis=1).columns)\nprint(df_train.shape)\nprint(Counter(df_train['injury'].values))\nprint(df_test.shape)\nprint(Counter(df_test['injury'].values))", "Empty DataFrame\nColumns: [nr. sessions, total km, km Z3-4, km Z5-T1-T2, km sprinting, strength training, hours alternative, perceived exertion, perceived trainingSuccess, perceived recovery, nr. sessions.1, total km.1, km Z3-4.1, km Z5-T1-T2.1, km sprinting.1, strength training.1, hours alternative.1, perceived exertion.1, perceived trainingSuccess.1, perceived recovery.1, nr. sessions.2, total km.2, km Z3-4.2, km Z5-T1-T2.2, km sprinting.2, strength training.2, hours alternative.2, perceived exertion.2, perceived trainingSuccess.2, perceived recovery.2, nr. sessions.3, total km.3, km Z3-4.3, km Z5-T1-T2.3, km sprinting.3, strength training.3, hours alternative.3, perceived exertion.3, perceived trainingSuccess.3, perceived recovery.3, nr. sessions.4, total km.4, km Z3-4.4, km Z5-T1-T2.4, km sprinting.4, strength training.4, hours alternative.4, perceived exertion.4, perceived trainingSuccess.4, perceived recovery.4, nr. sessions.5, total km.5, km Z3-4.5, km Z5-T1-T2.5, km sprinting.5, strength training.5, hours alternative.5, perceived exertion.5, perceived trainingSuccess.5, perceived recovery.5, nr. 
sessions.6, total km.6, km Z3-4.6, km Z5-T1-T2.6, km sprinting.6, strength training.6, hours alternative.6, perceived exertion.6, perceived trainingSuccess.6, perceived recovery.6, injury, Acute Load, Total Weekly Distance, Monotony, Strain]\nIndex: []\n(40327, 75)\nCounter({0: 39768, 1: 559})\n(120, 75)\nCounter({0: 100, 1: 20})\n" ], [ "class_imbalance = len(df_train[df_train['injury']==1].values)/len(df_train[df_train['injury']==0].values)\nprint(f'Class imbalance is {class_imbalance}')", "Class imbalance is 0.01360075531399117\n" ] ], [ [ "**Write a function to prettify confusion matrix results.\nThe function was adapted from Daniel Bourke's TensorFlow course**", "_____no_output_____" ] ], [ [ "def plot_confusion_matrix(y_true,y_pred,class_names,figsize=(10,10),text_size=15):\n\n # create the confusion matrix\n cm = confusion_matrix(y_true,y_pred)\n cm_norm = cm.astype('float') / cm.sum(axis=1)[:,np.newaxis] # normalize confusion matrix\n n_classes = cm.shape[0]\n\n fig, ax = plt.subplots(figsize=figsize)\n matrix_plot = ax.matshow(cm, cmap=plt.cm.Blues)\n fig.colorbar(matrix_plot)\n\n # Set labels to be classes\n if class_names:\n labels = class_names\n else:\n labels = np.arange(cm.shape[0])\n\n # Label the axes\n ax.set(title='Confusion Matrix',\n xlabel = 'Predicted Label',\n ylabel = 'True Label',\n xticks = np.arange(n_classes),\n yticks = np.arange(n_classes),\n xticklabels = labels,\n yticklabels = labels)\n\n # Set x axis labels to bottom\n ax.xaxis.set_label_position('bottom')\n ax.xaxis.tick_bottom()\n\n # Adjust label size\n ax.yaxis.label.set_size(text_size)\n ax.xaxis.label.set_size(text_size)\n ax.title.set_size(text_size)\n\n # Set threshold for different colors\n threshold = (cm.max() + cm.min()) / 2\n\n # Plot the text on each cell\n for i, j in itertools.product(range(cm.shape[0]),range(cm.shape[1])):\n plt.text(j,i,f'{cm[i,j]} ({cm_norm[i,j] * 100:.1f}%)',\n horizontalalignment='center',\n color='white' if cm[i,j] > threshold else 'black',\n size = text_size)", "_____no_output_____" ] ], [ [ "Because there is very high class imbalance in the injury variable that we want to predict, we will try the following techniques to overcome this problem and see what works best:\n* **Weighted XGBoost**\n* **XGBoost with the SMOTE algorithm for Resampling**\n* **XGBoost model with Random Resampling**\n* **Bagging XGBoost model with Random Resampling**\n* **Neural Networks model with Random Undersampling**", "_____no_output_____" ] ], [ [ "# Set X and y with different resampling methods\n\n'''SMOTE algorithm for oversampling 15% ratio and random undersampling 1-1 ratio'''\n# Oversample the minority class to have a number of instances equal to 15% of the majority class\nsmote = SMOTE(sampling_strategy=0.15,random_state=1)\nX_sm,y_sm = smote.fit_resample(X_train,y_train)\n\n# Downsample the majority class to have a number of instances equal to that of the minority class\nundersamp = RandomUnderSampler(sampling_strategy=1,random_state=1)\nX_smus,y_smus = undersamp.fit_resample(X_sm,y_sm)\n\n'''Random oversampling 10% ratio and random undersampling 1-1 ratio'''\n# Random over sampler for the minority class to a 1:10 class ratio\nros = RandomOverSampler(sampling_strategy=0.1,random_state=21)\nX_ros,y_ros = ros.fit_resample(X_train,y_train)\n\n# Undersample the majority class to have a number of instances equal to that of the minority class\nundersamp = RandomUnderSampler(sampling_strategy=1,random_state=21)\nX_rosus,y_rosus = undersamp.fit_resample(X_ros,y_ros)\n\n'''Random undersampling 1-1 ratio'''\n# 
Random under sampler for the majority class to a 1:1 class ratio\nrus = RandomUnderSampler(sampling_strategy=1,random_state=21)\nX_rus,y_rus = rus.fit_resample(X_train,y_train)\n\n'''Tomek Links Undersampling'''\ntmkl = TomekLinks()\nX_tmk, y_tmk = tmkl.fit_resample(X_train,y_train)\n\n'''ADASYN for oversampling 15% ratio and random undersampler 1-1 ratio'''\n# ADASYN oversample the minority class to 15% of the majority class\nadasyn = ADASYN(sampling_strategy=0.15,random_state=21)\nX_ada, y_ada = adasyn.fit_resample(X_train,y_train)\n\n# Random undersample the majority class to have the same number of instances as the minority class\nadarus = RandomUnderSampler(sampling_strategy=1,random_state=21)\nX_adarus,y_adarus = adarus.fit_resample(X_ada,y_ada)", "_____no_output_____" ], [ "# Stratified cross-validation\n\ncv = StratifiedKFold(n_splits=5,shuffle=True,random_state=21)", "_____no_output_____" ] ], [ [ "## 1) Weighted XGBoost Model", "_____no_output_____" ] ], [ [ "'''Weighted XGBoost'''\n\n# We will use the scale_pos_weight argument of the xgboost algorithm, which increases the error for wrong positive class predictions.\n# The xgboost documentation suggests that the optimal value for the scale_pos_weight argument is usually around\n# sum(negative instances)/sum(positive instances). We will use RandomizedSearchCV to find the optimal value\n\nxgb_weight = XGBClassifier()\nparam_grid_weight = {\"gamma\":[0.01,0.1,1,10,50,100,1000],'reg_lambda':[1,5,10,20],\n 'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1),'scale_pos_weight':[60,70,80,90,100]}\ngscv_weight = RandomizedSearchCV(xgb_weight,param_distributions=param_grid_weight,cv=cv,scoring='roc_auc')\ngscv_weight.fit(X_train,y_train)\nprint(\"Best param is {}\".format(gscv_weight.best_params_))\nprint(\"Best score is {}\".format(gscv_weight.best_score_))\n\noptimal_gamma = gscv_weight.best_params_['gamma']\noptimal_reg_lambda = gscv_weight.best_params_['reg_lambda']\noptim_lr = gscv_weight.best_params_['learning_rate']\noptimal_eta = gscv_weight.best_params_['eta']\noptimal_scale_pos_weight = gscv_weight.best_params_['scale_pos_weight']", "Best param is {'scale_pos_weight': 90, 'reg_lambda': 20, 'learning_rate': 0.23, 'gamma': 0.01, 'eta': 0.6}\nBest score is 0.5895046129151751\n" ], [ "tuned_xgb_weight = XGBClassifier(gamma=optimal_gamma,learning_rate=optim_lr,eta=optimal_eta,reg_lambda=optimal_reg_lambda,scale_pos_weight=optimal_scale_pos_weight,\n colsample_bytree=0.5,min_child_weight=90,objective='binary:logistic',subsample=0.5)\ntuned_xgb_weight.fit(X_train,y_train,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)])", "[0]\tvalidation_0-auc:0.553711\nWill train until validation_0-auc hasn't improved in 10 
rounds.\n[1]\tvalidation_0-auc:0.602144\n[2]\tvalidation_0-auc:0.637224\n[3]\tvalidation_0-auc:0.634626\n[4]\tvalidation_0-auc:0.617141\n[5]\tvalidation_0-auc:0.615595\n[6]\tvalidation_0-auc:0.611754\n[7]\tvalidation_0-auc:0.608607\n[8]\tvalidation_0-auc:0.609275\n[9]\tvalidation_0-auc:0.621176\n[10]\tvalidation_0-auc:0.621847\n[11]\tvalidation_0-auc:0.62831\n[12]\tvalidation_0-auc:0.640124\n[13]\tvalidation_0-auc:0.638153\n[14]\tvalidation_0-auc:0.637291\n[15]\tvalidation_0-auc:0.629035\n[16]\tvalidation_0-auc:0.627248\n[17]\tvalidation_0-auc:0.631844\n[18]\tvalidation_0-auc:0.633373\n[19]\tvalidation_0-auc:0.638343\n[20]\tvalidation_0-auc:0.650511\n[21]\tvalidation_0-auc:0.652669\n[22]\tvalidation_0-auc:0.65689\n[23]\tvalidation_0-auc:0.655568\n[24]\tvalidation_0-auc:0.660691\n[25]\tvalidation_0-auc:0.661593\n[26]\tvalidation_0-auc:0.660438\n[27]\tvalidation_0-auc:0.66168\n[28]\tvalidation_0-auc:0.664706\n[29]\tvalidation_0-auc:0.673297\n[30]\tvalidation_0-auc:0.669689\n[31]\tvalidation_0-auc:0.663056\n[32]\tvalidation_0-auc:0.670825\n[33]\tvalidation_0-auc:0.680885\n[34]\tvalidation_0-auc:0.689385\n[35]\tvalidation_0-auc:0.702438\n[36]\tvalidation_0-auc:0.699613\n[37]\tvalidation_0-auc:0.700962\n[38]\tvalidation_0-auc:0.697241\n[39]\tvalidation_0-auc:0.689973\n[40]\tvalidation_0-auc:0.688597\n[41]\tvalidation_0-auc:0.682993\n[42]\tvalidation_0-auc:0.684068\n[43]\tvalidation_0-auc:0.68815\n[44]\tvalidation_0-auc:0.692639\n[45]\tvalidation_0-auc:0.68813\nStopping. Best iteration:\n[35]\tvalidation_0-auc:0.702438\n\n" ], [ "# Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity\n\ny_pred = tuned_xgb_weight.predict(X_test)\n\nprint(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_weight.predict_proba(X_test)[:,1])}')\n\n# Compute true positives, true negatives, false negatives and false positives\ntp = confusion_matrix(y_test,y_pred)[1,1]\ntn = confusion_matrix(y_test,y_pred)[0,0]\nfn = confusion_matrix(y_test,y_pred)[1,0]\nfp = confusion_matrix(y_test,y_pred)[0,1]\n\n# Compute sensitivity and specificity\nsensitivity = tp / (tp + fn)\nspecificity = tn / (tn + fp)\nprint(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%')\n\nplot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events'])", "Area under curve score is 0.702438209752839\nSensitivity is 76.0% and specificity is 60.120240480961925%\n" ] ], [ [ "## 2) XGBoost Model with SMOTE combined with Random Undersampling", "_____no_output_____" ] ], [ [ "'''XGBoost Classifier and SMOTE (Synthetic Minority Oversampling Technique) combined with Random Undersampling'''\n\n# Check the number of instances for each class before and after resampling\nprint(Counter(y_train))\nprint(Counter(y_smus))\n\nxgb_sm = XGBClassifier()\nparam_grid_sm = {\"gamma\":[0.01,0.1,1,10,50,100,1000],'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1),'reg_lambda':[1,5,10,20]}\ngscv_sm = RandomizedSearchCV(xgb_sm,param_distributions=param_grid_sm,cv=5,scoring='roc_auc')\ngscv_sm.fit(X_smus,y_smus)\nprint(\"Best param is {}\".format(gscv_sm.best_params_))\nprint(\"Best score is {}\".format(gscv_sm.best_score_))\n\noptimal_gamma = gscv_sm.best_params_['gamma']\noptim_lr = gscv_sm.best_params_['learning_rate']\noptimal_eta = gscv_sm.best_params_['eta']\noptimal_lambda = gscv_sm.best_params_['reg_lambda']", "Counter({0: 39189, 1: 533})\nCounter({0: 5878, 1: 5878})\nBest param is {'reg_lambda': 20, 'learning_rate': 0.7100000000000001, 
'gamma': 1, 'eta': 0.4}\nBest score is 0.963961113769578\n" ], [ "tuned_xgb_sm = XGBClassifier(gamma=optimal_gamma,learning_rate=optim_lr,eta=optimal_eta,reg_lambda=optimal_lambda,subsample=0.4,\n colsample_bytree=0.6,min_child_weight=90,objective='binary:logistic')\ntuned_xgb_sm.fit(X_smus,y_smus,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)])", "[0]\tvalidation_0-auc:0.611192\nWill train until validation_0-auc hasn't improved in 10 rounds.\n[1]\tvalidation_0-auc:0.660758\n[2]\tvalidation_0-auc:0.661787\n[3]\tvalidation_0-auc:0.649977\n[4]\tvalidation_0-auc:0.640434\n[5]\tvalidation_0-auc:0.656112\n[6]\tvalidation_0-auc:0.649071\n[7]\tvalidation_0-auc:0.651012\n[8]\tvalidation_0-auc:0.653036\n[9]\tvalidation_0-auc:0.648056\n[10]\tvalidation_0-auc:0.662241\n[11]\tvalidation_0-auc:0.650558\n[12]\tvalidation_0-auc:0.652749\n[13]\tvalidation_0-auc:0.641199\n[14]\tvalidation_0-auc:0.614653\n[15]\tvalidation_0-auc:0.625721\n[16]\tvalidation_0-auc:0.637836\n[17]\tvalidation_0-auc:0.640922\n[18]\tvalidation_0-auc:0.642184\n[19]\tvalidation_0-auc:0.638063\n[20]\tvalidation_0-auc:0.630595\nStopping. Best iteration:\n[10]\tvalidation_0-auc:0.662241\n\n" ], [ "# Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity\n\ny_pred = tuned_xgb_sm.predict(X_test)\n\nprint(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_sm.predict_proba(X_test)[:,1])}')\n\n# Compute true positives, true negatives, false negatives and false positives\ntp = confusion_matrix(y_test,y_pred)[1,1]\ntn = confusion_matrix(y_test,y_pred)[0,0]\nfn = confusion_matrix(y_test,y_pred)[1,0]\nfp = confusion_matrix(y_test,y_pred)[0,1]\n\n# Compute sensitivity and specificity\nsensitivity = tp / (tp + fn)\nspecificity = tn / (tn + fp)\nprint(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%')\n\nplot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events'])", "Area under curve score is 0.6622411489645958\nSensitivity is 34.0% and specificity is 81.83032732130928%\n" ] ], [ [ "## 3) XGBoost Model with Random Resampling", "_____no_output_____" ] ], [ [ "'''XGBoost Classifier and Random Oversampling combined with Undersampling'''\n\n# Check the number of instances for each class before and after resampling\nprint(Counter(y_train))\nprint(Counter(y_rosus))\n\nxgb_rus = XGBClassifier()\nparam_grid_rus = {\"gamma\":[0.01,0.1,1,10,50,100,1000],'reg_lambda':[1,5,10,20],'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1)}\ngscv_rus = RandomizedSearchCV(xgb_rus,param_distributions=param_grid_rus,cv=5,scoring='roc_auc')\ngscv_rus.fit(X_rosus,y_rosus)\nprint(\"Best param is {}\".format(gscv_rus.best_params_))\nprint(\"Best score is {}\".format(gscv_rus.best_score_))\n\noptimal_gamma = gscv_rus.best_params_['gamma']\noptimal_reg_lambda = gscv_rus.best_params_['reg_lambda']\noptim_lr = gscv_rus.best_params_['learning_rate']\noptimal_eta = gscv_rus.best_params_['eta']", "Counter({0: 39189, 1: 533})\nCounter({0: 3918, 1: 3918})\nBest param is {'reg_lambda': 5, 'learning_rate': 0.6900000000000001, 'gamma': 1, 'eta': 0.2}\nBest score is 0.9601002712690754\n" ], [ "tuned_xgb_rus = XGBClassifier(gamma=optimal_gamma,reg_lambda=optimal_reg_lambda,learning_rate=optim_lr,eta=optimal_eta,\n colsample_bytree=0.7,min_child_weight=9,objective='binary:logistic',subsample=0.8)\ntuned_xgb_rus.fit(X_rosus,y_rosus,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)])", "[0]\tvalidation_0-auc:0.579669\nWill train until 
validation_0-auc hasn't improved in 10 rounds.\n[1]\tvalidation_0-auc:0.588874\n[2]\tvalidation_0-auc:0.546109\n[3]\tvalidation_0-auc:0.555107\n[4]\tvalidation_0-auc:0.570431\n[5]\tvalidation_0-auc:0.585691\n[6]\tvalidation_0-auc:0.631974\n[7]\tvalidation_0-auc:0.642722\n[8]\tvalidation_0-auc:0.659005\n[9]\tvalidation_0-auc:0.658353\n[10]\tvalidation_0-auc:0.657468\n[11]\tvalidation_0-auc:0.664315\n[12]\tvalidation_0-auc:0.631533\n[13]\tvalidation_0-auc:0.623631\n[14]\tvalidation_0-auc:0.601069\n[15]\tvalidation_0-auc:0.592779\n[16]\tvalidation_0-auc:0.589365\n[17]\tvalidation_0-auc:0.59849\n[18]\tvalidation_0-auc:0.598851\n[19]\tvalidation_0-auc:0.590935\n[20]\tvalidation_0-auc:0.583714\n[21]\tvalidation_0-auc:0.577822\nStopping. Best iteration:\n[11]\tvalidation_0-auc:0.664315\n\n" ], [ "# Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity\n\ny_pred = tuned_xgb_rus.predict(X_test)\n\nprint(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_rus.predict_proba(X_test)[:,1])}')\n\n# Compute true positives, true negatives, false negatives and false positives\ntp = confusion_matrix(y_test,y_pred)[1,1]\ntn = confusion_matrix(y_test,y_pred)[0,0]\nfn = confusion_matrix(y_test,y_pred)[1,0]\nfp = confusion_matrix(y_test,y_pred)[0,1]\n\n# Compute sensitivity and specificity\nsensitivity = tp / (tp + fn)\nspecificity = tn / (tn + fp)\nprint(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%')\n\nplot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events'])", "Area under curve score is 0.664315297261189\nSensitivity is 60.0% and specificity is 60.52104208416834%\n" ] ], [ [ "## 4) Bagging Model with XGBoost base estimators and Random Resampling", "_____no_output_____" ] ], [ [ "'''Bagging Classifier with XGBoost base estimators and Random Undersampling with combined Oversampling'''\n\n# Check the number of instances for each class before and after resampling\nprint(Counter(y_train))\nprint(Counter(y_rosus))\n\nbase_est = XGBClassifier(gamma=optimal_gamma,reg_lambda=optimal_reg_lambda,learning_rate=optim_lr,eta=optimal_eta,\n colsample_bytree=0.6,min_child_weight=90,objective='binary:logistic',subsample=0.8,n_estimators=11)\n\n# XGBoost base classifier\n#base_est = XGBClassifier(n_estimators=512,learning_rate=0.01,max_depth=3)\n\n# Bagging XGBoost Classifier\nbagg = BaggingClassifier(base_estimator=base_est,n_estimators=9,max_samples=2048,random_state=21)\n\n# Platt's Scaling to get probability outputs\ncalib_clf = CalibratedClassifierCV(bagg,cv=5)", "Counter({0: 39189, 1: 533})\nCounter({0: 3918, 1: 3918})\n" ], [ "# Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity\n# You can adjust threshold_prob to bias sensitivity at the cost of specificity 
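(for example, a hypothetical threshold_prob = 0.3 would flag more borderline cases as injuries). 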
It is set to the default of 0.5\ncalib_clf.fit(X_rosus,y_rosus)\ny_pred_calib = calib_clf.predict_proba(X_test)\nthreshold_prob = 0.5\ny_pred = []\nfor y_hat in y_pred_calib:\n if y_hat[1] > threshold_prob:\n y_pred.append(1)\n else:\n y_pred.append(0)\n\nprint(f'Area under curve score is {roc_auc_score(y_test,calib_clf.predict_proba(X_test)[:,1])}')\n\n# Compute true positives, true negatives, false negatives and false positives\ntp = confusion_matrix(y_test,np.array(y_pred))[1,1]\ntn = confusion_matrix(y_test,np.array(y_pred))[0,0]\nfn = confusion_matrix(y_test,np.array(y_pred))[1,0]\nfp = confusion_matrix(y_test,np.array(y_pred))[0,1]\n\n# Compute sensitivity and specificity\nsensitivity = tp / (tp + fn)\nspecificity = tn / (tn + fp)\nprint(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%')\n\n# Plot confusion matrix\nplot_confusion_matrix(y_true=y_test, y_pred=np.array(y_pred), class_names=['Healthy events','Injury events'])", "Area under curve score is 0.6407682030728122\nSensitivity is 68.0% and specificity is 46.0253841015364%\n" ] ], [ [ "## 5) Neural Networks Model", "_____no_output_____" ] ], [ [ "'''Neural Networks Model'''\n\n# Check the number of instances for each class before and after resampling\nprint(Counter(y_train))\nprint(Counter(y_rus))\n\n# Scale X data\nX_scaled_rus = MinMaxScaler().fit_transform(X_rus)\nX_scaled_test = MinMaxScaler().fit_transform(X_test)\n\n# set random seed for reproducibility\n\ntf.random.set_seed(24)\n\n# create model with 9 hidden layers (128 down to 32 neurons) and 1 output layer\n\nnn_model = tf.keras.Sequential([tf.keras.layers.Dense(128,activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1,activation=\"sigmoid\")\n])\n\n# compile model\n\nnn_model.compile(loss=\"binary_crossentropy\",\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.002),\n metrics=['AUC'])\n\n# set callback to stop early if the loss doesn't improve for 3 epochs, then fit the training data\n\ncallback = tf.keras.callbacks.EarlyStopping(monitor='loss',patience=3)\nhistory = nn_model.fit(X_scaled_rus,y_rus,epochs=10,batch_size=32,callbacks=[callback])\n\n# Evaluate model performance on test set, with AUC, confusion matrix, sensitivity and specificity\n\ny_prob_pred = nn_model.predict(X_scaled_test)\ny_pred = []\nfor i in y_prob_pred:\n if i <=0.5:\n y_pred.append(0)\n else:\n y_pred.append(1)\n\ny_pred = np.array(y_pred)\nprint(y_pred[y_pred>1])\n\n# Compute true positives, true negatives, false negatives and false positives\ntp = confusion_matrix(y_test,np.array(y_pred))[1,1]\ntn = confusion_matrix(y_test,np.array(y_pred))[0,0]\nfn = confusion_matrix(y_test,np.array(y_pred))[1,0]\nfp = confusion_matrix(y_test,np.array(y_pred))[0,1]\n\n# Compute sensitivity and specificity\nsensitivity = tp / (tp + fn)\nspecificity = tn / (tn + fp)\nprint(f'Sensitivity is {sensitivity*100}% and specificity is 
{specificity*100}%')\n\n# Plot confusion matrix\nplot_confusion_matrix(y_true=y_test, y_pred=np.array(y_pred), class_names=['Healthy events','Injury events'])\n\n# evaluate the model\n\nprint(f'Area Under Curve is {nn_model.evaluate(X_scaled_test,y_test)[1]}')", "Counter({0: 39189, 1: 533})\nCounter({0: 533, 1: 533})\nEpoch 1/10\n34/34 [==============================] - 2s 5ms/step - loss: 0.6888 - auc: 0.5756\nEpoch 2/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.6676 - auc: 0.6319\nEpoch 3/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.6598 - auc: 0.6603\nEpoch 4/10\n34/34 [==============================] - 0s 5ms/step - loss: 0.6473 - auc: 0.6753\nEpoch 5/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.6260 - auc: 0.7046\nEpoch 6/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.6171 - auc: 0.7151\nEpoch 7/10\n34/34 [==============================] - 0s 5ms/step - loss: 0.5850 - auc: 0.7570\nEpoch 8/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.5874 - auc: 0.7611\nEpoch 9/10\n34/34 [==============================] - 0s 5ms/step - loss: 0.5561 - auc: 0.7883\nEpoch 10/10\n34/34 [==============================] - 0s 4ms/step - loss: 0.5402 - auc: 0.8036\n[]\nSensitivity is 76.0% and specificity is 43.7875751503006%\n96/96 [==============================] - 1s 2ms/step - loss: 0.9479 - auc: 0.5771\nArea Under Curve is 0.5771442651748657\n" ], [ "'''Find optimal Learning Rate for nn_model'''\n\n# set random seed for reproducibility\n\ntf.random.set_seed(24)\n\n# create the same model with 9 hidden layers and 1 output layer\n\nnn_model = tf.keras.Sequential([tf.keras.layers.Dense(128,activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1,activation=\"sigmoid\")\n])\n\n# compile model\n\nnn_model.compile(loss=\"binary_crossentropy\",\n optimizer=tf.keras.optimizers.Adam(),\n metrics=[\"AUC\"])\n\n# schedule the learning rate to grow exponentially each epoch, then fit the training data\n\nlr_scheduler = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-4 * 10 ** (epoch/20))\nhistory = nn_model.fit(X_scaled_rus,y_rus,epochs=30,callbacks=[lr_scheduler])\n\n# plot loss vs learning rate to find the optimal learning rate\n\nplt.figure(figsize=[10,10])\nplt.semilogx(1e-4 * (10 ** (tf.range(30)/20)),history.history[\"loss\"])\nplt.ylabel(\"Loss\")\nplt.title(\"Learning Rate vs Loss\")\nplt.show()", "Epoch 1/30\n34/34 [==============================] - 1s 4ms/step - loss: 0.6922 - auc: 0.5587 - lr: 1.0000e-04\nEpoch 2/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6902 - auc: 0.6004 - lr: 1.1220e-04\nEpoch 3/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6873 - auc: 0.6195 - lr: 1.2589e-04\nEpoch 4/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.6832 - auc: 0.6329 - lr: 
1.4125e-04\nEpoch 5/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.6775 - auc: 0.6332 - lr: 1.5849e-04\nEpoch 6/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.6719 - auc: 0.6248 - lr: 1.7783e-04\nEpoch 7/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.6571 - auc: 0.6605 - lr: 1.9953e-04\nEpoch 8/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6502 - auc: 0.6650 - lr: 2.2387e-04\nEpoch 9/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.6382 - auc: 0.6910 - lr: 2.5119e-04\nEpoch 10/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6376 - auc: 0.6889 - lr: 2.8184e-04\nEpoch 11/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6175 - auc: 0.7164 - lr: 3.1623e-04\nEpoch 12/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.6024 - auc: 0.7362 - lr: 3.5481e-04\nEpoch 13/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5992 - auc: 0.7376 - lr: 3.9811e-04\nEpoch 14/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.5812 - auc: 0.7607 - lr: 4.4668e-04\nEpoch 15/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5653 - auc: 0.7757 - lr: 5.0119e-04\nEpoch 16/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5378 - auc: 0.8049 - lr: 5.6234e-04\nEpoch 17/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5564 - auc: 0.7892 - lr: 6.3096e-04\nEpoch 18/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5017 - auc: 0.8349 - lr: 7.0795e-04\nEpoch 19/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.5116 - auc: 0.8278 - lr: 7.9433e-04\nEpoch 20/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.5240 - auc: 0.8161 - lr: 8.9125e-04\nEpoch 21/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4433 - auc: 0.8761 - lr: 0.0010\nEpoch 22/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4512 - auc: 0.8694 - lr: 0.0011\nEpoch 23/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4082 - auc: 0.8970 - lr: 0.0013\nEpoch 24/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4413 - auc: 0.8749 - lr: 0.0014\nEpoch 25/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4136 - auc: 0.8933 - lr: 0.0016\nEpoch 26/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4046 - auc: 0.8969 - lr: 0.0018\nEpoch 27/30\n34/34 [==============================] - 0s 5ms/step - loss: 0.3550 - auc: 0.9217 - lr: 0.0020\nEpoch 28/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.3721 - auc: 0.9141 - lr: 0.0022\nEpoch 29/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.3717 - auc: 0.9139 - lr: 0.0025\nEpoch 30/30\n34/34 [==============================] - 0s 4ms/step - loss: 0.4066 - auc: 0.8946 - lr: 0.0028\n" ], [ "'''Cross-validation on nn_model'''\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\ntf.random.set_seed(24)\n\ndef create_nn_model():\n\n # create model with 9 hidden layers and 1 output layer\n\n nn_model = tf.keras.Sequential([tf.keras.layers.Dense(128,activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(128,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n 
#tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(64,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(32,activation=\"relu\"),\n #tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1,activation=\"sigmoid\")\n ])\n\n # compile model\n\n nn_model.compile(loss=\"binary_crossentropy\",\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.002),\n metrics=[\"AUC\"])\n \n return nn_model\n\n\nneural_network = KerasClassifier(build_fn=create_nn_model, \n epochs=10)\n\n# Evaluate neural network using 5-fold cross-validation\ncv = StratifiedKFold(n_splits=5,shuffle=True,random_state=1)\ncross_val_score(neural_network, X_scaled_rus, y_rus, scoring='roc_auc', cv=cv)", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:42: DeprecationWarning: KerasClassifier is deprecated, use Sci-Keras (https://github.com/adriangb/scikeras) instead.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a700aa91ef158e219c56f37c271f9b73c0e456a
3,773
ipynb
Jupyter Notebook
5-sem/multimedia-processing/exercises/esercitazione-1/Esercitazione 1.ipynb
denysvitali/supsi-i3b-sem1
d6b3b1b24749101b01357d1bec34f67bd31f56ad
[ "MIT" ]
2
2019-01-10T19:44:07.000Z
2019-02-10T17:47:49.000Z
5-sem/multimedia-processing/exercises/esercitazione-1/Esercitazione 1.ipynb
denysvitali/supsi-i3b-sem1
d6b3b1b24749101b01357d1bec34f67bd31f56ad
[ "MIT" ]
null
null
null
5-sem/multimedia-processing/exercises/esercitazione-1/Esercitazione 1.ipynb
denysvitali/supsi-i3b-sem1
d6b3b1b24749101b01357d1bec34f67bd31f56ad
[ "MIT" ]
null
null
null
47.1625
429
0.635303
[ [ [ "# Esercitazione 1\n## Esercizio\n\n### Analisi del segnale\nApriamo il segnale da analizzare con [Audacity](https://www.audacityteam.org/). \nAscoltandolo possiamo chiaramente riconoscere una sequenza di tasti premuti su un tastierino telefonico, anche conosciuto come [DTMF](https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling) \n \n#### Analisi dello spettro di frequenze\nAnalizzandone lo spettro di frequenze, possiamo notare come la sequenza appare in modo relativamente chiaro. Possiamo altresì notare che il segnale contiene molto rumore.\n\n![Spettro di Frequenze](spectrogram-1.png) \n \n#### Pulizia del segnale\nPossiamo ora procedere alla rimozione del rumore di fondo, prendendo un campione di rumore ed utilizzando la funzionalità di *Noise Reduction* offerta da Audacity.\n\n![Riduzione del rumore - campionamento](noise-reduction-1.png) \n\nPrendiamo dapprima un campione del rumore, selezioniamo `Effect => Noise Reduction => Get Noise Profile` per ottenere un profilo del rumore. Selezioniamo ora la traccia intera, riapriamo il menù di riduzione del rumore ed impostiamo i parametri in modo da ottenere solo i toni DTMF quando clicchiamo su \"Preview\" (nel mio caso, Noise Reduction impostata a 48dB, Sensitivity impostata a 19.50 e frequency smoothing a 0)\n\nOtteniamo così uno spettrogramma decisamente più pulito, e possiamo quindi procedere all'analisi: \n \n![Spettrogramma \"pulito\"](spectrogram-2.png)\n \n \n#### Analisi dei toni\nPossiamo ora procedere all'analisi dei toni nel modo seguente: selezioniamo una parte del segnale nel quale è visibilmente presente un picco di una o più frequenze (parte più rossa nello spettrogramma precedente). Dopodiché usiamo lo strumento \"Plot Spectrum\" in \"Analyze\" e ricaviamo le frequenze analizzando i picchi: \n \n![Plot Spectrum](plot-spectrum.png)\n \n \nIn questo caso, possiamo ricavare le due frequenze dominanti, che risultano essere 853 Hz (-19.9 dB) e 1477 Hz (-18.2 db). \nConfrontandole con la tabella DTMF, possiamo dire con certezza che il primo tono rappresenta un 9.\n\n| | 1209 Hz | 1336 Hz | 1477 Hz | 1644 Hz |\n|-|------------|------------|------------|-------------|\n|697 Hz | 1 | 2 | 3 | A |\n|770 Hz | 4 | 5 | 6 | B |\n|852 Hz | 7 | 8 | **9** | C |\n|941 Hz | * | 0 | # | D |\n\nPossiamo ricavare allo stesso modo il resto della sequenza, che risulta essere: \n9, 3, 3, 5, 5, 5, 5, 5, 5, ?, 3, 6, 6, 6, 6, 6, 3, 3 \nSe però consideriamo singoli i numeri, allora la sequenza diventa: \n9, 3, 5, 5, ?, 3, 6, 6, 3\n\n### Ipotesi di risoluzione del problema\nPer risolvere il problema, si potrebbe analizzare il file wav tramite un software, effettuare l'FFT e ricavare il picco di frequenze, confrontandolo alla lista di frequenze ammesse dal DTMF. Se risulta una combinazione valida, possiamo salvarla, altrimenti la ignoriamo.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
4a701df4e89e6fce89376b2c0640d43fe28ecce0
154,876
ipynb
Jupyter Notebook
demos/node-classification/keras-node2vec-node-classification.ipynb
lyubov888L/stellargraph
cc15f176c6658d122d30cf7af3e08d3e139b3974
[ "Apache-2.0" ]
null
null
null
demos/node-classification/keras-node2vec-node-classification.ipynb
lyubov888L/stellargraph
cc15f176c6658d122d30cf7af3e08d3e139b3974
[ "Apache-2.0" ]
null
null
null
demos/node-classification/keras-node2vec-node-classification.ipynb
lyubov888L/stellargraph
cc15f176c6658d122d30cf7af3e08d3e139b3974
[ "Apache-2.0" ]
null
null
null
220.307255
132,444
0.91632
[ [ [ "# Node classification with Node2Vec using Stellargraph components", "_____no_output_____" ], [ "<table><tr><td>Run the latest release of this notebook:</td><td><a href=\"https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/node-classification/keras-node2vec-node-classification.ipynb\" alt=\"Open In Binder\" target=\"_parent\"><img src=\"https://mybinder.org/badge_logo.svg\"/></a></td><td><a href=\"https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/node-classification/keras-node2vec-node-classification.ipynb\" alt=\"Open In Colab\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\"/></a></td></tr></table>", "_____no_output_____" ], [ "This example demonstrates how to perform node classification with Node2Vec using the Stellargraph components. This uses a keras implementation of Node2Vec available in stellargraph instead of the reference implementation provided by ``gensim``.\n\n<a name=\"refs\"></a>\n**References**\n\n[1] Node2Vec: Scalable Feature Learning for Networks. A. Grover, J. Leskovec. ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), 2016. ([link](https://snap.stanford.edu/node2vec/))\n\n[2] Distributed representations of words and phrases and their compositionality. T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean. In Advances in Neural Information Processing Systems (NIPS), pp. 3111-3119, 2013. ([link](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf))\n\n[3] word2vec Parameter Learning Explained. X. Rong. arXiv preprint arXiv:1411.2738. 2014 Nov 11. ([link](https://arxiv.org/pdf/1411.2738.pdf))", "_____no_output_____" ], [ "## Introduction\nFollowing word2vec [2,3], for each (``target``,``context``) node pair $(v_i,v_j)$ collected from random walks, we learn the representation for the target node $v_i$ by using it to predict the existence of context node $v_j$, with the following three-layer neural network.", "_____no_output_____" ], [ "![](word2vec-illustration.png)", "_____no_output_____" ], [ "Node $v_i$'s representation in the hidden layer is obtained by multiplying $v_i$'s one-hot representation in the input layer with the input-to-hidden weight matrix $W_{in}$, which is equivalent to look up the $i$th row of input-to-hidden weight matrix $W_{in}$. The existence probability of each node conditioned on node $v_i$ is outputted in the output layer, which is obtained by multiplying $v_i$'s hidden-layer representation with the hidden-to-out weight matrix $W_{out}$ followed by a softmax activation. To capture the ``target-context`` relation between $v_i$ and $v_j$, we need to maximize the probability $\\mathrm{P}(v_j|v_i)$. However, computing $\\mathrm{P}(v_j|v_i)$ is time consuming, which involves the matrix multiplication between $v_i$'s hidden-layer representation and the hidden-to-out weight matrix $W_{out}$. ", "_____no_output_____" ], [ "To speed up the computing, we adopt the negative sampling strategy [2,3]. For each (``target``, ``context``) node pair, we sample a negative node $v_k$, which is not $v_i$'s context. 
To obtain the output, instead of multiplying $v_i$'s hidden-layer representation with the hidden-to-out weight matrix $W_{out}$ followed by a softmax activation, we only calculate the dot product between $v_i$'s hidden-layer representation and the $j$th column as well as the $k$th column of the hidden-to-output weight matrix $W_{out}$, each followed by a sigmoid activation. According to [3], the original objective to maximize $\\mathrm{P}(v_j|v_i)$ can be approximated by minimizing the cross entropy between $v_j$ and $v_k$'s outputs and their ground-truth labels (1 for $v_j$ and 0 for $v_k$).", "_____no_output_____" ], [ "Following [2,3], we denote the rows of the input-to-hidden weight matrix $W_{in}$ as ``input_embeddings`` and the columns of the hidden-to-out weight matrix $W_{out}$ as ``output_embeddings``. To build the Node2Vec model, we need to look up ``input_embeddings`` for target nodes and ``output_embeddings`` for context nodes and calculate their inner product together with a sigmoid activation.", "_____no_output_____" ] ], [ [ "# install StellarGraph if running on Google Colab\nimport sys\nif 'google.colab' in sys.modules:\n %pip install -q stellargraph[demos]==1.1.0b", "_____no_output_____" ], [ "# verify that we're using the correct version of StellarGraph for this notebook\nimport stellargraph as sg\n\ntry:\n sg.utils.validate_notebook_version(\"1.1.0b\")\nexcept AttributeError:\n raise ValueError(\n f\"This notebook requires StellarGraph version 1.1.0b, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>.\"\n ) from None", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nfrom sklearn.manifold import TSNE\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.metrics import accuracy_score\n\nimport os\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom tensorflow import keras\n\nfrom stellargraph import StellarGraph\nfrom stellargraph.data import BiasedRandomWalk\nfrom stellargraph.data import UnsupervisedSampler\nfrom stellargraph.mapper import Node2VecLinkGenerator, Node2VecNodeGenerator\nfrom stellargraph.layer import Node2Vec, link_classification\n\nfrom stellargraph import datasets\nfrom IPython.display import display, HTML\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Dataset\n\n\nFor clarity, we use only the largest connected component, ignoring isolated nodes and subgraphs; having these in the data does not prevent the algorithm from running and producing valid results.", "_____no_output_____" ] ], [ [ "dataset = datasets.Cora()\ndisplay(HTML(dataset.description))\nG, subjects = dataset.load(largest_connected_component_only=True)", "_____no_output_____" ], [ "print(G.info())", "StellarGraph: Undirected multigraph\n Nodes: 2485, Edges: 5209\n\n Node types:\n paper: [2485]\n Features: float32 vector, length 1433\n Edge types: paper-cites->paper\n\n Edge types:\n paper-cites->paper: [5209]\n Weights: all 1 (default)\n" ] ], [ [ "### The Node2Vec algorithm\n\nThe Node2Vec algorithm introduced in [[1]](#refs) is a 2-step representation learning algorithm. The two steps are:\n\n1. Use random walks to generate sentences from a graph. A sentence is a list of node ids. The set of all sentences makes a corpus.\n\n2. The corpus is then used to learn an embedding vector for each node in the graph. 
Each node id is considered a unique word/token in a dictionary that has size equal to the number of nodes in the graph. The Word2Vec algorithm [[2]](#refs) is used for calculating the embedding vectors.\n\nIn this implementation, we train the Node2Vec algorithm in the following two steps:\n\n1. Generate a set of (`target`, `context`) node pairs by starting a biased random walk of fixed length at each node. The starting nodes are taken as the target nodes and the following nodes in biased random walks are taken as context nodes. For each (`target`, `context`) node pair, we generate 1 negative node pair.\n\n2. Train the Node2Vec algorithm through minimizing cross-entropy loss for `target-context` pair prediction, with the predictive value obtained by performing the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node, followed by a sigmoid activation.", "_____no_output_____" ], [ "Specify the optional parameter values: the number of walks to take per node, the length of each walk. Here, for running efficiency, we set `walk_number` and `walk_length` to 100 and 5, respectively; larger values can be used to achieve better performance.", "_____no_output_____" ] ], [ [ "walk_number = 100\nwalk_length = 5", "_____no_output_____" ] ], [ [ "Create the biased random walker to perform context node sampling, with the specified parameters.", "_____no_output_____" ] ], [ [ "walker = BiasedRandomWalk(\n G,\n n=walk_number,\n length=walk_length,\n p=0.5, # defines probability, 1/p, of returning to source node\n q=2.0, # defines probability, 1/q, for moving to a node away from the source node\n)", "_____no_output_____" ] ], [ [ "Create the UnsupervisedSampler instance with the biased random walker.", "_____no_output_____" ] ], [ [ "unsupervised_samples = UnsupervisedSampler(G, nodes=list(G.nodes()), walker=walker)", "_____no_output_____" ] ], [ [ "Set the batch size and the number of epochs.", "_____no_output_____" ] ], [ [ "batch_size = 50\nepochs = 2", "_____no_output_____" ] ], [ [ "Define a Node2Vec training generator, which generates a batch of (index of target node, index of context node, label of node pair) pairs per iteration.", "_____no_output_____" ] ], [ [ "generator = Node2VecLinkGenerator(G, batch_size)", "_____no_output_____" ] ], [ [ "Build the Node2Vec model, with the dimension of learned node representations set to 128.", "_____no_output_____" ] ], [ [ "emb_size = 128\nnode2vec = Node2Vec(emb_size, generator=generator)", "_____no_output_____" ], [ "x_inp, x_out = node2vec.in_out_tensors()", "_____no_output_____" ] ], [ [ "Use the link_classification function to generate the prediction, with the 'dot' edge embedding generation method and the 'sigmoid' activation, which actually performs the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node followed by a sigmoid activation.", "_____no_output_____" ] ], [ [ "prediction = link_classification(\n output_dim=1, output_act=\"sigmoid\", edge_embedding_method=\"dot\"\n)(x_out)", "link_classification: using 'dot' method to combine node embeddings into edge embeddings\n" ] ], [ [ "Stack the Node2Vec encoder and prediction layer into a Keras model. Our generator will produce batches of positive and negative context pairs as inputs to the model. 
Minimizing the binary crossentropy between the outputs and the provided ground truth is much like a regular binary classification task.", "_____no_output_____" ] ], [ [ "model = keras.Model(inputs=x_inp, outputs=prediction)\n\nmodel.compile(\n optimizer=keras.optimizers.Adam(lr=1e-3),\n loss=keras.losses.binary_crossentropy,\n metrics=[keras.metrics.binary_accuracy],\n)", "_____no_output_____" ] ], [ [ "Train the model.", "_____no_output_____" ] ], [ [ "history = model.fit(\n generator.flow(unsupervised_samples),\n epochs=epochs,\n verbose=1,\n use_multiprocessing=False,\n workers=4,\n shuffle=True,\n)", "Train for 39760 steps\nEpoch 1/2\n39760/39760 [==============================] - 159s 4ms/step - loss: 0.2956 - binary_accuracy: 0.8537\nEpoch 2/2\n39760/39760 [==============================] - 193s 5ms/step - loss: 0.1089 - binary_accuracy: 0.9644\n" ] ], [ [ "## Visualise Node Embeddings", "_____no_output_____" ], [ "Build the node based model for predicting node representations from node ids and the learned parameters. Below a Keras model is constructed, with x_inp[0] as input and x_out[0] as output. Note that this model's weights are the same as those of the corresponding node encoder in the previously trained node pair classifier.", "_____no_output_____" ] ], [ [ "x_inp_src = x_inp[0]\nx_out_src = x_out[0]\nembedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)", "_____no_output_____" ] ], [ [ "Get the node embeddings from node ids.", "_____no_output_____" ] ], [ [ "node_gen = Node2VecNodeGenerator(G, batch_size).flow(G.nodes())\nnode_embeddings = embedding_model.predict(node_gen, workers=4, verbose=1)", "50/50 [==============================] - 0s 1ms/step\n" ] ], [ [ "Transform the embeddings to 2d space for visualisation.", "_____no_output_____" ] ], [ [ "transform = TSNE # PCA\n\ntrans = transform(n_components=2)\nnode_embeddings_2d = trans.fit_transform(node_embeddings)", "_____no_output_____" ], [ "# draw the embedding points, coloring them by the target label (paper subject)\nalpha = 0.7\nlabel_map = {l: i for i, l in enumerate(np.unique(subjects))}\nnode_colours = [label_map[target] for target in subjects]\n\nplt.figure(figsize=(7, 7))\nplt.axes().set(aspect=\"equal\")\nplt.scatter(\n node_embeddings_2d[:, 0],\n node_embeddings_2d[:, 1],\n c=node_colours,\n cmap=\"jet\",\n alpha=alpha,\n)\nplt.title(\"{} visualization of node embeddings\".format(transform.__name__))\nplt.show()", "_____no_output_____" ] ], [ [ "### Node Classification\nIn this task, we will use the `Node2Vec` node embeddings to train a classifier to predict the subject of a paper in Cora.", "_____no_output_____" ] ], [ [ "# X will hold the 128-dimensional input features\nX = node_embeddings\n# y holds the corresponding target values\ny = np.array(subjects)", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nWe split the data into train and test sets. \n\nWe use 10% of the data for training and the remaining 90% for testing as a hold-out test set.", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.1, test_size=None)\nprint(\n \"Array shapes:\\n X_train = {}\\n y_train = {}\\n X_test = {}\\n y_test = {}\".format(\n X_train.shape, y_train.shape, X_test.shape, y_test.shape\n )\n)", "Array shapes:\n X_train = (248, 128)\n y_train = (248,)\n X_test = (2237, 128)\n y_test = (2237,)\n" ] ], [ [ "### Classifier Training\n\nWe train a Logistic Regression classifier on the training data. 
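The `LogisticRegressionCV` used below selects the regularisation strength from `Cs=10` candidates via 10-fold cross-validation on the training set.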
", "_____no_output_____" ] ], [ [ "clf = LogisticRegressionCV(\n Cs=10, cv=10, scoring=\"accuracy\", verbose=False, multi_class=\"ovr\", max_iter=300\n)\nclf.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "Predict the hold-out test set.", "_____no_output_____" ] ], [ [ "y_pred = clf.predict(X_test)", "_____no_output_____" ] ], [ [ "Calculate the accuracy of the classifier on the test set.", "_____no_output_____" ] ], [ [ "accuracy_score(y_test, y_pred)", "_____no_output_____" ] ], [ [ "<table><tr><td>Run the latest release of this notebook:</td><td><a href=\"https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/node-classification/keras-node2vec-node-classification.ipynb\" alt=\"Open In Binder\" target=\"_parent\"><img src=\"https://mybinder.org/badge_logo.svg\"/></a></td><td><a href=\"https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/node-classification/keras-node2vec-node-classification.ipynb\" alt=\"Open In Colab\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\"/></a></td></tr></table>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a701fadd1fdba3469fa2e98027b87ecbc5e5d6b
865,698
ipynb
Jupyter Notebook
kidney.ipynb
zohrxai/ZohrxAI-Notebooks
08f0656b67e842ced9ae01db3994995e4b91c607
[ "MIT" ]
null
null
null
kidney.ipynb
zohrxai/ZohrxAI-Notebooks
08f0656b67e842ced9ae01db3994995e4b91c607
[ "MIT" ]
null
null
null
kidney.ipynb
zohrxai/ZohrxAI-Notebooks
08f0656b67e842ced9ae01db3994995e4b91c607
[ "MIT" ]
null
null
null
865,698
865,698
0.783357
[ [ [ "cd /content/drive/My Drive/Dava with ML", "/content/drive/My Drive/Dava with ML\n" ], [ "!unzip chronic-kidney-disease.zip", "Archive: chronic-kidney-disease.zip\nreplace new_model.csv? [y]es, [n]o, [A]ll, [N]one, [r]ename: n\n" ], [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nimport sklearn.metrics as m", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "ls", " breast-cancer-wisconsin-data.zip hepatities.ipynb\n Cancer.ipynb hepatitisdata.csv\n cardiovaskular_dataset.zip indian_liver_patient.csv\n chronic-kidney-disease.zip indian-liver-patient-records.zip\n'covid pnemonia.ipynb' kidney.ipynb\n data.csv liver.ipynb\n Dataset_spine.csv model_cancer5.h5\n dataset.zip model_cancer8.h5\n degree_cancer_5.tflite model_covid.h5\n degree_cancer_8.tflite model_diabeties.h5\n degree_covid_mobilenet.tflite model_heart.h5\n degree_covid.tflite model_kidney.h5\n degree_covid_vgg.tflite model_liver.h5\n degree_diabeties.tflite model_spine.h5\n degree_heart.tflite new_model.csv\n degree_kidney.tflite parkinson_dataset.zip\n degree_liver.tflite Parkinson.ipynb\n degreeliver.tflite pd_speech_features.csv\n degree_spine.tflite pima-indians-diabetes-database.zip\n diabetes.csv \u001b[0m\u001b[01;34msaved_model_cancer_5.pbtxt\u001b[0m/\n Diabeties.ipynb \u001b[01;34msaved_model_cancer_8.pbtxt\u001b[0m/\n heart.csv \u001b[01;34msaved_model_kidney.pbtxt\u001b[0m/\n heart-disease-uci.zip \u001b[01;34msaved_model_liver.pbtxt\u001b[0m/\n heart.ipynb Spine.ipynb\n hepatiits_dataset.zip \u001b[01;34mX-RayImageDataSet\u001b[0m/\n" ], [ "dataset=pd.read_csv('new_model.csv')\ndataset", "_____no_output_____" ], [ "dataset.columns", "_____no_output_____" ], [ "dataset.corr()", "_____no_output_____" ], [ "dataset.isnull().values.any()", "_____no_output_____" ], [ "sns.set(style=\"ticks\", color_codes=True)\n", "_____no_output_____" ], [ "\nplt.figure(figsize=(30,15))\nsns.heatmap(dataset.corr(),annot=True)\nplt.show()", "_____no_output_____" ], [ "dataset.describe()", "_____no_output_____" ] ], [ [ "##Splitting of Data", "_____no_output_____" ] ], [ [ "features=dataset.iloc[:,:-1]\nlabels=dataset.iloc[:,[-1]]", "_____no_output_____" ], [ "features", "_____no_output_____" ], [ "labels", "_____no_output_____" ], [ "feature_train,feature_test,label_train,label_test=train_test_split(features,labels,test_size=0.2,random_state=42)", "_____no_output_____" ] ], [ [ "##Logistic Regression", "_____no_output_____" ] ], [ [ "model=LogisticRegression(max_iter=1000)\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", " precision recall f1-score support\n\n 0 0.96 0.96 0.96 28\n 1 0.98 0.98 0.98 52\n\n accuracy 0.97 80\n macro avg 0.97 0.97 0.97 80\nweighted avg 0.97 0.97 0.97 80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[27 1]\n [ 1 51]]\n" ] ], [ [ "##KNN", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\nknn=KNeighborsClassifier()\nparam={'n_neighbors':list(np.arange(1,20))}\n", "_____no_output_____" ], [ "model=GridSearchCV(knn,param_grid=param)\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
[ [ "##KNN", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\nknn=KNeighborsClassifier()\nparam={'n_neighbors':list(np.arange(1,20))}\n", "_____no_output_____" ], [ "model=GridSearchCV(knn,param_grid=param)\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_search.py:739: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n self.best_estimator_.fit(X, y, **fit_params)\n" ], [ "model.best_params_", "_____no_output_____" ], [ "model=KNeighborsClassifier(n_neighbors=10)\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n \n" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", "              precision    recall  f1-score   support\n\n           0       0.57      0.86      0.69        28\n           1       0.89      0.65      0.76        52\n\n    accuracy                           0.73        80\n   macro avg       0.73      0.76      0.72        80\nweighted avg       0.78      0.72      0.73        80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[24 4]\n [18 34]]\n" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ] ], [ [ "##Decision Tree", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\ndt=DecisionTreeClassifier()\nparam={'max_depth':list(np.arange(1,20))}\n", "_____no_output_____" ], [ "model=GridSearchCV(dt,param_grid=param)\nmodel.fit(feature_train,label_train)", "_____no_output_____" ], [ "model.best_params_", "_____no_output_____" ], [ "model=DecisionTreeClassifier(max_depth=10)\nmodel.fit(feature_train,label_train)", "_____no_output_____" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", "              precision    recall  f1-score   support\n\n           0       0.97      1.00      0.98        28\n           1       1.00      0.98      0.99        52\n\n    accuracy                           0.99        80\n   macro avg       0.98      0.99      0.99        80\nweighted avg       0.99      0.99      0.99        80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[28 0]\n [ 1 51]]\n" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ] ],
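[ [ "KNN lags the models seen so far (accuracy 0.73 against 0.97 for logistic regression and 0.99 for the tuned decision tree), which may partly reflect unscaled features, since KNN is distance based. The next cell is a minimal sketch of a scaled pipeline, assuming the same train/test split (it was not executed as part of this run).", "_____no_output_____" ] ], [ [ "# Minimal sketch (not executed in this run): standardize features before the\n# distance-based KNN model; assumes feature_train/label_train from the split above.\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\npipe=make_pipeline(StandardScaler(),KNeighborsClassifier(n_neighbors=10))\npipe.fit(feature_train,label_train.values.ravel())\npipe.score(feature_test,label_test)", "_____no_output_____" ] ],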
[ [ "##SVM", "_____no_output_____" ] ], [ [ "model=SVC(kernel='linear')\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", "              precision    recall  f1-score   support\n\n           0       0.96      0.93      0.95        28\n           1       0.96      0.98      0.97        52\n\n    accuracy                           0.96        80\n   macro avg       0.96      0.95      0.96        80\nweighted avg       0.96      0.96      0.96        80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[26 2]\n [ 1 51]]\n" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ] ], [ [ "##Random Forest Classifier", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\nrfc=RandomForestClassifier()\nparam={'n_estimators':[10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200],'max_depth':list(np.arange(1,20))}\n", "_____no_output_____" ], [ "model=GridSearchCV(rfc,param_grid=param)\nmodel.fit(feature_train,label_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n  estimator.fit(X_train, y_train, **fit_params)\n[identical DataConversionWarning repeated once per cross-validation fit]
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n  estimator.fit(X_train, y_train, **fit_params)\n[... the same DataConversionWarning is emitted once per cross-validation fit; the remaining identical repetitions are omitted ...]
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n [the same DataConversionWarning from sklearn/model_selection/_validation.py:515 repeats identically for every cross-validation fit]
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n  estimator.fit(X_train, y_train, **fit_params)
[identical warning repeated once per cross-validation fit; duplicates collapsed]
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py:515: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n estimator.fit(X_train, y_train, **fit_params)\n/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_search.py:739: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().\n self.best_estimator_.fit(X, y, **fit_params)\n" ], [ "model.best_params_", "_____no_output_____" ], [ "model=model.best_estimator_", "_____no_output_____" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 28\n 1 1.00 1.00 1.00 52\n\n accuracy 1.00 80\n macro avg 1.00 1.00 1.00 80\nweighted avg 1.00 1.00 1.00 80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[28 0]\n [ 0 52]]\n" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ] ], [ [ "# Neural Networks", "_____no_output_____" ] ], [ [ "from tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential", "_____no_output_____" ], [ "from tensorflow.keras.utils import to_categorical\nlabel_train=to_categorical(label_train)\nlabel_test=to_categorical(label_test)", "_____no_output_____" ], [ "feature_train.shape", "_____no_output_____" ], [ "# Two sigmoid outputs with binary_crossentropy work for one-hot two-class labels,\n# though softmax with categorical_crossentropy is the more conventional pairing.\nmodel=Sequential()\nmodel.add(Dense(300,input_shape=[13],activation='relu'))\nmodel.add(Dense(2,activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])", "_____no_output_____" ], [ "model.summary()", "Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_4 (Dense) (None, 300) 4200 \n_________________________________________________________________\ndense_5 (Dense) (None, 2) 602 \n=================================================================\nTotal params: 4,802\nTrainable params: 4,802\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.fit(feature_train,label_train,epochs=100)", "Epoch 1/100\n10/10 [==============================] - 0s 2ms/step - loss: 2.1380 - accuracy: 0.8531\nEpoch 2/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5055 - accuracy: 0.9375\nEpoch 3/100\n10/10 [==============================] - 0s 2ms/step - loss: 2.9370 - accuracy: 0.8188\nEpoch 4/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.4059 - accuracy: 0.9125\nEpoch 5/100\n10/10 [==============================] - 0s 2ms/step - loss: 2.2538 - accuracy: 0.8531\nEpoch 6/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.0606 - accuracy: 0.9094\nEpoch 7/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4014 - accuracy: 0.9531\nEpoch 8/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3570 - accuracy: 0.9469\nEpoch 9/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.0029 - accuracy: 0.9156\nEpoch 10/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3331 - accuracy: 0.9406\nEpoch 11/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3499 - accuracy: 0.9438\nEpoch 12/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3202 - accuracy: 0.9438\nEpoch 13/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3673 - accuracy: 0.9531\nEpoch 14/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3882 - accuracy: 0.9594\nEpoch 15/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.4230 - accuracy: 0.9406\nEpoch 
16/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3447 - accuracy: 0.9563\nEpoch 17/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.2952 - accuracy: 0.9625\nEpoch 18/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5773 - accuracy: 0.9469\nEpoch 19/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4252 - accuracy: 0.9375\nEpoch 20/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6422 - accuracy: 0.9406\nEpoch 21/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4525 - accuracy: 0.9219\nEpoch 22/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5839 - accuracy: 0.9281\nEpoch 23/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8484 - accuracy: 0.9062\nEpoch 24/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4530 - accuracy: 0.9563\nEpoch 25/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4565 - accuracy: 0.9438\nEpoch 26/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3408 - accuracy: 0.9531\nEpoch 27/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3548 - accuracy: 0.9500\nEpoch 28/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.2816 - accuracy: 0.9469\nEpoch 29/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3396 - accuracy: 0.9469\nEpoch 30/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.7570 - accuracy: 0.9219\nEpoch 31/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4695 - accuracy: 0.9469\nEpoch 32/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.3186 - accuracy: 0.8969\nEpoch 33/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.4451 - accuracy: 0.8875\nEpoch 34/100\n10/10 [==============================] - 0s 2ms/step - loss: 2.4937 - accuracy: 0.8438\nEpoch 35/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.9983 - accuracy: 0.8906\nEpoch 36/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.9691 - accuracy: 0.9531\nEpoch 37/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3966 - accuracy: 0.9563\nEpoch 38/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6540 - accuracy: 0.9375\nEpoch 39/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4662 - accuracy: 0.9531\nEpoch 40/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4760 - accuracy: 0.9563\nEpoch 41/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8923 - accuracy: 0.9125\nEpoch 42/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8374 - accuracy: 0.9344\nEpoch 43/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.7427 - accuracy: 0.9250\nEpoch 44/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8614 - accuracy: 0.9125\nEpoch 45/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5731 - accuracy: 0.9375\nEpoch 46/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4315 - accuracy: 0.9500\nEpoch 47/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.2330 - accuracy: 0.8938\nEpoch 48/100\n10/10 [==============================] - 0s 2ms/step - loss: 3.6934 - accuracy: 0.7750\nEpoch 49/100\n10/10 [==============================] - 0s 2ms/step - loss: 3.5605 - accuracy: 0.8250\nEpoch 50/100\n10/10 [==============================] - 0s 2ms/step - loss: 2.7330 - 
accuracy: 0.8250\nEpoch 51/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.8651 - accuracy: 0.9375\nEpoch 52/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5316 - accuracy: 0.9438\nEpoch 53/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8168 - accuracy: 0.9344\nEpoch 54/100\n10/10 [==============================] - 0s 3ms/step - loss: 1.2914 - accuracy: 0.9125\nEpoch 55/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.3626 - accuracy: 0.9062\nEpoch 56/100\n10/10 [==============================] - 0s 3ms/step - loss: 1.2184 - accuracy: 0.9156\nEpoch 57/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4728 - accuracy: 0.9438\nEpoch 58/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8136 - accuracy: 0.9406\nEpoch 59/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.8361 - accuracy: 0.9344\nEpoch 60/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.9410 - accuracy: 0.9187\nEpoch 61/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4002 - accuracy: 0.9312\nEpoch 62/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.2715 - accuracy: 0.9531\nEpoch 63/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3443 - accuracy: 0.9438\nEpoch 64/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4194 - accuracy: 0.9469\nEpoch 65/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5499 - accuracy: 0.9312\nEpoch 66/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5629 - accuracy: 0.9531\nEpoch 67/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4070 - accuracy: 0.9594\nEpoch 68/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.4816 - accuracy: 0.9469\nEpoch 69/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3298 - accuracy: 0.9531\nEpoch 70/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8922 - accuracy: 0.9219\nEpoch 71/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.3479 - accuracy: 0.9438\nEpoch 72/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6510 - accuracy: 0.9406\nEpoch 73/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6409 - accuracy: 0.9375\nEpoch 74/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.3652 - accuracy: 0.8781\nEpoch 75/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8807 - accuracy: 0.9000\nEpoch 76/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.0334 - accuracy: 0.9219\nEpoch 77/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.6600 - accuracy: 0.8687\nEpoch 78/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.4097 - accuracy: 0.8938\nEpoch 79/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.5424 - accuracy: 0.8719\nEpoch 80/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.0861 - accuracy: 0.9125\nEpoch 81/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.7320 - accuracy: 0.9312\nEpoch 82/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5611 - accuracy: 0.9531\nEpoch 83/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.5443 - accuracy: 0.9438\nEpoch 84/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.4333 - accuracy: 0.9469\nEpoch 85/100\n10/10 [==============================] - 0s 2ms/step - 
loss: 0.4141 - accuracy: 0.9594\nEpoch 86/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.5094 - accuracy: 0.9187\nEpoch 87/100\n10/10 [==============================] - 0s 3ms/step - loss: 1.3966 - accuracy: 0.9125\nEpoch 88/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8856 - accuracy: 0.9500\nEpoch 89/100\n10/10 [==============================] - 0s 3ms/step - loss: 1.9479 - accuracy: 0.8813\nEpoch 90/100\n10/10 [==============================] - 0s 3ms/step - loss: 3.6085 - accuracy: 0.8094\nEpoch 91/100\n10/10 [==============================] - 0s 3ms/step - loss: 7.0276 - accuracy: 0.7156\nEpoch 92/100\n10/10 [==============================] - 0s 2ms/step - loss: 7.1423 - accuracy: 0.7406\nEpoch 93/100\n10/10 [==============================] - 0s 2ms/step - loss: 3.3407 - accuracy: 0.8281\nEpoch 94/100\n10/10 [==============================] - 0s 3ms/step - loss: 1.9132 - accuracy: 0.9062\nEpoch 95/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.8957 - accuracy: 0.9469\nEpoch 96/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6566 - accuracy: 0.9563\nEpoch 97/100\n10/10 [==============================] - 0s 3ms/step - loss: 0.6427 - accuracy: 0.9438\nEpoch 98/100\n10/10 [==============================] - 0s 2ms/step - loss: 0.6609 - accuracy: 0.9281\nEpoch 99/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.7665 - accuracy: 0.8938\nEpoch 100/100\n10/10 [==============================] - 0s 2ms/step - loss: 1.1853 - accuracy: 0.9406\n" ], [ "label_pred=model.predict(feature_test)", "_____no_output_____" ], [ "label_pred", "_____no_output_____" ], [ "label_pred=np.argmax(label_pred,axis=1)", "_____no_output_____" ], [ "label_pred", "_____no_output_____" ], [ "label_test=np.argmax(label_test,axis=1)\nlabel_train=np.argmax(label_train,axis=1)", "_____no_output_____" ], [ "m.accuracy_score(label_test,label_pred)", "_____no_output_____" ], [ "print(m.classification_report(label_test,label_pred))", " precision recall f1-score support\n\n 0 0.96 0.82 0.88 28\n 1 0.91 0.98 0.94 52\n\n accuracy 0.93 80\n macro avg 0.93 0.90 0.91 80\nweighted avg 0.93 0.93 0.92 80\n\n" ], [ "print(m.confusion_matrix(label_test,label_pred))", "[[23 5]\n [ 1 51]]\n" ], [ "label_pred", "_____no_output_____" ], [ "label_test", "_____no_output_____" ], [ "import tensorflow\n\n", "_____no_output_____" ], [ "model.save('model_kidney.h5')", "_____no_output_____" ], [ "# We need to create a TFLite Converter Object from model we created\n\nconverter = tensorflow.lite.TFLiteConverter.from_keras_model(model=model)", "_____no_output_____" ], [ "# Create a tflite model object from TFLite Converter\n\ntfmodel = converter.convert()\n\n# Save TFLite model into a .tflite file \n\nopen(\"degree_kidney.tflite\",\"wb\").write(tfmodel)", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis property should not be used in TensorFlow 2.0, as updates are applied 
automatically.\nINFO:tensorflow:Assets written to: /tmp/tmp5g7gx362/assets\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a70235f1982f39c272961985f34013f049bfa2a
335,339
ipynb
Jupyter Notebook
notebooks/NbClust_R.ipynb
lcajachahua/jupyterlab-demos
68ab1d211d0a8eea916286f7c7dfbc711ca308a0
[ "MIT" ]
1
2021-09-16T02:13:22.000Z
2021-09-16T02:13:22.000Z
notebooks/NbClust_R.ipynb
imera88/jupyterlab-demos
8817ad77caeed2acd8336b8f025e9368e7479611
[ "MIT" ]
null
null
null
notebooks/NbClust_R.ipynb
imera88/jupyterlab-demos
8817ad77caeed2acd8336b8f025e9368e7479611
[ "MIT" ]
5
2020-06-04T01:43:42.000Z
2021-08-25T16:42:13.000Z
360.192266
67,826
0.895971
[ [ [ "library(NbClust)\nlibrary(ggplot2)", "_____no_output_____" ], [ "data<-read.csv(\"Mall_Customers.csv\")", "_____no_output_____" ], [ "head(data)", "_____no_output_____" ], [ "names(data)[4:5]<-c('AnnualIncome','SpendingScore')", "_____no_output_____" ], [ "NbClust(data[,c(4,5)], diss=NULL, distance=\"maximum\", min.nc=4, max.nc=12, method=\"median\", index=\"all\")", "*** : The Hubert index is a graphical method of determining the number of clusters.\n In the plot of Hubert index, we seek a significant knee that corresponds to a \n significant increase of the value of the measure i.e the significant peak in Hubert\n index second differences plot. \n \n" ], [ "nbclu<-NbClust(data[,c(4,5)], diss=NULL, distance=\"euclidean\", min.nc=4, max.nc=12, method=\"kmeans\", index=\"all\")", "*** : The Hubert index is a graphical method of determining the number of clusters.\n In the plot of Hubert index, we seek a significant knee that corresponds to a \n significant increase of the value of the measure i.e the significant peak in Hubert\n index second differences plot. \n \n" ], [ "data<-cbind(data,'cluster'=nbclu$Best.partition)", "_____no_output_____" ], [ "data$cluster<-as.factor(data$cluster)", "_____no_output_____" ], [ "ggplot(data, aes(x=AnnualIncome, y=SpendingScore, shape=cluster, color=cluster)) + geom_point()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a703556b577352825cc94c4647943dddb819744
2,358
ipynb
Jupyter Notebook
notebooks/Papermill.ipynb
cbrueffer/covid-19_sinai_reviews
c5e924b8d406e488f85b330afe5f31a2b9c5d687
[ "CC-BY-4.0" ]
8
2020-04-04T13:50:29.000Z
2020-04-29T13:54:45.000Z
notebooks/Papermill.ipynb
cbrueffer/covid-19_sinai_reviews
c5e924b8d406e488f85b330afe5f31a2b9c5d687
[ "CC-BY-4.0" ]
9
2020-04-04T13:50:19.000Z
2020-04-08T11:34:45.000Z
notebooks/Papermill.ipynb
cbrueffer/covid-19_sinai_reviews
c5e924b8d406e488f85b330afe5f31a2b9c5d687
[ "CC-BY-4.0" ]
7
2020-04-04T13:33:16.000Z
2020-07-31T16:54:05.000Z
20.867257
120
0.525445
[ [ [ "# Papermill\nExecute notebooks to update Altmetrics and generate heatmap JSON.", "_____no_output_____" ] ], [ [ "import papermill as pm", "_____no_output_____" ], [ "!mkdir ../papermill_notebooks/\n# for inst_template in ['1.1.0_Collect_Altmetrics.ipynb', '1.2.0_Heatmap_Papers.ipynb']:\nfor inst_template in ['1.2.0_Heatmap_Papers.ipynb']:\n tmp = pm.execute_notebook(\n inst_template, \n '../papermill_notebooks/' + inst_template\n )", "mkdir: ../papermill_notebooks/: File exists\r\n" ], [ "!mkdir ../reports/\n!jupyter nbconvert --to html --output-dir='../reports/' ../papermill_notebooks/*", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
4a7039e0b409da0e2714e219cbe2861f7faa6dbd
92,829
ipynb
Jupyter Notebook
Machine_Learning_Code_Implementation-master/charpter2_linear_regression/linear_regression.ipynb
keesh0410/SkillTree
33478b328e501c5937bb16427266af62089c70d4
[ "MIT" ]
null
null
null
Machine_Learning_Code_Implementation-master/charpter2_linear_regression/linear_regression.ipynb
keesh0410/SkillTree
33478b328e501c5937bb16427266af62089c70d4
[ "MIT" ]
null
null
null
Machine_Learning_Code_Implementation-master/charpter2_linear_regression/linear_regression.ipynb
keesh0410/SkillTree
33478b328e501c5937bb16427266af62089c70d4
[ "MIT" ]
null
null
null
116.181477
31,298
0.8427
[ [ [ "## 线性回归", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "### 初始化模型参数\ndef initialize_params(dims):\n '''\n 输入:\n dims:训练数据变量维度\n 输出:\n w:初始化权重参数值\n b:初始化偏差参数值\n '''\n # 初始化权重参数为零矩阵\n w = np.zeros((dims, 1))\n # 初始化偏差参数为零\n b = 0\n return w, b", "_____no_output_____" ], [ "### 定义模型主体部分\n### 包括线性回归公式、均方损失和参数偏导三部分\ndef linear_loss(X, y, w, b):\n '''\n 输入:\n X:输入变量矩阵\n y:输出标签向量\n w:变量参数权重矩阵\n b:偏差项\n 输出:\n y_hat:线性模型预测输出\n loss:均方损失值\n dw:权重参数一阶偏导\n db:偏差项一阶偏导\n '''\n # 训练样本数量\n num_train = X.shape[0]\n # 训练特征数量\n num_feature = X.shape[1]\n # 线性回归预测输出\n y_hat = np.dot(X, w) + b\n # 计算预测输出与实际标签之间的均方损失\n loss = np.sum((y_hat-y)**2)/num_train\n # 基于均方损失对权重参数的一阶偏导数\n dw = np.dot(X.T, (y_hat-y)) /num_train\n # 基于均方损失对偏差项的一阶偏导数\n db = np.sum((y_hat-y)) /num_train\n return y_hat, loss, dw, db", "_____no_output_____" ], [ "### 定义线性回归模型训练过程\ndef linear_train(X, y, learning_rate=0.01, epochs=10000):\n '''\n 输入:\n X:输入变量矩阵\n y:输出标签向量\n learning_rate:学习率\n epochs:训练迭代次数\n 输出:\n loss_his:每次迭代的均方损失\n params:优化后的参数字典\n grads:优化后的参数梯度字典\n '''\n # 记录训练损失的空列表\n loss_his = []\n # 初始化模型参数\n w, b = initialize_params(X.shape[1])\n # 迭代训练\n for i in range(1, epochs):\n # 计算当前迭代的预测值、损失和梯度\n y_hat, loss, dw, db = linear_loss(X, y, w, b)\n # 基于梯度下降的参数更新\n w += -learning_rate * dw\n b += -learning_rate * db\n # 记录当前迭代的损失\n loss_his.append(loss)\n # 每1000次迭代打印当前损失信息\n if i % 10000 == 0:\n print('epoch %d loss %f' % (i, loss))\n # 将当前迭代步优化后的参数保存到字典\n params = {\n 'w': w,\n 'b': b\n }\n # 将当前迭代步的梯度保存到字典\n grads = {\n 'dw': dw,\n 'db': db\n } \n return loss_his, params, grads", "_____no_output_____" ], [ "X = np.ones(shape=(353,10))\nX.shape", "_____no_output_____" ], [ "w, b = initialize_params(X.shape[1])\nw.shape", "_____no_output_____" ], [ "y=np.ones(shape=(353,))\ny.shape", "_____no_output_____" ], [ "from sklearn.datasets import load_diabetes\ndiabetes = load_diabetes()\ndata = diabetes.data\ntarget = diabetes.target \nprint(data.shape)\nprint(target.shape)\nprint(data[:5])\nprint(target[:5])", "(442, 10)\n(442,)\n[[ 0.03807591 0.05068012 0.06169621 0.02187235 -0.0442235 -0.03482076\n -0.04340085 -0.00259226 0.01990842 -0.01764613]\n [-0.00188202 -0.04464164 -0.05147406 -0.02632783 -0.00844872 -0.01916334\n 0.07441156 -0.03949338 -0.06832974 -0.09220405]\n [ 0.08529891 0.05068012 0.04445121 -0.00567061 -0.04559945 -0.03419447\n -0.03235593 -0.00259226 0.00286377 -0.02593034]\n [-0.08906294 -0.04464164 -0.01159501 -0.03665645 0.01219057 0.02499059\n -0.03603757 0.03430886 0.02269202 -0.00936191]\n [ 0.00538306 -0.04464164 -0.03638469 0.02187235 0.00393485 0.01559614\n 0.00814208 -0.00259226 -0.03199144 -0.04664087]]\n[151. 75. 141. 206. 
135.]\n" ], [ "# 导入sklearn diabetes数据接口\nfrom sklearn.datasets import load_diabetes\n# 导入sklearn打乱数据函数\nfrom sklearn.utils import shuffle\n# 获取diabetes数据集\ndiabetes = load_diabetes()\n# 获取输入和标签\ndata, target = diabetes.data, diabetes.target \n# 打乱数据集\nX, y = shuffle(data, target, random_state=13)\n# 按照8/2划分训练集和测试集\noffset = int(X.shape[0] * 0.8)\n# 训练集\nX_train, y_train = X[:offset], y[:offset]\n# 测试集\nX_test, y_test = X[offset:], y[offset:]\n# 将训练集改为列向量的形式\ny_train = y_train.reshape((-1,1))\n# 将验证集改为列向量的形式\ny_test = y_test.reshape((-1,1))\n# 打印训练集和测试集维度\nprint(\"X_train's shape: \", X_train.shape)\nprint(\"X_test's shape: \", X_test.shape)\nprint(\"y_train's shape: \", y_train.shape)\nprint(\"y_test's shape: \", y_test.shape)", "X_train's shape: (353, 10)\nX_test's shape: (89, 10)\ny_train's shape: (353, 1)\ny_test's shape: (89, 1)\n" ], [ "# 线性回归模型训练\nloss_his, params, grads = linear_train(X_train, y_train, 0.01, 200000)\n# 打印训练后得到模型参数\nprint(params)", "epoch 10000 loss 3679.868273\nepoch 20000 loss 3219.164522\nepoch 30000 loss 3040.820279\nepoch 40000 loss 2944.936608\nepoch 50000 loss 2885.991571\nepoch 60000 loss 2848.051813\nepoch 70000 loss 2823.157085\nepoch 80000 loss 2806.627821\nepoch 90000 loss 2795.546917\nepoch 100000 loss 2788.051561\nepoch 110000 loss 2782.935842\nepoch 120000 loss 2779.411265\nepoch 130000 loss 2776.957989\nepoch 140000 loss 2775.230803\nepoch 150000 loss 2773.998942\nepoch 160000 loss 2773.107192\nepoch 170000 loss 2772.450534\nepoch 180000 loss 2771.957489\nepoch 190000 loss 2771.579121\n{'w': array([[ 10.56390075],\n [-236.41625133],\n [ 481.50915635],\n [ 294.47043558],\n [ -60.99362023],\n [-110.54181897],\n [-206.44046579],\n [ 163.23511378],\n [ 409.28971463],\n [ 65.73254667]]), 'b': 150.8144748910088}\n" ], [ "### 定义线性回归预测函数\ndef predict(X, params):\n '''\n 输入:\n X:测试数据集\n params:模型训练参数\n 输出:\n y_pred:模型预测结果\n '''\n # 获取模型参数\n w = params['w']\n b = params['b']\n # 预测\n y_pred = np.dot(X, w) + b\n return y_pred\n# 基于测试集的预测\ny_pred = predict(X_test, params)\n# 打印前五个预测值\ny_pred[:5]", "_____no_output_____" ], [ "print(y_test[:5])", "[[ 37.]\n [122.]\n [ 88.]\n [214.]\n [262.]]\n" ], [ "### 定义R2系数函数\ndef r2_score(y_test, y_pred):\n '''\n 输入:\n y_test:测试集标签值\n y_pred:测试集预测值\n 输出:\n r2:R2系数\n '''\n # 测试标签均值\n y_avg = np.mean(y_test)\n # 总离差平方和\n ss_tot = np.sum((y_test - y_avg)**2)\n # 残差平方和\n ss_res = np.sum((y_test - y_pred)**2)\n # R2计算\n r2 = 1 - (ss_res/ss_tot)\n return r2", "_____no_output_____" ], [ "print(r2_score(y_test, y_pred))", "0.5334188457463577\n" ], [ "import matplotlib.pyplot as plt\nf = X_test.dot(params['w']) + params['b']\n\nplt.scatter(range(X_test.shape[0]), y_test)\nplt.plot(f, color = 'darkorange')\nplt.xlabel('X_test')\nplt.ylabel('y_test')\nplt.show();", "_____no_output_____" ], [ "plt.plot(loss_his, color = 'blue')\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.show()", "_____no_output_____" ], [ "from sklearn.utils import shuffle\nX, y = shuffle(data, target, random_state=13)\nX = X.astype(np.float32)\ndata = np.concatenate((X, y.reshape((-1,1))), axis=1)\ndata.shape", "_____no_output_____" ], [ "from random import shuffle\n\ndef k_fold_cross_validation(items, k, randomize=True):\n if randomize:\n items = list(items)\n shuffle(items)\n\n slices = [items[i::k] for i in range(k)]\n\n for i in range(k):\n validation = slices[i]\n training = [item\n for s in slices if s is not validation\n for item in s]\n training = np.array(training)\n validation = np.array(validation)\n yield training, validation\n\n\nfor training, validation 
in k_fold_cross_validation(data, 5): \n    X_train = training[:, :10]\n    y_train = training[:, -1].reshape((-1,1))\n    X_valid = validation[:, :10]\n    y_valid = validation[:, -1].reshape((-1,1))\n    loss5 = []\n    #print(X_train.shape, y_train.shape, X_valid.shape, y_valid.shape)\n    loss, params, grads = linear_train(X_train, y_train, 0.001, 100000)\n    loss5.append(loss)\n    score = np.mean(loss5)\n    print('five fold cross validation score is', score)\n    y_pred = predict(X_valid, params)\n    valid_score = np.sum(((y_pred-y_valid)**2))/len(X_valid)\n    print('valid score is', valid_score)", "epoch 10000 loss 5691.020878\nepoch 20000 loss 5348.335014\nepoch 30000 loss 5057.483172\nepoch 40000 loss 4809.887762\nepoch 50000 loss 4598.431321\nepoch 60000 loss 4417.205780\nepoch 70000 loss 4261.304941\nepoch 80000 loss 4126.652666\nepoch 90000 loss 4009.860637\nfive fold cross validation score is 4846.591085182258\nvalid score is 3778.803258768074\nepoch 10000 loss 5282.139068\nepoch 20000 loss 5012.692214\nepoch 30000 loss 4780.800660\nepoch 40000 loss 4580.708131\nepoch 50000 loss 4407.569874\nepoch 60000 loss 4257.306699\nepoch 70000 loss 4126.482511\nepoch 80000 loss 4012.201515\nepoch 90000 loss 3912.021932\nfive fold cross validation score is 4622.954950545252\nvalid score is 4753.46891405462\nepoch 10000 loss 5640.394829\nepoch 20000 loss 5305.537053\nepoch 30000 loss 5020.762639\nepoch 40000 loss 4777.971744\nepoch 50000 loss 4570.405124\nepoch 60000 loss 4392.420727\nepoch 70000 loss 4239.307624\nepoch 80000 loss 4107.131004\nepoch 90000 loss 3992.603032\nfive fold cross validation score is 4810.225325366101\nvalid score is 3946.527748745793\nepoch 10000 loss 5633.617842\nepoch 20000 loss 5330.573826\nepoch 30000 loss 5071.399255\nepoch 40000 loss 4849.128993\nepoch 50000 loss 4657.935846\nepoch 60000 loss 4492.943015\nepoch 70000 loss 4350.067583\nepoch 80000 loss 4225.889850\nepoch 90000 loss 4117.544239\nfive fold cross validation score is 4886.416952390444\nvalid score is 3626.3111869073064\nepoch 10000 loss 5581.581786\nepoch 20000 loss 5288.687061\nepoch 30000 loss 5037.833139\nepoch 40000 loss 4822.336343\nepoch 50000 loss 4636.611397\nepoch 60000 loss 4475.989026\nepoch 70000 loss 4336.563969\nepoch 80000 loss 4215.068309\nepoch 90000 loss 4108.765883\nfive fold cross validation score is 4862.41798136116\nvalid score is 3753.0781481163667\n" ], [ "from sklearn.datasets import load_diabetes\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\ndiabetes = load_diabetes()\ndata = diabetes.data\ntarget = diabetes.target \nX, y = shuffle(data, target, random_state=13)\nX = X.astype(np.float32)\ny = y.reshape((-1, 1))\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape)", "(353, 10) (353, 1) (89, 10) (89, 1)\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n\nregr = linear_model.LinearRegression()\nregr.fit(X_train, y_train)\n\ny_pred = regr.predict(X_test)\n\n# The coefficients\nprint('Coefficients: \\n', regr.coef_)\n# The mean squared error\nprint(\"Mean squared error: %.2f\"\n      % mean_squared_error(y_test, y_pred))\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.2f' % r2_score(y_test, y_pred))\nprint(r2_score(y_test, y_pred))\n\n# Plot outputs\nplt.scatter(range(X_test.shape[0]), y_test, 
color='red')\nplt.plot(range(X_test.shape[0]), y_pred, color='blue', linewidth=3)\n\nplt.xticks(())\nplt.yticks(())\n\nplt.show();", "Coefficients: \n [[ -23.510529 -216.31224 472.36664 372.07184 -863.6953 583.27313\n 105.79194 194.76958 754.0722 38.22219 ]]\nMean squared error: 3028.50\nVariance score: 0.53\n0.5298198665264144\n" ], [ "import numpy as np \nimport pandas as pd \nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LinearRegression\n\n### Cross-validation\ndef cross_validate(model, x, y, folds=5, repeats=5):\n    \n    ypred = np.zeros((len(y),repeats))\n    score = np.zeros(repeats)\n    for r in range(repeats):\n        i=0\n        print('Cross Validating - Run', str(r + 1), 'out of', str(repeats))\n        x,y = shuffle(x, y, random_state=r) #shuffle data before each repeat\n        kf = KFold(n_splits=folds,random_state=i+1000) #random split, different each time\n        for train_ind, test_ind in kf.split(x):\n            print('Fold', i+1, 'out of', folds)\n            xtrain,ytrain = x[train_ind,:],y[train_ind]\n            xtest,ytest = x[test_ind,:],y[test_ind]\n            model.fit(xtrain, ytrain)\n            #print(xtrain.shape, ytrain.shape, xtest.shape, ytest.shape)\n            ypred[test_ind]=model.predict(xtest)\n            i+=1\n        score[r] = R2(ypred[:,r],y)\n    print('\\nOverall R2:',str(score))\n    print('Mean:',str(np.mean(score)))\n    print('Deviation:',str(np.std(score)))\n    pass\n\ncross_validate(regr, X, y, folds=5, repeats=5)", "Cross Validating - Run 1 out of 5\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a703c6e8904bca1e7ae43777098be23a1f0e0ee
148,352
ipynb
Jupyter Notebook
src/60_Hyperopt_elastic_net.ipynb
fkubota/kaggle-Predicting-Molecular-Properties
ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c
[ "MIT" ]
null
null
null
src/60_Hyperopt_elastic_net.ipynb
fkubota/kaggle-Predicting-Molecular-Properties
ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c
[ "MIT" ]
null
null
null
src/60_Hyperopt_elastic_net.ipynb
fkubota/kaggle-Predicting-Molecular-Properties
ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c
[ "MIT" ]
2
2020-09-26T08:38:36.000Z
2021-01-10T10:56:57.000Z
51.493232
2,802
0.373113
[ [ [ "# Introduction\n- ElasticNetを使ってみる\n- permutation importance を追加", "_____no_output_____" ], [ "# Import everything I need :)", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')\nimport time\nimport multiprocessing\nimport glob\nimport gc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.model_selection import KFold, train_test_split, GridSearchCV\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn import linear_model\nfrom functools import partial\nfrom hyperopt import fmin, hp, tpe, Trials, space_eval, STATUS_OK, STATUS_RUNNING\nfrom fastprogress import progress_bar", "_____no_output_____" ] ], [ [ "# Preparation", "_____no_output_____" ] ], [ [ "nb = 60\nisSmallSet = False\nlength = 50000\nmodel_name = 'elastic_net'", "_____no_output_____" ], [ "pd.set_option('display.max_columns', 200)", "_____no_output_____" ], [ "# use atomic numbers to recode atomic names\nATOMIC_NUMBERS = {\n 'H': 1,\n 'C': 6,\n 'N': 7,\n 'O': 8,\n 'F': 9\n}", "_____no_output_____" ], [ "file_path = '../input/champs-scalar-coupling/'\nglob.glob(file_path + '*')", "_____no_output_____" ], [ "# train\npath = file_path + 'train.csv'\nif isSmallSet:\n train = pd.read_csv(path) [:length]\nelse:\n train = pd.read_csv(path)", "_____no_output_____" ], [ "# test\npath = file_path + 'test.csv'\nif isSmallSet:\n test = pd.read_csv(path)[:length]\nelse:\n test = pd.read_csv(path)", "_____no_output_____" ], [ "# structure\npath = file_path + 'structures.csv'\nstructures = pd.read_csv(path)", "_____no_output_____" ], [ "# fc_train\npath = file_path + 'nb47_fc_train.csv'\nif isSmallSet:\n fc_train = pd.read_csv(path)[:length]\nelse:\n fc_train = pd.read_csv(path)", "_____no_output_____" ], [ "# fc_test\npath = file_path + 'nb47_fc_test.csv'\nif isSmallSet:\n fc_test = pd.read_csv(path)[:length]\nelse:\n fc_test = pd.read_csv(path)", "_____no_output_____" ], [ "# train dist-interact\npath = file_path + 'nb33_train_dist-interaction.csv'\nif isSmallSet:\n dist_interact_train = pd.read_csv(path)[:length]\nelse:\n dist_interact_train = pd.read_csv(path)", "_____no_output_____" ], [ "# test dist-interact\npath = file_path + 'nb33_test_dist-interaction.csv'\nif isSmallSet:\n dist_interact_test = pd.read_csv(path)[:length]\nelse:\n dist_interact_test = pd.read_csv(path)", "_____no_output_____" ], [ "# ob charge train\npath = file_path + 'train_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'\nif isSmallSet:\n ob_charge_train = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)\nelse:\n ob_charge_train = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)", "_____no_output_____" ], [ "# ob charge test\npath = file_path + 'test_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'\nif isSmallSet:\n ob_charge_test = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)\nelse:\n ob_charge_test = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)", "_____no_output_____" ], [ "len(test), len(fc_test)", "_____no_output_____" ], [ "len(train), len(fc_train)", "_____no_output_____" ], [ "if isSmallSet:\n print('using SmallSet !!')\n print('-------------------')\n\nprint(f'There are {train.shape[0]} rows in train data.')\nprint(f'There are {test.shape[0]} rows in test data.')\n\nprint(f\"There are {train['molecule_name'].nunique()} distinct 
molecules in train data.\")\nprint(f\"There are {test['molecule_name'].nunique()} distinct molecules in test data.\")\nprint(f\"There are {train['atom_index_0'].nunique()} unique atoms.\")\nprint(f\"There are {train['type'].nunique()} unique types.\")", "There are 4658147 rows in train data.\nThere are 2505542 rows in test data.\nThere are 85003 distinct molecules in train data.\nThere are 45772 distinct molecules in test data.\nThere are 29 unique atoms.\nThere are 8 unique types.\n" ] ], [ [ "---\n## myFunc\n**metrics**", "_____no_output_____" ] ], [ [ "def kaggle_metric(df, preds):\n    df[\"prediction\"] = preds\n    maes = []\n    for t in df.type.unique():\n        y_true = df[df.type==t].scalar_coupling_constant.values\n        y_pred = df[df.type==t].prediction.values\n        mae = np.log(mean_absolute_error(y_true, y_pred))\n        maes.append(mae)\n    return np.mean(maes)", "_____no_output_____" ] ], [ [ "---\n**memory**", "_____no_output_____" ] ], [ [ "def reduce_mem_usage(df, verbose=True):\n    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n    start_mem = df.memory_usage().sum() / 1024**2\n    for col in df.columns:\n        col_type = df[col].dtypes\n        if col_type in numerics:\n            c_min = df[col].min()\n            c_max = df[col].max()\n            if str(col_type)[:3] == 'int':\n                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n                    df[col] = df[col].astype(np.int8)\n                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n                    df[col] = df[col].astype(np.int16)\n                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n                    df[col] = df[col].astype(np.int32)\n                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n                    df[col] = df[col].astype(np.int64)\n            else:\n                c_prec = df[col].apply(lambda x: np.finfo(x).precision).max()\n                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max and c_prec == np.finfo(np.float16).precision:\n                    df[col] = df[col].astype(np.float16)\n                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max and c_prec == np.finfo(np.float32).precision:\n                    df[col] = df[col].astype(np.float32)\n                else:\n                    df[col] = df[col].astype(np.float64)\n    end_mem = df.memory_usage().sum() / 1024**2\n    if verbose: print('Mem. 
usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n    return df", "_____no_output_____" ], [ "class permutation_importance():\n    def __init__(self, model, metric):\n        self.is_computed = False\n        self.n_feat = 0\n        self.base_score = 0\n        self.model = model\n        self.metric = metric\n        self.df_result = []\n    \n    def compute(self, X_valid, y_valid):\n        self.n_feat = len(X_valid.columns)\n        self.base_score = self.metric(y_valid, self.model.predict(X_valid))\n        self.df_result = pd.DataFrame({'feat': X_valid.columns, \n                                       'score': np.zeros(self.n_feat),\n                                       'score_diff': np.zeros(self.n_feat)})\n        \n        # predict with each feature permuted in turn\n        for i, col in enumerate(X_valid.columns):\n            df_perm = X_valid.copy()\n            np.random.seed(1)\n            df_perm[col] = np.random.permutation(df_perm[col])\n            y_valid_pred = self.model.predict(df_perm)\n            score = self.metric(y_valid, y_valid_pred)\n            self.df_result['score'][self.df_result['feat']==col] = score\n            self.df_result['score_diff'][self.df_result['feat']==col] = self.base_score - score\n        self.is_computed = True\n    \n    def get_negative_feature(self):\n        assert self.is_computed!=False, 'the compute method has not been run'\n        idx = self.df_result['score_diff'] < 0\n        return self.df_result.loc[idx, 'feat'].values.tolist()\n    \n    def get_positive_feature(self):\n        assert self.is_computed!=False, 'the compute method has not been run'\n        idx = self.df_result['score_diff'] > 0\n        return self.df_result.loc[idx, 'feat'].values.tolist()\n    \n    def show_permutation_importance(self, score_type='loss'):\n        assert self.is_computed!=False, 'the compute method has not been run'\n        if score_type=='loss':\n            ascending = True\n        elif score_type=='accuracy':\n            ascending = False\n        else:\n            ascending = ''\n        \n        plt.figure(figsize=(15, int(0.25*self.n_feat)))\n        sns.barplot(x=\"score_diff\", y=\"feat\", data=self.df_result.sort_values(by=\"score_diff\", ascending=ascending))\n        plt.title('base_score - permutation_score')", "_____no_output_____" ] ], [ [ "# Feature Engineering", "_____no_output_____" ], [ "Build Distance Dataset", "_____no_output_____" ] ], [ [ "def build_type_dataframes(base, structures, coupling_type):\n    base = base[base['type'] == coupling_type].drop('type', axis=1).copy()\n    base = base.reset_index()\n    base['id'] = base['id'].astype('int32')\n    structures = structures[structures['molecule_name'].isin(base['molecule_name'])]\n    return base, structures\n\n# a,b = build_type_dataframes(train, structures, '1JHN')", "_____no_output_____" ], [ "def add_coordinates(base, structures, index):\n    df = pd.merge(base, structures, how='inner',\n                  left_on=['molecule_name', f'atom_index_{index}'],\n                  right_on=['molecule_name', 'atom_index']).drop(['atom_index'], axis=1)\n    df = df.rename(columns={\n        'atom': f'atom_{index}',\n        'x': f'x_{index}',\n        'y': f'y_{index}',\n        'z': f'z_{index}'\n    })\n    return df", "_____no_output_____" ], [ "def add_atoms(base, atoms):\n    df = pd.merge(base, atoms, how='inner',\n                  on=['molecule_name', 'atom_index_0', 'atom_index_1'])\n    return df", "_____no_output_____" ], [ "def merge_all_atoms(base, structures):\n    df = pd.merge(base, structures, how='left',\n                  left_on=['molecule_name'],\n                  right_on=['molecule_name'])\n    df = df[(df.atom_index_0 != df.atom_index) & (df.atom_index_1 != df.atom_index)]\n    return df", "_____no_output_____" ], [ "def add_center(df):\n    df['x_c'] = ((df['x_1'] + df['x_0']) * np.float32(0.5))\n    df['y_c'] = ((df['y_1'] + df['y_0']) * np.float32(0.5))\n    df['z_c'] = ((df['z_1'] + df['z_0']) * np.float32(0.5))\n\ndef add_distance_to_center(df):\n    df['d_c'] = ((\n        (df['x_c'] - df['x'])**np.float32(2) +\n        
(df['y_c'] - df['y'])**np.float32(2) + \n (df['z_c'] - df['z'])**np.float32(2)\n )**np.float32(0.5))\n\ndef add_distance_between(df, suffix1, suffix2):\n df[f'd_{suffix1}_{suffix2}'] = ((\n (df[f'x_{suffix1}'] - df[f'x_{suffix2}'])**np.float32(2) +\n (df[f'y_{suffix1}'] - df[f'y_{suffix2}'])**np.float32(2) + \n (df[f'z_{suffix1}'] - df[f'z_{suffix2}'])**np.float32(2)\n )**np.float32(0.5))", "_____no_output_____" ], [ "def add_distances(df):\n n_atoms = 1 + max([int(c.split('_')[1]) for c in df.columns if c.startswith('x_')])\n \n for i in range(1, n_atoms):\n for vi in range(min(4, i)):\n add_distance_between(df, i, vi)", "_____no_output_____" ], [ "def add_n_atoms(base, structures):\n dfs = structures['molecule_name'].value_counts().rename('n_atoms').to_frame()\n return pd.merge(base, dfs, left_on='molecule_name', right_index=True)", "_____no_output_____" ], [ "def build_couple_dataframe(some_csv, structures_csv, coupling_type, n_atoms=10):\n base, structures = build_type_dataframes(some_csv, structures_csv, coupling_type)\n base = add_coordinates(base, structures, 0)\n base = add_coordinates(base, structures, 1)\n \n base = base.drop(['atom_0', 'atom_1'], axis=1)\n atoms = base.drop('id', axis=1).copy()\n if 'scalar_coupling_constant' in some_csv:\n atoms = atoms.drop(['scalar_coupling_constant'], axis=1)\n \n add_center(atoms)\n atoms = atoms.drop(['x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1'], axis=1)\n\n atoms = merge_all_atoms(atoms, structures)\n \n add_distance_to_center(atoms)\n \n atoms = atoms.drop(['x_c', 'y_c', 'z_c', 'atom_index'], axis=1)\n atoms.sort_values(['molecule_name', 'atom_index_0', 'atom_index_1', 'd_c'], inplace=True)\n atom_groups = atoms.groupby(['molecule_name', 'atom_index_0', 'atom_index_1'])\n atoms['num'] = atom_groups.cumcount() + 2\n atoms = atoms.drop(['d_c'], axis=1)\n atoms = atoms[atoms['num'] < n_atoms]\n\n atoms = atoms.set_index(['molecule_name', 'atom_index_0', 'atom_index_1', 'num']).unstack()\n atoms.columns = [f'{col[0]}_{col[1]}' for col in atoms.columns]\n atoms = atoms.reset_index()\n \n# # downcast back to int8\n for col in atoms.columns:\n if col.startswith('atom_'):\n atoms[col] = atoms[col].fillna(0).astype('int8')\n \n# atoms['molecule_name'] = atoms['molecule_name'].astype('int32')\n \n full = add_atoms(base, atoms)\n add_distances(full)\n \n full.sort_values('id', inplace=True)\n \n return full", "_____no_output_____" ], [ "def take_n_atoms(df, n_atoms, four_start=4):\n labels = ['id', 'molecule_name', 'atom_index_1', 'atom_index_0']\n for i in range(2, n_atoms):\n label = f'atom_{i}'\n labels.append(label)\n\n for i in range(n_atoms):\n num = min(i, 4) if i < four_start else 4\n for j in range(num):\n labels.append(f'd_{i}_{j}')\n if 'scalar_coupling_constant' in df:\n labels.append('scalar_coupling_constant')\n return df[labels]", "_____no_output_____" ], [ "atoms = structures['atom'].values\ntypes_train = train['type'].values\ntypes_test = test['type'].values\nstructures['atom'] = structures['atom'].replace(ATOMIC_NUMBERS).astype('int8')\nfulls_train = []\nfulls_test = []\nfor type_ in progress_bar(train['type'].unique()):\n full_train = build_couple_dataframe(train, structures, type_, n_atoms=10)\n full_test = build_couple_dataframe(test, structures, type_, n_atoms=10)\n full_train = take_n_atoms(full_train, 10)\n full_test = take_n_atoms(full_test, 10)\n fulls_train.append(full_train)\n fulls_test.append(full_test)\n \nstructures['atom'] = atoms\ntrain = pd.concat(fulls_train).sort_values(by=['id']) #, axis=0)\ntest = 
pd.concat(fulls_test).sort_values(by=['id']) #, axis=0)\ntrain['type'] = types_train\ntest['type'] = types_test\ntrain = train.fillna(0)\ntest = test.fillna(0)", "_____no_output_____" ] ], [ [ "<br>\n<br>\ndist-interact", "_____no_output_____" ] ], [ [ "train['dist_interact'] = dist_interact_train.values\ntest['dist_interact'] = dist_interact_test.values", "_____no_output_____" ] ], [ [ "<br>\n<br>\nbasic", "_____no_output_____" ] ], [ [ "def map_atom_info(df_1,df_2, atom_idx):\n    df = pd.merge(df_1, df_2, how = 'left',\n                  left_on  = ['molecule_name', f'atom_index_{atom_idx}'],\n                  right_on = ['molecule_name', 'atom_index'])\n    df = df.drop('atom_index', axis=1)\n    return df\n\n\n# structure and ob_charges\nob_charge = pd.concat([ob_charge_train, ob_charge_test])\nmerge = pd.merge(ob_charge, structures, how='left',\n                 left_on  = ['molecule_name', 'atom_index'],\n                 right_on = ['molecule_name', 'atom_index'])\nfor atom_idx in [0,1]:\n    train = map_atom_info(train, merge, atom_idx)\n    test = map_atom_info(test, merge, atom_idx)\n    \n    train = train.rename(columns={\n        'atom': f'atom_{atom_idx}',\n        'x': f'x_{atom_idx}',\n        'y': f'y_{atom_idx}',\n        'z': f'z_{atom_idx}',\n        'eem': f'eem_{atom_idx}',\n        'mmff94': f'mmff94_{atom_idx}',\n        'gasteiger': f'gasteiger_{atom_idx}', \n        'qeq': f'qeq_{atom_idx}',\n        'qtpie': f'qtpie_{atom_idx}', \n        'eem2015ha': f'eem2015ha_{atom_idx}', \n        'eem2015hm': f'eem2015hm_{atom_idx}', \n        'eem2015hn': f'eem2015hn_{atom_idx}', \n        'eem2015ba': f'eem2015ba_{atom_idx}', \n        'eem2015bm': f'eem2015bm_{atom_idx}', \n        'eem2015bn': f'eem2015bn_{atom_idx}',})\n    test = test.rename(columns={\n        'atom': f'atom_{atom_idx}',\n        'x': f'x_{atom_idx}',\n        'y': f'y_{atom_idx}',\n        'z': f'z_{atom_idx}',\n        'eem': f'eem_{atom_idx}',\n        'mmff94': f'mmff94_{atom_idx}',\n        'gasteiger': f'gasteiger_{atom_idx}', \n        'qeq': f'qeq_{atom_idx}', \n        'qtpie': f'qtpie_{atom_idx}', \n        'eem2015ha': f'eem2015ha_{atom_idx}', \n        'eem2015hm': f'eem2015hm_{atom_idx}', \n        'eem2015hn': f'eem2015hn_{atom_idx}', \n        'eem2015ba': f'eem2015ba_{atom_idx}', \n        'eem2015bm': f'eem2015bm_{atom_idx}', \n        'eem2015bn': f'eem2015bn_{atom_idx}'})\n#     test = test.rename(columns={'atom': f'atom_{atom_idx}',\n#                                 'x': f'x_{atom_idx}',\n#                                 'y': f'y_{atom_idx}',\n#                                 'z': f'z_{atom_idx}'})\n\n# ob_charges\n# train = map_atom_info(train, ob_charge_train, 0)\n# test = map_atom_info(test, ob_charge_test, 0)\n# train = map_atom_info(train, ob_charge_train, 1)\n# test = map_atom_info(test, ob_charge_test, 1)", "_____no_output_____" ] ], [ [ "<br>\n<br>\ntype0", "_____no_output_____" ] ], [ [ "def create_type0(df):\n    df['type_0'] = df['type'].apply(lambda x : x[0])\n    return df\n# train['type_0'] = train['type'].apply(lambda x: x[0])\n# test['type_0'] = test['type'].apply(lambda x: x[0])", "_____no_output_____" ] ], [ [ "<br>\n<br>\ndistances", "_____no_output_____" ] ], [ [ "def distances(df):\n    df_p_0 = df[['x_0', 'y_0', 'z_0']].values\n    df_p_1 = df[['x_1', 'y_1', 'z_1']].values\n    \n    df['dist'] = np.linalg.norm(df_p_0 - df_p_1, axis=1)\n    df['dist_x'] = (df['x_0'] - df['x_1']) ** 2\n    df['dist_y'] = (df['y_0'] - df['y_1']) ** 2\n    df['dist_z'] = (df['z_0'] - df['z_1']) ** 2\n    \n    return df\n\n# train = distances(train)\n# test = distances(test)", "_____no_output_____" ] ], [ [ "<br>\n<br>\nStatistical features", "_____no_output_____" ] ], [ [ "def create_features(df):\n    df['molecule_couples'] = df.groupby('molecule_name')['id'].transform('count')\n    df['molecule_dist_mean'] = df.groupby('molecule_name')['dist'].transform('mean')\n    df['molecule_dist_min'] = 
df.groupby('molecule_name')['dist'].transform('min')\n df['molecule_dist_max'] = df.groupby('molecule_name')['dist'].transform('max')\n df['atom_0_couples_count'] = df.groupby(['molecule_name', 'atom_index_0'])['id'].transform('count')\n df['atom_1_couples_count'] = df.groupby(['molecule_name', 'atom_index_1'])['id'].transform('count')\n df[f'molecule_atom_index_0_x_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['x_1'].transform('std')\n df[f'molecule_atom_index_0_y_1_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('mean')\n df[f'molecule_atom_index_0_y_1_mean_diff'] = df[f'molecule_atom_index_0_y_1_mean'] - df['y_1']\n df[f'molecule_atom_index_0_y_1_mean_div'] = df[f'molecule_atom_index_0_y_1_mean'] / df['y_1']\n df[f'molecule_atom_index_0_y_1_max'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('max')\n df[f'molecule_atom_index_0_y_1_max_diff'] = df[f'molecule_atom_index_0_y_1_max'] - df['y_1']\n df[f'molecule_atom_index_0_y_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('std')\n df[f'molecule_atom_index_0_z_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['z_1'].transform('std')\n df[f'molecule_atom_index_0_dist_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('mean')\n df[f'molecule_atom_index_0_dist_mean_diff'] = df[f'molecule_atom_index_0_dist_mean'] - df['dist']\n df[f'molecule_atom_index_0_dist_mean_div'] = df[f'molecule_atom_index_0_dist_mean'] / df['dist']\n df[f'molecule_atom_index_0_dist_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('max')\n df[f'molecule_atom_index_0_dist_max_diff'] = df[f'molecule_atom_index_0_dist_max'] - df['dist']\n df[f'molecule_atom_index_0_dist_max_div'] = df[f'molecule_atom_index_0_dist_max'] / df['dist']\n df[f'molecule_atom_index_0_dist_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')\n df[f'molecule_atom_index_0_dist_min_diff'] = df[f'molecule_atom_index_0_dist_min'] - df['dist']\n df[f'molecule_atom_index_0_dist_min_div'] = df[f'molecule_atom_index_0_dist_min'] / df['dist']\n df[f'molecule_atom_index_0_dist_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('std')\n df[f'molecule_atom_index_0_dist_std_diff'] = df[f'molecule_atom_index_0_dist_std'] - df['dist']\n df[f'molecule_atom_index_0_dist_std_div'] = df[f'molecule_atom_index_0_dist_std'] / df['dist']\n df[f'molecule_atom_index_1_dist_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('mean')\n df[f'molecule_atom_index_1_dist_mean_diff'] = df[f'molecule_atom_index_1_dist_mean'] - df['dist']\n df[f'molecule_atom_index_1_dist_mean_div'] = df[f'molecule_atom_index_1_dist_mean'] / df['dist']\n df[f'molecule_atom_index_1_dist_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('max')\n df[f'molecule_atom_index_1_dist_max_diff'] = df[f'molecule_atom_index_1_dist_max'] - df['dist']\n df[f'molecule_atom_index_1_dist_max_div'] = df[f'molecule_atom_index_1_dist_max'] / df['dist']\n df[f'molecule_atom_index_1_dist_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('min')\n df[f'molecule_atom_index_1_dist_min_diff'] = df[f'molecule_atom_index_1_dist_min'] - df['dist']\n df[f'molecule_atom_index_1_dist_min_div'] = df[f'molecule_atom_index_1_dist_min'] / df['dist']\n df[f'molecule_atom_index_1_dist_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('std')\n df[f'molecule_atom_index_1_dist_std_diff'] = df[f'molecule_atom_index_1_dist_std'] - 
df['dist']\n df[f'molecule_atom_index_1_dist_std_div'] = df[f'molecule_atom_index_1_dist_std'] / df['dist']\n df[f'molecule_atom_1_dist_mean'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('mean')\n df[f'molecule_atom_1_dist_min'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('min')\n df[f'molecule_atom_1_dist_min_diff'] = df[f'molecule_atom_1_dist_min'] - df['dist']\n df[f'molecule_atom_1_dist_min_div'] = df[f'molecule_atom_1_dist_min'] / df['dist']\n df[f'molecule_atom_1_dist_std'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('std')\n df[f'molecule_atom_1_dist_std_diff'] = df[f'molecule_atom_1_dist_std'] - df['dist']\n df[f'molecule_type_0_dist_std'] = df.groupby(['molecule_name', 'type_0'])['dist'].transform('std')\n df[f'molecule_type_0_dist_std_diff'] = df[f'molecule_type_0_dist_std'] - df['dist']\n df[f'molecule_type_dist_mean'] = df.groupby(['molecule_name', 'type'])['dist'].transform('mean')\n df[f'molecule_type_dist_mean_diff'] = df[f'molecule_type_dist_mean'] - df['dist']\n df[f'molecule_type_dist_mean_div'] = df[f'molecule_type_dist_mean'] / df['dist']\n df[f'molecule_type_dist_max'] = df.groupby(['molecule_name', 'type'])['dist'].transform('max')\n df[f'molecule_type_dist_min'] = df.groupby(['molecule_name', 'type'])['dist'].transform('min')\n df[f'molecule_type_dist_std'] = df.groupby(['molecule_name', 'type'])['dist'].transform('std')\n df[f'molecule_type_dist_std_diff'] = df[f'molecule_type_dist_std'] - df['dist']\n # fc\n df[f'molecule_type_fc_max'] = df.groupby(['molecule_name', 'type'])['fc'].transform('max')\n df[f'molecule_type_fc_min'] = df.groupby(['molecule_name', 'type'])['fc'].transform('min')\n df[f'molecule_type_fc_std'] = df.groupby(['molecule_name', 'type'])['fc'].transform('std')\n df[f'molecule_type_fc_std_diff'] = df[f'molecule_type_fc_std'] - df['fc']\n df[f'molecule_atom_index_0_fc_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('mean')\n df[f'molecule_atom_index_0_fc_mean_diff'] = df[f'molecule_atom_index_0_fc_mean'] - df['fc']\n df[f'molecule_atom_index_0_fc_mean_div'] = df[f'molecule_atom_index_0_fc_mean'] / df['dist']\n df[f'molecule_atom_index_0_fc_max'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('max')\n df[f'molecule_atom_index_0_fc_max_diff'] = df[f'molecule_atom_index_0_fc_max'] - df['fc']\n df[f'molecule_atom_index_0_fc_max_div'] = df[f'molecule_atom_index_0_fc_max'] / df['fc']\n df[f'molecule_atom_index_0_fc_min'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('min')\n df[f'molecule_atom_index_0_fc_min_diff'] = df[f'molecule_atom_index_0_fc_min'] - df['fc']\n df[f'molecule_atom_index_0_fc_min_div'] = df[f'molecule_atom_index_0_fc_min'] / df['fc']\n df[f'molecule_atom_index_0_fc_std'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('std')\n df[f'molecule_atom_index_0_fc_std_diff'] = df[f'molecule_atom_index_0_fc_std'] - df['fc']\n df[f'molecule_atom_index_0_fc_std_div'] = df[f'molecule_atom_index_0_fc_std'] / df['fc']\n df[f'molecule_atom_index_1_fc_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('mean')\n df[f'molecule_atom_index_1_fc_mean_diff'] = df[f'molecule_atom_index_1_fc_mean'] - df['fc']\n df[f'molecule_atom_index_1_fc_mean_div'] = df[f'molecule_atom_index_1_fc_mean'] / df['fc']\n df[f'molecule_atom_index_1_fc_max'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('max')\n df[f'molecule_atom_index_1_fc_max_diff'] = df[f'molecule_atom_index_1_fc_max'] - df['fc']\n 
df[f'molecule_atom_index_1_fc_max_div'] = df[f'molecule_atom_index_1_fc_max'] / df['fc']\n df[f'molecule_atom_index_1_fc_min'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('min')\n df[f'molecule_atom_index_1_fc_min_diff'] = df[f'molecule_atom_index_1_fc_min'] - df['fc']\n df[f'molecule_atom_index_1_fc_min_div'] = df[f'molecule_atom_index_1_fc_min'] / df['fc']\n df[f'molecule_atom_index_1_fc_std'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('std')\n df[f'molecule_atom_index_1_fc_std_diff'] = df[f'molecule_atom_index_1_fc_std'] - df['fc']\n df[f'molecule_atom_index_1_fc_std_div'] = df[f'molecule_atom_index_1_fc_std'] / df['fc']\n \n return df", "_____no_output_____" ] ], [ [ "angle features", "_____no_output_____" ] ], [ [ "def map_atom_info(df_1,df_2, atom_idx):\n df = pd.merge(df_1, df_2, how = 'left',\n left_on = ['molecule_name', f'atom_index_{atom_idx}'],\n right_on = ['molecule_name', 'atom_index'])\n df = df.drop('atom_index', axis=1)\n\n return df\n\ndef create_closest(df):\n df_temp=df.loc[:,[\"molecule_name\",\"atom_index_0\",\"atom_index_1\",\"dist\",\"x_0\",\"y_0\",\"z_0\",\"x_1\",\"y_1\",\"z_1\"]].copy()\n df_temp_=df_temp.copy()\n df_temp_= df_temp_.rename(columns={'atom_index_0': 'atom_index_1',\n 'atom_index_1': 'atom_index_0',\n 'x_0': 'x_1',\n 'y_0': 'y_1',\n 'z_0': 'z_1',\n 'x_1': 'x_0',\n 'y_1': 'y_0',\n 'z_1': 'z_0'})\n df_temp=pd.concat(objs=[df_temp,df_temp_],axis=0)\n\n df_temp[\"min_distance\"]=df_temp.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')\n df_temp= df_temp[df_temp[\"min_distance\"]==df_temp[\"dist\"]]\n\n df_temp=df_temp.drop(['x_0','y_0','z_0','min_distance', 'dist'], axis=1)\n df_temp= df_temp.rename(columns={'atom_index_0': 'atom_index',\n 'atom_index_1': 'atom_index_closest',\n 'distance': 'distance_closest',\n 'x_1': 'x_closest',\n 'y_1': 'y_closest',\n 'z_1': 'z_closest'})\n\n for atom_idx in [0,1]:\n df = map_atom_info(df,df_temp, atom_idx)\n df = df.rename(columns={'atom_index_closest': f'atom_index_closest_{atom_idx}',\n 'distance_closest': f'distance_closest_{atom_idx}',\n 'x_closest': f'x_closest_{atom_idx}',\n 'y_closest': f'y_closest_{atom_idx}',\n 'z_closest': f'z_closest_{atom_idx}'})\n return df\n\ndef add_cos_features(df):\n df[\"distance_0\"]=((df['x_0']-df['x_closest_0'])**2+(df['y_0']-df['y_closest_0'])**2+(df['z_0']-df['z_closest_0'])**2)**(1/2)\n df[\"distance_1\"]=((df['x_1']-df['x_closest_1'])**2+(df['y_1']-df['y_closest_1'])**2+(df['z_1']-df['z_closest_1'])**2)**(1/2)\n df[\"vec_0_x\"]=(df['x_0']-df['x_closest_0'])/df[\"distance_0\"]\n df[\"vec_0_y\"]=(df['y_0']-df['y_closest_0'])/df[\"distance_0\"]\n df[\"vec_0_z\"]=(df['z_0']-df['z_closest_0'])/df[\"distance_0\"]\n df[\"vec_1_x\"]=(df['x_1']-df['x_closest_1'])/df[\"distance_1\"]\n df[\"vec_1_y\"]=(df['y_1']-df['y_closest_1'])/df[\"distance_1\"]\n df[\"vec_1_z\"]=(df['z_1']-df['z_closest_1'])/df[\"distance_1\"]\n df[\"vec_x\"]=(df['x_1']-df['x_0'])/df[\"dist\"]\n df[\"vec_y\"]=(df['y_1']-df['y_0'])/df[\"dist\"]\n df[\"vec_z\"]=(df['z_1']-df['z_0'])/df[\"dist\"]\n df[\"cos_0_1\"]=df[\"vec_0_x\"]*df[\"vec_1_x\"]+df[\"vec_0_y\"]*df[\"vec_1_y\"]+df[\"vec_0_z\"]*df[\"vec_1_z\"]\n df[\"cos_0\"]=df[\"vec_0_x\"]*df[\"vec_x\"]+df[\"vec_0_y\"]*df[\"vec_y\"]+df[\"vec_0_z\"]*df[\"vec_z\"]\n df[\"cos_1\"]=df[\"vec_1_x\"]*df[\"vec_x\"]+df[\"vec_1_y\"]*df[\"vec_y\"]+df[\"vec_1_z\"]*df[\"vec_z\"]\n df=df.drop(['vec_0_x','vec_0_y','vec_0_z','vec_1_x','vec_1_y','vec_1_z','vec_x','vec_y','vec_z'], axis=1)\n return df\n\n", 
"_____no_output_____" ], [ "%%time\n\nprint('add fc')\nprint(len(train), len(test))\ntrain['fc'] = fc_train.values\ntest['fc'] = fc_test.values\n\nprint('type0')\nprint(len(train), len(test))\ntrain = create_type0(train)\ntest = create_type0(test)\n\nprint('distances')\nprint(len(train), len(test))\ntrain = distances(train)\ntest = distances(test)\n\nprint('create_featueres')\nprint(len(train), len(test))\ntrain = create_features(train)\ntest = create_features(test)\n\nprint('create_closest')\nprint(len(train), len(test))\ntrain = create_closest(train)\ntest = create_closest(test)\ntrain.drop_duplicates(inplace=True, subset=['id']) # なぜかtrainの行数が増えるバグが発生\ntrain = train.reset_index(drop=True)\n\nprint('add_cos_features')\nprint(len(train), len(test))\ntrain = add_cos_features(train)\ntest = add_cos_features(test)", "add fc\n4658147 2505542\ntype0\n4658147 2505542\ndistances\n4658147 2505542\ncreate_featueres\n4658147 2505542\ncreate_closest\n4658147 2505542\nadd_cos_features\n4658147 2505542\nCPU times: user 2min 58s, sys: 4min 37s, total: 7min 36s\nWall time: 7min 36s\n" ] ], [ [ "---\n<br>\n<br>\n<br>\nnanがある特徴量を削除", "_____no_output_____" ] ], [ [ "drop_feats = train.columns[train.isnull().sum(axis=0) != 0].values\ndrop_feats", "_____no_output_____" ], [ "train = train.drop(drop_feats, axis=1)\ntest = test.drop(drop_feats, axis=1)\n\nassert sum(train.isnull().sum(axis=0))==0, f'train に nan があります。'\nassert sum(test.isnull().sum(axis=0))==0, f'test に nan があります。'", "_____no_output_____" ] ], [ [ "<br>\n<br>\n<br>\nエンコーディング", "_____no_output_____" ] ], [ [ "cat_cols = ['atom_1']\nnum_cols = list(set(train.columns) - set(cat_cols) - set(['type', \"scalar_coupling_constant\", 'molecule_name', 'id',\n 'atom_0', 'atom_1','atom_2', 'atom_3', 'atom_4', 'atom_5', 'atom_6', 'atom_7', 'atom_8', 'atom_9']))\n \nprint(f'カテゴリカル: {cat_cols}')\nprint(f'数値: {num_cols}')", "カテゴリカル: ['atom_1']\n数値: ['y_0', 'd_4_3', 'x_closest_1', 'd_2_0', 'molecule_atom_index_1_dist_max_div', 'molecule_atom_index_1_fc_min_div', 'molecule_atom_index_0_fc_min_diff', 'eem2015ba_0', 'molecule_atom_1_dist_min_div', 'molecule_atom_index_0_fc_min', 'd_5_2', 'molecule_atom_index_1_fc_mean_div', 'mmff94_0', 'd_5_3', 'molecule_atom_index_0_fc_max_div', 'molecule_type_dist_min', 'type_0', 'molecule_dist_min', 'qeq_0', 'eem2015hn_0', 'gasteiger_1', 'd_6_1', 'eem2015hn_1', 'd_9_1', 'molecule_type_dist_mean', 'atom_index_closest_1', 'molecule_atom_index_0_dist_mean', 'molecule_atom_index_0_fc_mean_div', 'd_7_1', 'molecule_type_dist_mean_div', 'd_4_0', 'd_8_3', 'd_3_1', 'molecule_atom_index_1_fc_max_div', 'eem_1', 'dist_y', 'molecule_atom_index_1_fc_min', 'molecule_atom_index_0_fc_max_diff', 'molecule_atom_index_1_fc_mean', 'molecule_atom_index_0_fc_min_div', 'molecule_atom_index_0_y_1_mean_diff', 'd_8_0', 'd_9_0', 'eem2015ha_1', 'atom_1_couples_count', 'molecule_atom_index_1_dist_min', 'molecule_dist_max', 'molecule_atom_index_0_dist_mean_diff', 'd_9_2', 'y_1', 'd_7_0', 'distance_0', 'atom_index_0', 'd_6_3', 'x_1', 'z_closest_0', 'z_1', 'molecule_atom_index_0_dist_max', 'eem2015bn_0', 'eem2015bn_1', 'd_7_2', 'dist_x', 'molecule_atom_index_1_dist_max', 'd_8_1', 'eem2015ba_1', 'y_closest_1', 'molecule_dist_mean', 'd_5_1', 'gasteiger_0', 'cos_0', 'qeq_1', 'd_3_2', 'cos_1', 'd_6_2', 'molecule_atom_index_0_dist_min_div', 'molecule_atom_1_dist_min_diff', 'eem2015hm_0', 'molecule_atom_index_0_dist_mean_div', 'molecule_atom_index_1_dist_min_div', 'molecule_atom_index_0_dist_max_diff', 'molecule_atom_index_1_fc_max', 'd_3_0', 'eem2015ha_0', 'dist', 
'y_closest_0', 'eem_0', 'molecule_atom_index_1_dist_max_diff', 'atom_index_1', 'molecule_atom_index_0_fc_mean', 'molecule_atom_index_1_fc_mean_diff', 'molecule_atom_index_0_y_1_mean', 'd_2_1', 'molecule_atom_index_1_fc_min_diff', 'd_9_3', 'eem2015hm_1', 'x_0', 'eem2015bm_0', 'd_4_1', 'molecule_atom_index_0_fc_max', 'molecule_atom_index_1_fc_max_diff', 'd_1_0', 'molecule_atom_index_0_fc_mean_diff', 'qtpie_0', 'x_closest_0', 'molecule_atom_index_1_dist_mean_div', 'molecule_type_dist_mean_diff', 'z_closest_1', 'qtpie_1', 'molecule_type_dist_max', 'd_5_0', 'molecule_atom_index_1_dist_mean', 'molecule_atom_index_0_y_1_max', 'z_0', 'molecule_atom_1_dist_mean', 'd_4_2', 'cos_0_1', 'molecule_couples', 'molecule_atom_1_dist_min', 'd_8_2', 'eem2015bm_1', 'molecule_atom_index_0_dist_max_div', 'molecule_type_fc_min', 'fc', 'molecule_atom_index_0_y_1_max_diff', 'mmff94_1', 'molecule_type_fc_max', 'dist_z', 'molecule_atom_index_0_dist_min_diff', 'molecule_atom_index_1_dist_mean_diff', 'atom_index_closest_0', 'molecule_atom_index_1_dist_min_diff', 'molecule_atom_index_0_dist_min', 'd_6_0', 'd_7_3', 'distance_1', 'atom_0_couples_count']\n" ] ], [ [ "<br>\n<br>\nLabelEncode\n\n- `atom_1` = {H, C, N}\n- `type_0` = {1, 2, 3}\n- `type` = {2JHC, ...}", "_____no_output_____" ] ], [ [ "for f in ['type_0', 'type']:\n    if f in train.columns:\n        lbl = LabelEncoder()\n        lbl.fit(list(train[f].values) + list(test[f].values))\n        train[f] = lbl.transform(list(train[f].values))\n        test[f] = lbl.transform(list(test[f].values))", "_____no_output_____" ] ], [ [ "<br>\n<br>\n<br>\none hot encoding", "_____no_output_____" ] ], [ [ "train = pd.get_dummies(train, columns=cat_cols)\ntest = pd.get_dummies(test, columns=cat_cols)", "_____no_output_____" ] ], [ [ "<br>\n<br>\n<br>\nStandardization", "_____no_output_____" ] ], [ [ "scaler = StandardScaler()\ntrain[num_cols] = scaler.fit_transform(train[num_cols])\ntest[num_cols] = scaler.transform(test[num_cols])", "_____no_output_____" ] ], [ [ "<br>\n<br>\n\n---\n**show features**", "_____no_output_____" ] ], [ [ "train.head(2)", "_____no_output_____" ], [ "print(train.columns)", "Index(['id', 'molecule_name', 'atom_index_1', 'atom_index_0', 'atom_2',\n       'atom_3', 'atom_4', 'atom_5', 'atom_6', 'atom_7',\n       ...\n       'y_closest_1', 'z_closest_1', 'distance_0', 'distance_1', 'cos_0_1',\n       'cos_0', 'cos_1', 'atom_1_C', 'atom_1_H', 'atom_1_N'],\n      dtype='object', length=152)\n" ] ], [ [ "# create train, test data", "_____no_output_____" ] ], [ [ "y = train['scalar_coupling_constant']\ntrain = train.drop(['id', 'molecule_name', 'atom_0', 'scalar_coupling_constant'], axis=1)\ntest = test.drop(['id', 'molecule_name', 'atom_0'], axis=1)\n# train = reduce_mem_usage(train)\n# test = reduce_mem_usage(test)\n\nX = train.copy()\nX_test = test.copy()\n\nassert len(X.columns) == len(X_test.columns), f'X and X_test sizes differ X: {len(X.columns)}, X_test: {len(X_test.columns)}'", "_____no_output_____" ], [ "del train, test, full_train, full_test", "_____no_output_____" ], [ "gc.collect()", "_____no_output_____" ] ], [ [ "# Hyperopt", "_____no_output_____" ] ], [ [ "X_train, X_valid, y_train, y_valid = train_test_split(X,\n                                                      y,\n                                                      test_size = 0.30, \n                                                      random_state = 0)", "_____no_output_____" ], [ "# Define searched space\nhyper_space = {'alpha': hp.choice('alpha', [0.01, 0.05, 0.1, 0.5, 1, 2]),\n               'l1_ratio': hp.choice('l1_ratio', [0, 0.1, 0.3, 0.5, 0.7, 0.9, 1])}\n\n# Setting the number of Evals\nMAX_EVALS= 30", "_____no_output_____" ], [ "%%time\n# training per type \n\nbest_params_list = []\nfor t in 
sorted(X_train['type'].unique()):\n    print('*'*80)\n    print(f'- Training of type {t}')\n    print('*'*80)\n    X_t_train = X_train.loc[X_train['type'] == t]\n    X_t_valid = X_valid.loc[X_valid['type'] == t]\n    y_t_train = y_train[X_train['type'] == t]\n    y_t_valid = y_valid[X_valid['type'] == t]\n    \n    \n    # evaluate_metric\n    def evaluate_metric(params):\n        model = linear_model.ElasticNet(**params, random_state=42, max_iter=3000)  # <=======================\n        model.fit(X_t_train, y_t_train)\n\n        pred = model.predict(X_t_valid)\n        y_t_train_pred = model.predict(X_t_train)\n\n        _X_t_valid = X_t_valid.copy()\n        _X_t_valid['scalar_coupling_constant'] = y_t_valid\n        cv_score = kaggle_metric(_X_t_valid, pred)\n        _X_t_valid = _X_t_valid.drop(['scalar_coupling_constant'], axis=1)\n\n#         print(f'mae(valid): {mean_absolute_error(y_t_valid, pred)}')\n        \n        print(params)\n        print(f'training l1: {mean_absolute_error(y_t_train, y_t_train_pred) :.5f}\t\t valid l1: {mean_absolute_error(y_t_valid, pred) :.5f} ')\n        print(f'cv_score: {cv_score}')\n        print('-'*80)\n        print('\\n')\n\n        return {\n            'loss': cv_score,\n            'status': STATUS_OK,\n            'stats_running': STATUS_RUNNING\n        }\n    \n    \n    # hyperopt\n    # Trials\n    trials = Trials()\n\n    # Set algorithm parameters\n    algo = partial(tpe.suggest, \n                   n_startup_jobs=-1)\n\n    # Setting the number of Evals\n    MAX_EVALS= 20\n\n    # Fit Tree Parzen Estimator\n    best_vals = fmin(evaluate_metric, space=hyper_space, verbose=1,\n                     algo=algo, max_evals=MAX_EVALS, trials=trials)\n\n    # Print best parameters\n    best_params = space_eval(hyper_space, best_vals)\n    best_params_list.append(best_params)\n    print(\"BEST PARAMETERS: \" + str(best_params))\n    print('')", "********************************************************************************\n- Training of type 0\n********************************************************************************\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 0.91368\t\t valid l1: 0.91070 \ncv_score: -0.09354614507126717 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 0.91368\t\t valid l1: 0.91070 \ncv_score: -0.09354614507126717 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.7} \ntraining l1: 1.93478\t\t valid l1: 1.93498 \ncv_score: 0.660094823215998 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.3} \ntraining l1: 1.16280\t\t valid l1: 1.16010 \ncv_score: 0.14850689058794553 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0} \ntraining l1: 3.53566\t\t valid l1: 3.53481 \ncv_score: 1.2626595371168408 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 0.99092\t\t valid l1: 0.98978 \ncv_score: -0.010275395368536121 \n-------------------------------------------------------------------------------- \n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.72196\t\t valid l1: 0.71890 \ncv_score: -0.33003828990696465 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.72196\t\t valid l1: 0.71890 \ncv_score: -0.33003828990696465 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.71184\t\t valid l1: 0.70783 \ncv_score: -0.34554766910460294 
\n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 2.08979\t\t valid l1: 2.09396 \ncv_score: 0.7390593595528752 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.71164\t\t valid l1: 0.70784 \ncv_score: -0.3455343186463923 \n-------------------------------------------------------------------------------- \n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.71184\t\t valid l1: 0.70783 \ncv_score: -0.34554766910460294 \n-------------------------------------------------------------------------------- \n{'alpha': 1, 'l1_ratio': 1} \ntraining l1: 1.24328\t\t valid l1: 1.24317 \ncv_score: 0.217668151743598 \n-------------------------------------------------------------------------------- \n{'alpha': 0.5, 'l1_ratio': 0.9} \ntraining l1: 1.49460\t\t valid l1: 1.49488 \ncv_score: 0.40204628334445036 \n-------------------------------------------------------------------------------- \n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 2.08979\t\t valid l1: 2.09396 \ncv_score: 0.7390593595528752 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 0.99092\t\t valid l1: 0.98978 \ncv_score: -0.010275395368536121 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.74977\t\t valid l1: 0.74773 \ncv_score: -0.2907189908482652 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.71184\t\t valid l1: 0.70783 \ncv_score: -0.34554766910460294 \n-------------------------------------------------------------------------------- \n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.71184\t\t valid l1: 0.70783 \ncv_score: -0.34554766910460294 \n-------------------------------------------------------------------------------- \n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.71184\t\t valid l1: 0.70783 \ncv_score: -0.34554766910460294 \n-------------------------------------------------------------------------------- \n100%|██████████| 20/20 [53:48<00:00, 247.69s/it, best loss: -0.34554766910460294]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 1}\n\n********************************************************************************\n- Training of type 1\n********************************************************************************\n{'alpha': 0.05, 'l1_ratio': 0.5} \ntraining l1: 0.78354\t\t valid l1: 0.78592 \ncv_score: -0.24090224949160532 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.5} \ntraining l1: 0.78354\t\t valid l1: 0.78592 \ncv_score: -0.24090224949160532 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.5} \ntraining l1: 4.07264\t\t valid l1: 4.09357 \ncv_score: 1.4094175370701707 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.7} \ntraining l1: 0.67312\t\t valid l1: 0.67603 \ncv_score: -0.3915176561993669 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 0.93265\t\t valid l1: 0.93865 \ncv_score: -0.06331257537356183 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid 
l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.40860\t\t valid l1: 0.40942 \ncv_score: -0.8930173413436192 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0} \ntraining l1: 2.50820\t\t valid l1: 2.51723 \ncv_score: 0.9231573323454739 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 1.97328\t\t valid l1: 1.98334 \ncv_score: 0.6847809970079919 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50996\t\t valid l1: 0.51278 \ncv_score: -0.6679097010470093 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 1} \ntraining l1: 2.05690\t\t valid l1: 2.08095 \ncv_score: 0.7328237778016239 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.1} \ntraining l1: 1.17992\t\t valid l1: 1.18177 \ncv_score: 0.16701709794142386 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 4.04774\t\t valid l1: 4.09720 \ncv_score: 1.4103041173278206 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0} \ntraining l1: 1.95070\t\t valid l1: 1.95659 \ncv_score: 0.6712055604209224 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.40860\t\t valid l1: 0.40942 \ncv_score: -0.8930173413436192 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.40528\t\t valid l1: 0.40450 \ncv_score: -0.9051024260884974 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [02:07<00:00, 6.98s/it, best loss: -0.9051024260884974]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 1}\n\n********************************************************************************\n- Training of type 2\n********************************************************************************\n{'alpha': 2, 'l1_ratio': 0.9} \ntraining l1: 2.60813\t\t valid l1: 2.60201 \ncv_score: 0.9562860001044381 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.9} \ntraining l1: 2.60813\t\t valid l1: 2.60201 \ncv_score: 0.9562860001044381 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.9} \ntraining l1: 
2.11405\t\t valid l1: 2.11195 \ncv_score: 0.747612730561078 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 1.83115\t\t valid l1: 1.82889 \ncv_score: 0.6037100374269 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.3} \ntraining l1: 1.18822\t\t valid l1: 1.18617 \ncv_score: 0.17073121084813633 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 1.12153\t\t valid l1: 1.12063 \ncv_score: 0.11389162226475327 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.7} \ntraining l1: 0.38719\t\t valid l1: 0.38785 \ncv_score: -0.9471457533833002 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.42461\t\t valid l1: 0.42511 \ncv_score: -0.8554081518402021 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.52263\t\t valid l1: 0.52293 \ncv_score: -0.6483171786977835 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 1} \ntraining l1: 2.30646\t\t valid l1: 2.30226 \ncv_score: 0.8338907141599672 \n-------------------------------------------------------------------------------- \n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 0.96761\t\t valid l1: 0.96600 \ncv_score: -0.034591615533887794 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.7} \ntraining l1: 0.38719\t\t valid l1: 0.38785 \ncv_score: -0.9471457533833002 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.7} \ntraining l1: 2.20956\t\t valid l1: 2.20627 \ncv_score: 0.79130201915712 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 0.96761\t\t valid l1: 0.96600 \ncv_score: -0.034591615533887794 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.33605\t\t valid l1: 0.33706 \ncv_score: -1.0875076106847903 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.33605\t\t valid l1: 0.33706 \ncv_score: -1.0875076106847903 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 1} \ntraining l1: 2.13906\t\t valid l1: 2.13683 \ncv_score: 0.7593223788556726 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.33605\t\t valid l1: 0.33706 \ncv_score: -1.0875076106847903 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.33605\t\t valid l1: 0.33706 \ncv_score: -1.0875076106847903 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.33605\t\t valid l1: 0.33706 \ncv_score: -1.0875076106847903 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [33:24<00:00, 31.54s/it, best loss: -1.0875076106847903]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 
1}\n\n********************************************************************************\n- Training of type 3\n********************************************************************************\n{'alpha': 0.05, 'l1_ratio': 0.5} \ntraining l1: 0.50985\t\t valid l1: 0.51223 \ncv_score: -0.668984864140646 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.5} \ntraining l1: 0.50985\t\t valid l1: 0.51223 \ncv_score: -0.668984864140646 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0} \ntraining l1: 0.51190\t\t valid l1: 0.51432 \ncv_score: -0.664902779402611 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.7} \ntraining l1: 2.00489\t\t valid l1: 2.00820 \ncv_score: 0.6972401785292501 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 1.07123\t\t valid l1: 1.07053 \ncv_score: 0.06815101866875571 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.9} \ntraining l1: 1.33809\t\t valid l1: 1.34031 \ncv_score: 0.2929008796440587 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 1} \ntraining l1: 0.50125\t\t valid l1: 0.50360 \ncv_score: -0.6859691059994688 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 1} \ntraining l1: 0.50125\t\t valid l1: 0.50360 \ncv_score: -0.6859691059994688 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.1} \ntraining l1: 0.60550\t\t valid l1: 0.60732 \ncv_score: -0.4986913770000181 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.30876\t\t valid l1: 0.30911 \ncv_score: -1.1740639925628147 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.30876\t\t valid l1: 0.30911 \ncv_score: -1.1740639925628147 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.38440\t\t valid l1: 0.38609 \ncv_score: -0.9516881707183729 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.38609\t\t valid l1: 0.38786 \ncv_score: -0.9471112282851323 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.3} \ntraining l1: 1.43497\t\t valid l1: 1.43381 \ncv_score: 0.360335030755188 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.7} \ntraining l1: 0.37623\t\t valid l1: 0.37714 \ncv_score: -0.9751333315431794 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 2.06031\t\t valid l1: 2.06469 \ncv_score: 0.7249812211526173 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.9} \ntraining l1: 0.90222\t\t valid l1: 0.90481 \ncv_score: -0.10003184139312105 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.30876\t\t valid l1: 0.30911 \ncv_score: 
-1.1740639925628147 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.30876\t\t valid l1: 0.30911 \ncv_score: -1.1740639925628147 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.30876\t\t valid l1: 0.30911 \ncv_score: -1.1740639925628147 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [14:54<00:00, 17.03s/it, best loss: -1.1740639925628147]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 1}\n\n********************************************************************************\n- Training of type 4\n********************************************************************************\n{'alpha': 0.1, 'l1_ratio': 0.9} \ntraining l1: 1.21388\t\t valid l1: 1.22406 \ncv_score: 0.20217262707612946 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.9} \ntraining l1: 1.21388\t\t valid l1: 1.22406 \ncv_score: 0.20217262707612946 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.9} \ntraining l1: 1.21388\t\t valid l1: 1.22406 \ncv_score: 0.20217262707612946 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.53693\t\t valid l1: 0.53968 \ncv_score: -0.6167802960828966 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.53693\t\t valid l1: 0.53968 \ncv_score: -0.6167802960828966 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.52099\t\t valid l1: 0.52350 \ncv_score: -0.6472120131244256 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 0.86890\t\t valid l1: 0.87496 \ncv_score: -0.13357302653275707 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.5} \ntraining l1: 2.16801\t\t valid l1: 2.16015 \ncv_score: 0.7701756503052452 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 1.43522\t\t valid l1: 1.44220 \ncv_score: 0.36617048822293685 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.7} \ntraining l1: 1.90118\t\t valid l1: 1.89818 \ncv_score: 0.6408936596188873 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.23391\t\t valid l1: 0.23289 \ncv_score: -1.4571680707001173 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.23391\t\t valid l1: 0.23289 \ncv_score: -1.4571680707001173 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 1} \ntraining l1: 0.63999\t\t valid l1: 0.64299 \ncv_score: -0.44162008622910776 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 1} \ntraining l1: 2.07147\t\t valid l1: 2.06575 \ncv_score: 0.7254928950069434 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.5} \ntraining l1: 2.16801\t\t 
valid l1: 2.16015 \ncv_score: 0.7701756503052452 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 1.43522\t\t valid l1: 1.44220 \ncv_score: 0.36617048822293685 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.7} \ntraining l1: 0.38464\t\t valid l1: 0.38522 \ncv_score: -0.9539471479759075 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.23391\t\t valid l1: 0.23289 \ncv_score: -1.4571680707001173 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.23391\t\t valid l1: 0.23289 \ncv_score: -1.4571680707001173 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 1} \ntraining l1: 0.23391\t\t valid l1: 0.23289 \ncv_score: -1.4571680707001173 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [05:04<00:00, 3.08s/it, best loss: -1.4571680707001173]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 1}\n\n********************************************************************************\n- Training of type 5\n********************************************************************************\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 2.48976\t\t valid l1: 2.48758 \ncv_score: 0.9113110318223562 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 2.48976\t\t valid l1: 2.48758 \ncv_score: 0.9113110318223562 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.44739\t\t valid l1: 0.44756 \ncv_score: -0.8039439767465442 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.44739\t\t valid l1: 0.44756 \ncv_score: -0.8039439767465442 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.34436\t\t valid l1: 0.34412 \ncv_score: -1.06676635011003 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.9} \ntraining l1: 0.82112\t\t valid l1: 0.82031 \ncv_score: -0.1980710192267788 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.1} \ntraining l1: 2.07441\t\t valid l1: 2.07247 \ncv_score: 0.7287424860966952 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.7} \ntraining l1: 2.18041\t\t valid l1: 2.17850 \ncv_score: 0.7786366735208434 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0} \ntraining l1: 1.48380\t\t valid l1: 1.48365 \ncv_score: 0.39450326325695034 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.3} \ntraining l1: 0.49909\t\t valid l1: 0.49937 \ncv_score: -0.6944158114510802 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.34436\t\t valid l1: 0.34412 \ncv_score: -1.06676635011003 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.9} 
\ntraining l1: 2.20188\t\t valid l1: 2.19994 \ncv_score: 0.788429168568534 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.7} \ntraining l1: 2.30177\t\t valid l1: 2.29938 \ncv_score: 0.8326399250540745 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.3} \ntraining l1: 1.49245\t\t valid l1: 1.49200 \ncv_score: 0.4001180599540092 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.9} \ntraining l1: 0.82112\t\t valid l1: 0.82031 \ncv_score: -0.1980710192267788 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.54766\t\t valid l1: 0.54808 \ncv_score: -0.6013370963205882 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.57061\t\t valid l1: 0.57116 \ncv_score: -0.5600807495015571 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.34436\t\t valid l1: 0.34412 \ncv_score: -1.06676635011003 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.34436\t\t valid l1: 0.34412 \ncv_score: -1.06676635011003 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.9} \ntraining l1: 0.34436\t\t valid l1: 0.34412 \ncv_score: -1.06676635011003 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [1:31:15<00:00, 371.23s/it, best loss: -1.06676635011003]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 0.9}\n\n********************************************************************************\n- Training of type 6\n********************************************************************************\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 1.50712\t\t valid l1: 1.50667 \ncv_score: 0.4099035105587547 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 1.50712\t\t valid l1: 1.50667 \ncv_score: 0.4099035105587547 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.7} \ntraining l1: 1.50712\t\t valid l1: 1.50667 \ncv_score: 0.4099035105587547 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 1} \ntraining l1: 0.50946\t\t valid l1: 0.50858 \ncv_score: -0.6761319022232997 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 1} \ntraining l1: 0.50946\t\t valid l1: 0.50858 \ncv_score: -0.6761319022232997 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.1} \ntraining l1: 1.21333\t\t valid l1: 1.21243 \ncv_score: 0.19262693487460866 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0} \ntraining l1: 2.28899\t\t valid l1: 2.28802 \ncv_score: 0.827684798830762 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.3} \ntraining l1: 0.42243\t\t valid l1: 0.42113 \ncv_score: -0.8648152053613666 
\n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.3} \ntraining l1: 0.42243\t\t valid l1: 0.42113 \ncv_score: -0.8648152053613666 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.36755\t\t valid l1: 0.36640 \ncv_score: -1.0040213795582982 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.5} \ntraining l1: 2.91908\t\t valid l1: 2.91980 \ncv_score: 1.0715158268742593 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.9} \ntraining l1: 2.86205\t\t valid l1: 2.86241 \ncv_score: 1.0516639635999498 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.36755\t\t valid l1: 0.36640 \ncv_score: -1.0040213795582982 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.5} \ntraining l1: 2.91908\t\t valid l1: 2.91980 \ncv_score: 1.0715158268742593 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.49907\t\t valid l1: 0.49777 \ncv_score: -0.6976129807730289 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.5} \ntraining l1: 2.81092\t\t valid l1: 2.81052 \ncv_score: 1.0333677714145109 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.9} \ntraining l1: 2.76039\t\t valid l1: 2.75974 \ncv_score: 1.0151348240947347 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.36755\t\t valid l1: 0.36640 \ncv_score: -1.0040213795582982 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.36755\t\t valid l1: 0.36640 \ncv_score: -1.0040213795582982 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.5} \ntraining l1: 0.36755\t\t valid l1: 0.36640 \ncv_score: -1.0040213795582982 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [26:10<00:00, 54.33s/it, best loss: -1.0040213795582982]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 0.5}\n\n********************************************************************************\n- Training of type 7\n********************************************************************************\n{'alpha': 2, 'l1_ratio': 0.7} \ntraining l1: 0.96716\t\t valid l1: 0.96758 \ncv_score: -0.03295245528469052 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 0.7} \ntraining l1: 0.96716\t\t valid l1: 0.96758 \ncv_score: -0.03295245528469052 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 0.86919\t\t valid l1: 0.86979 \ncv_score: -0.1395029790425932 \n--------------------------------------------------------------------------------\n{'alpha': 0.5, 'l1_ratio': 0.3} \ntraining l1: 0.86919\t\t valid l1: 0.86979 \ncv_score: -0.1395029790425932 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.3} \ntraining l1: 0.72702\t\t valid l1: 
0.72765 \ncv_score: -0.31793448687946796 \n--------------------------------------------------------------------------------\n{'alpha': 0.05, 'l1_ratio': 0.9} \ntraining l1: 0.79830\t\t valid l1: 0.79927 \ncv_score: -0.2240552133649467 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.51154\t\t valid l1: 0.51189 \ncv_score: -0.6696412132352598 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0} \ntraining l1: 0.51154\t\t valid l1: 0.51189 \ncv_score: -0.6696412132352598 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.5} \ntraining l1: 0.80917\t\t valid l1: 0.81025 \ncv_score: -0.21041718674799917 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 1} \ntraining l1: 0.96716\t\t valid l1: 0.96758 \ncv_score: -0.03295245528469052 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 1, 'l1_ratio': 0.1} \ntraining l1: 0.85708\t\t valid l1: 0.85786 \ncv_score: -0.15330980975168645 \n--------------------------------------------------------------------------------\n{'alpha': 0.1, 'l1_ratio': 0.5} \ntraining l1: 0.80917\t\t valid l1: 0.81025 \ncv_score: -0.21041718674799917 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 2, 'l1_ratio': 1} \ntraining l1: 0.96716\t\t valid l1: 0.96758 \ncv_score: -0.03295245528469052 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n{'alpha': 0.01, 'l1_ratio': 0.1} \ntraining l1: 0.50819\t\t valid l1: 0.50847 \ncv_score: -0.6763443179272158 \n--------------------------------------------------------------------------------\n100%|██████████| 20/20 [10:18<00:00, 30.71s/it, best loss: -0.6763443179272158]\nBEST PARAMETERS: {'alpha': 0.01, 'l1_ratio': 0.1}\n\n" ], [ "best_params_list", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a704f99d8f33c773ff634cbfa6f496f712f0f13
26,492
ipynb
Jupyter Notebook
tp_final/notebooks/thumb_extraction.ipynb
MT2321/ImageProcessingFinalProject
d29cfd90dfa8997399dce12be464c9e859145458
[ "MIT" ]
1
2022-03-27T13:10:10.000Z
2022-03-27T13:10:10.000Z
tp_final/notebooks/thumb_extraction.ipynb
MT2321/ImageProcessingFinalProject
d29cfd90dfa8997399dce12be464c9e859145458
[ "MIT" ]
null
null
null
tp_final/notebooks/thumb_extraction.ipynb
MT2321/ImageProcessingFinalProject
d29cfd90dfa8997399dce12be464c9e859145458
[ "MIT" ]
null
null
null
36.191257
77
0.474483
[ [ [ "import cv2 as cv\nimport matplotlib.pyplot as plt\nimport os", "_____no_output_____" ], [ "root = \"../../assets/videos\"\nimg_root = \"../../assets/imgs\"\nvideos_path_raw = os.listdir(\"../../assets/videos\")\nvideos_path = [root+\"/\"+path for path in videos_path_raw]\nfor video_path, video_name in zip(videos_path, videos_path_raw):\n print(video_path)\n video_capture = cv.VideoCapture(video_path)\n length_in_frames = video_capture.get(cv.CAP_PROP_FRAME_COUNT)\n i = 0\n while i < length_in_frames//2:\n ret, frame = video_capture.read()\n i+=1\n ret, frame = video_capture.read()\n frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n img_name = video_name.replace(\".mp4\", \".jpg\")\n img_path = img_root + \"/\" + img_name\n # plt.imshow(frame)\n print(img_path)\n plt.imsave(img_path, frame)", "../../assets/videos/0.mp4\n../../assets/imgs/0.jpg\n../../assets/videos/1.mp4\n../../assets/imgs/1.jpg\n../../assets/videos/10.mp4\n../../assets/imgs/10.jpg\n../../assets/videos/100.mp4\n../../assets/imgs/100.jpg\n../../assets/videos/101.mp4\n../../assets/imgs/101.jpg\n../../assets/videos/102.mp4\n../../assets/imgs/102.jpg\n../../assets/videos/103.mp4\n../../assets/imgs/103.jpg\n../../assets/videos/104.mp4\n../../assets/imgs/104.jpg\n../../assets/videos/105.mp4\n../../assets/imgs/105.jpg\n../../assets/videos/106.mp4\n../../assets/imgs/106.jpg\n../../assets/videos/107.mp4\n../../assets/imgs/107.jpg\n../../assets/videos/108.mp4\n../../assets/imgs/108.jpg\n../../assets/videos/109.mp4\n../../assets/imgs/109.jpg\n../../assets/videos/11.mp4\n../../assets/imgs/11.jpg\n../../assets/videos/110.mp4\n../../assets/imgs/110.jpg\n../../assets/videos/111.mp4\n../../assets/imgs/111.jpg\n../../assets/videos/112.mp4\n../../assets/imgs/112.jpg\n../../assets/videos/113.mp4\n../../assets/imgs/113.jpg\n../../assets/videos/114.mp4\n../../assets/imgs/114.jpg\n../../assets/videos/115.mp4\n../../assets/imgs/115.jpg\n../../assets/videos/116.mp4\n../../assets/imgs/116.jpg\n../../assets/videos/117.mp4\n../../assets/imgs/117.jpg\n../../assets/videos/118.mp4\n../../assets/imgs/118.jpg\n../../assets/videos/119.mp4\n../../assets/imgs/119.jpg\n../../assets/videos/12.mp4\n../../assets/imgs/12.jpg\n../../assets/videos/120.mp4\n../../assets/imgs/120.jpg\n../../assets/videos/121.mp4\n../../assets/imgs/121.jpg\n../../assets/videos/122.mp4\n../../assets/imgs/122.jpg\n../../assets/videos/123.mp4\n../../assets/imgs/123.jpg\n../../assets/videos/124.mp4\n../../assets/imgs/124.jpg\n../../assets/videos/125.mp4\n../../assets/imgs/125.jpg\n../../assets/videos/126.mp4\n../../assets/imgs/126.jpg\n../../assets/videos/127.mp4\n../../assets/imgs/127.jpg\n../../assets/videos/128.mp4\n../../assets/imgs/128.jpg\n../../assets/videos/129.mp4\n../../assets/imgs/129.jpg\n../../assets/videos/13.mp4\n../../assets/imgs/13.jpg\n../../assets/videos/130.mp4\n../../assets/imgs/130.jpg\n../../assets/videos/131.mp4\n../../assets/imgs/131.jpg\n../../assets/videos/132.mp4\n../../assets/imgs/132.jpg\n../../assets/videos/133.mp4\n../../assets/imgs/133.jpg\n../../assets/videos/134.mp4\n../../assets/imgs/134.jpg\n../../assets/videos/135.mp4\n../../assets/imgs/135.jpg\n../../assets/videos/136.mp4\n../../assets/imgs/136.jpg\n../../assets/videos/137.mp4\n../../assets/imgs/137.jpg\n../../assets/videos/138.mp4\n../../assets/imgs/138.jpg\n../../assets/videos/139.mp4\n../../assets/imgs/139.jpg\n../../assets/videos/140.mp4\n../../assets/imgs/140.jpg\n../../assets/videos/141.mp4\n../../assets/imgs/141.jpg\n../../assets/videos/142.mp4\n../../assets/imgs/142.jpg\n../../assets/videos
/143.mp4\n../../assets/imgs/143.jpg\n../../assets/videos/144.mp4\n../../assets/imgs/144.jpg\n../../assets/videos/145.mp4\n../../assets/imgs/145.jpg\n../../assets/videos/146.mp4\n../../assets/imgs/146.jpg\n../../assets/videos/147.mp4\n../../assets/imgs/147.jpg\n../../assets/videos/148.mp4\n../../assets/imgs/148.jpg\n../../assets/videos/149.mp4\n../../assets/imgs/149.jpg\n../../assets/videos/150.mp4\n../../assets/imgs/150.jpg\n../../assets/videos/151.mp4\n../../assets/imgs/151.jpg\n../../assets/videos/152.mp4\n../../assets/imgs/152.jpg\n../../assets/videos/153.mp4\n../../assets/imgs/153.jpg\n../../assets/videos/154.mp4\n../../assets/imgs/154.jpg\n../../assets/videos/155.mp4\n../../assets/imgs/155.jpg\n../../assets/videos/156.mp4\n../../assets/imgs/156.jpg\n../../assets/videos/157.mp4\n../../assets/imgs/157.jpg\n../../assets/videos/158.mp4\n../../assets/imgs/158.jpg\n../../assets/videos/159.mp4\n../../assets/imgs/159.jpg\n../../assets/videos/160.mp4\n../../assets/imgs/160.jpg\n../../assets/videos/161.mp4\n../../assets/imgs/161.jpg\n../../assets/videos/162.mp4\n../../assets/imgs/162.jpg\n../../assets/videos/163.mp4\n../../assets/imgs/163.jpg\n../../assets/videos/164.mp4\n../../assets/imgs/164.jpg\n../../assets/videos/165.mp4\n../../assets/imgs/165.jpg\n../../assets/videos/166.mp4\n../../assets/imgs/166.jpg\n../../assets/videos/167.mp4\n../../assets/imgs/167.jpg\n../../assets/videos/168.mp4\n../../assets/imgs/168.jpg\n../../assets/videos/169.mp4\n../../assets/imgs/169.jpg\n../../assets/videos/170.mp4\n../../assets/imgs/170.jpg\n../../assets/videos/171.mp4\n../../assets/imgs/171.jpg\n../../assets/videos/172.mp4\n../../assets/imgs/172.jpg\n../../assets/videos/173.mp4\n../../assets/imgs/173.jpg\n../../assets/videos/174.mp4\n../../assets/imgs/174.jpg\n../../assets/videos/175.mp4\n../../assets/imgs/175.jpg\n../../assets/videos/176.mp4\n../../assets/imgs/176.jpg\n../../assets/videos/177.mp4\n../../assets/imgs/177.jpg\n../../assets/videos/178.mp4\n../../assets/imgs/178.jpg\n../../assets/videos/179.mp4\n../../assets/imgs/179.jpg\n../../assets/videos/18.mp4\n../../assets/imgs/18.jpg\n../../assets/videos/180.mp4\n../../assets/imgs/180.jpg\n../../assets/videos/181.mp4\n../../assets/imgs/181.jpg\n../../assets/videos/182.mp4\n../../assets/imgs/182.jpg\n../../assets/videos/183.mp4\n../../assets/imgs/183.jpg\n../../assets/videos/184.mp4\n../../assets/imgs/184.jpg\n../../assets/videos/185.mp4\n../../assets/imgs/185.jpg\n../../assets/videos/186.mp4\n../../assets/imgs/186.jpg\n../../assets/videos/187.mp4\n../../assets/imgs/187.jpg\n../../assets/videos/188.mp4\n../../assets/imgs/188.jpg\n../../assets/videos/189.mp4\n../../assets/imgs/189.jpg\n../../assets/videos/19.mp4\n../../assets/imgs/19.jpg\n../../assets/videos/190.mp4\n../../assets/imgs/190.jpg\n../../assets/videos/191.mp4\n../../assets/imgs/191.jpg\n../../assets/videos/192.mp4\n../../assets/imgs/192.jpg\n../../assets/videos/193.mp4\n../../assets/imgs/193.jpg\n../../assets/videos/194.mp4\n../../assets/imgs/194.jpg\n../../assets/videos/195.mp4\n../../assets/imgs/195.jpg\n../../assets/videos/196.mp4\n../../assets/imgs/196.jpg\n../../assets/videos/197.mp4\n../../assets/imgs/197.jpg\n../../assets/videos/198.mp4\n../../assets/imgs/198.jpg\n../../assets/videos/199.mp4\n../../assets/imgs/199.jpg\n../../assets/videos/2.mp4\n../../assets/imgs/2.jpg\n../../assets/videos/20.mp4\n../../assets/imgs/20.jpg\n../../assets/videos/200.mp4\n../../assets/imgs/200.jpg\n../../assets/videos/201.mp4\n../../assets/imgs/201.jpg\n../../assets/videos/202.mp4\n../../assets/imgs/202.jpg\n
../../assets/videos/203.mp4\n../../assets/imgs/203.jpg\n../../assets/videos/204.mp4\n../../assets/imgs/204.jpg\n../../assets/videos/205.mp4\n../../assets/imgs/205.jpg\n../../assets/videos/206.mp4\n../../assets/imgs/206.jpg\n../../assets/videos/207.mp4\n../../assets/imgs/207.jpg\n../../assets/videos/208.mp4\n../../assets/imgs/208.jpg\n../../assets/videos/209.mp4\n../../assets/imgs/209.jpg\n../../assets/videos/21.mp4\n../../assets/imgs/21.jpg\n../../assets/videos/210.mp4\n../../assets/imgs/210.jpg\n../../assets/videos/211.mp4\n../../assets/imgs/211.jpg\n../../assets/videos/212.mp4\n../../assets/imgs/212.jpg\n../../assets/videos/213.mp4\n../../assets/imgs/213.jpg\n../../assets/videos/214.mp4\n../../assets/imgs/214.jpg\n../../assets/videos/215.mp4\n../../assets/imgs/215.jpg\n../../assets/videos/216.mp4\n../../assets/imgs/216.jpg\n../../assets/videos/217.mp4\n../../assets/imgs/217.jpg\n../../assets/videos/218.mp4\n../../assets/imgs/218.jpg\n../../assets/videos/219.mp4\n../../assets/imgs/219.jpg\n../../assets/videos/22.mp4\n../../assets/imgs/22.jpg\n../../assets/videos/220.mp4\n../../assets/imgs/220.jpg\n../../assets/videos/221.mp4\n../../assets/imgs/221.jpg\n../../assets/videos/222.mp4\n../../assets/imgs/222.jpg\n../../assets/videos/223.mp4\n../../assets/imgs/223.jpg\n../../assets/videos/224.mp4\n../../assets/imgs/224.jpg\n../../assets/videos/225.mp4\n../../assets/imgs/225.jpg\n../../assets/videos/226.mp4\n../../assets/imgs/226.jpg\n../../assets/videos/227.mp4\n../../assets/imgs/227.jpg\n../../assets/videos/228.mp4\n../../assets/imgs/228.jpg\n../../assets/videos/229.mp4\n../../assets/imgs/229.jpg\n../../assets/videos/23.mp4\n../../assets/imgs/23.jpg\n../../assets/videos/230.mp4\n../../assets/imgs/230.jpg\n../../assets/videos/231.mp4\n../../assets/imgs/231.jpg\n../../assets/videos/232.mp4\n../../assets/imgs/232.jpg\n../../assets/videos/233.mp4\n../../assets/imgs/233.jpg\n../../assets/videos/234.mp4\n../../assets/imgs/234.jpg\n../../assets/videos/235.mp4\n../../assets/imgs/235.jpg\n../../assets/videos/236.mp4\n../../assets/imgs/236.jpg\n../../assets/videos/237.mp4\n../../assets/imgs/237.jpg\n../../assets/videos/238.mp4\n../../assets/imgs/238.jpg\n../../assets/videos/239.mp4\n../../assets/imgs/239.jpg\n../../assets/videos/24.mp4\n../../assets/imgs/24.jpg\n../../assets/videos/240.mp4\n../../assets/imgs/240.jpg\n../../assets/videos/241.mp4\n../../assets/imgs/241.jpg\n../../assets/videos/242.mp4\n../../assets/imgs/242.jpg\n../../assets/videos/243.mp4\n../../assets/imgs/243.jpg\n../../assets/videos/244.mp4\n../../assets/imgs/244.jpg\n../../assets/videos/245.mp4\n../../assets/imgs/245.jpg\n../../assets/videos/246.mp4\n../../assets/imgs/246.jpg\n../../assets/videos/247.mp4\n../../assets/imgs/247.jpg\n../../assets/videos/248.mp4\n../../assets/imgs/248.jpg\n../../assets/videos/249.mp4\n../../assets/imgs/249.jpg\n../../assets/videos/25.mp4\n../../assets/imgs/25.jpg\n../../assets/videos/250.mp4\n../../assets/imgs/250.jpg\n../../assets/videos/251.mp4\n../../assets/imgs/251.jpg\n../../assets/videos/252.mp4\n../../assets/imgs/252.jpg\n../../assets/videos/253.mp4\n../../assets/imgs/253.jpg\n../../assets/videos/254.mp4\n../../assets/imgs/254.jpg\n../../assets/videos/255.mp4\n../../assets/imgs/255.jpg\n../../assets/videos/256.mp4\n../../assets/imgs/256.jpg\n../../assets/videos/257.mp4\n../../assets/imgs/257.jpg\n../../assets/videos/258.mp4\n../../assets/imgs/258.jpg\n../../assets/videos/259.mp4\n../../assets/imgs/259.jpg\n../../assets/videos/26.mp4\n../../assets/imgs/26.jpg\n../../assets/videos/260.mp4\n../../asse
ts/imgs/260.jpg\n../../assets/videos/261.mp4\n../../assets/imgs/261.jpg\n../../assets/videos/262.mp4\n../../assets/imgs/262.jpg\n../../assets/videos/263.mp4\n../../assets/imgs/263.jpg\n../../assets/videos/264.mp4\n../../assets/imgs/264.jpg\n../../assets/videos/265.mp4\n../../assets/imgs/265.jpg\n../../assets/videos/266.mp4\n../../assets/imgs/266.jpg\n../../assets/videos/267.mp4\n../../assets/imgs/267.jpg\n../../assets/videos/268.mp4\n../../assets/imgs/268.jpg\n../../assets/videos/269.mp4\n../../assets/imgs/269.jpg\n../../assets/videos/27.mp4\n../../assets/imgs/27.jpg\n../../assets/videos/270.mp4\n../../assets/imgs/270.jpg\n../../assets/videos/271.mp4\n../../assets/imgs/271.jpg\n../../assets/videos/272.mp4\n../../assets/imgs/272.jpg\n../../assets/videos/273.mp4\n../../assets/imgs/273.jpg\n../../assets/videos/274.mp4\n../../assets/imgs/274.jpg\n../../assets/videos/275.mp4\n../../assets/imgs/275.jpg\n../../assets/videos/276.mp4\n../../assets/imgs/276.jpg\n../../assets/videos/277.mp4\n../../assets/imgs/277.jpg\n../../assets/videos/278.mp4\n../../assets/imgs/278.jpg\n../../assets/videos/279.mp4\n../../assets/imgs/279.jpg\n../../assets/videos/28.mp4\n../../assets/imgs/28.jpg\n../../assets/videos/280.mp4\n../../assets/imgs/280.jpg\n../../assets/videos/281.mp4\n../../assets/imgs/281.jpg\n../../assets/videos/282.mp4\n../../assets/imgs/282.jpg\n../../assets/videos/283.mp4\n../../assets/imgs/283.jpg\n../../assets/videos/284.mp4\n../../assets/imgs/284.jpg\n../../assets/videos/285.mp4\n../../assets/imgs/285.jpg\n../../assets/videos/286.mp4\n../../assets/imgs/286.jpg\n../../assets/videos/287.mp4\n../../assets/imgs/287.jpg\n../../assets/videos/288.mp4\n../../assets/imgs/288.jpg\n../../assets/videos/289.mp4\n../../assets/imgs/289.jpg\n../../assets/videos/29.mp4\n../../assets/imgs/29.jpg\n../../assets/videos/290.mp4\n../../assets/imgs/290.jpg\n../../assets/videos/291.mp4\n../../assets/imgs/291.jpg\n../../assets/videos/292.mp4\n../../assets/imgs/292.jpg\n../../assets/videos/293.mp4\n../../assets/imgs/293.jpg\n../../assets/videos/294.mp4\n../../assets/imgs/294.jpg\n../../assets/videos/295.mp4\n../../assets/imgs/295.jpg\n../../assets/videos/296.mp4\n../../assets/imgs/296.jpg\n../../assets/videos/297.mp4\n../../assets/imgs/297.jpg\n../../assets/videos/298.mp4\n../../assets/imgs/298.jpg\n../../assets/videos/299.mp4\n../../assets/imgs/299.jpg\n../../assets/videos/3.mp4\n../../assets/imgs/3.jpg\n../../assets/videos/30.mp4\n../../assets/imgs/30.jpg\n../../assets/videos/300.mp4\n../../assets/imgs/300.jpg\n../../assets/videos/301.mp4\n../../assets/imgs/301.jpg\n../../assets/videos/302.mp4\n../../assets/imgs/302.jpg\n../../assets/videos/303.mp4\n../../assets/imgs/303.jpg\n../../assets/videos/304.mp4\n../../assets/imgs/304.jpg\n../../assets/videos/305.mp4\n../../assets/imgs/305.jpg\n../../assets/videos/306.mp4\n../../assets/imgs/306.jpg\n../../assets/videos/307.mp4\n../../assets/imgs/307.jpg\n../../assets/videos/308.mp4\n../../assets/imgs/308.jpg\n../../assets/videos/309.mp4\n../../assets/imgs/309.jpg\n../../assets/videos/31.mp4\n../../assets/imgs/31.jpg\n../../assets/videos/310.mp4\n../../assets/imgs/310.jpg\n../../assets/videos/311.mp4\n../../assets/imgs/311.jpg\n../../assets/videos/312.mp4\n../../assets/imgs/312.jpg\n../../assets/videos/313.mp4\n../../assets/imgs/313.jpg\n../../assets/videos/314.mp4\n../../assets/imgs/314.jpg\n../../assets/videos/315.mp4\n../../assets/imgs/315.jpg\n../../assets/videos/316.mp4\n../../assets/imgs/316.jpg\n../../assets/videos/317.mp4\n../../assets/imgs/317.jpg\n../../assets/videos/318.
mp4\n../../assets/imgs/318.jpg\n../../assets/videos/319.mp4\n../../assets/imgs/319.jpg\n../../assets/videos/32.mp4\n../../assets/imgs/32.jpg\n../../assets/videos/320.mp4\n../../assets/imgs/320.jpg\n../../assets/videos/321.mp4\n../../assets/imgs/321.jpg\n../../assets/videos/322.mp4\n../../assets/imgs/322.jpg\n../../assets/videos/323.mp4\n../../assets/imgs/323.jpg\n../../assets/videos/324.mp4\n../../assets/imgs/324.jpg\n../../assets/videos/325.mp4\n../../assets/imgs/325.jpg\n../../assets/videos/326.mp4\n../../assets/imgs/326.jpg\n../../assets/videos/327.mp4\n../../assets/imgs/327.jpg\n../../assets/videos/328.mp4\n../../assets/imgs/328.jpg\n../../assets/videos/33.mp4\n../../assets/imgs/33.jpg\n../../assets/videos/34.mp4\n../../assets/imgs/34.jpg\n../../assets/videos/35.mp4\n../../assets/imgs/35.jpg\n../../assets/videos/36.mp4\n../../assets/imgs/36.jpg\n../../assets/videos/37.mp4\n../../assets/imgs/37.jpg\n../../assets/videos/38.mp4\n../../assets/imgs/38.jpg\n../../assets/videos/39.mp4\n../../assets/imgs/39.jpg\n../../assets/videos/4.mp4\n../../assets/imgs/4.jpg\n../../assets/videos/40.mp4\n../../assets/imgs/40.jpg\n../../assets/videos/41.mp4\n../../assets/imgs/41.jpg\n../../assets/videos/42.mp4\n../../assets/imgs/42.jpg\n../../assets/videos/43.mp4\n../../assets/imgs/43.jpg\n../../assets/videos/44.mp4\n../../assets/imgs/44.jpg\n../../assets/videos/45.mp4\n../../assets/imgs/45.jpg\n../../assets/videos/46.mp4\n../../assets/imgs/46.jpg\n../../assets/videos/47.mp4\n../../assets/imgs/47.jpg\n../../assets/videos/48.mp4\n../../assets/imgs/48.jpg\n../../assets/videos/49.mp4\n../../assets/imgs/49.jpg\n../../assets/videos/5.mp4\n../../assets/imgs/5.jpg\n../../assets/videos/50.mp4\n../../assets/imgs/50.jpg\n../../assets/videos/51.mp4\n../../assets/imgs/51.jpg\n../../assets/videos/52.mp4\n../../assets/imgs/52.jpg\n../../assets/videos/53.mp4\n../../assets/imgs/53.jpg\n../../assets/videos/54.mp4\n../../assets/imgs/54.jpg\n../../assets/videos/55.mp4\n../../assets/imgs/55.jpg\n../../assets/videos/56.mp4\n../../assets/imgs/56.jpg\n../../assets/videos/57.mp4\n../../assets/imgs/57.jpg\n../../assets/videos/58.mp4\n../../assets/imgs/58.jpg\n../../assets/videos/59.mp4\n../../assets/imgs/59.jpg\n../../assets/videos/6.mp4\n../../assets/imgs/6.jpg\n../../assets/videos/60.mp4\n../../assets/imgs/60.jpg\n../../assets/videos/61.mp4\n../../assets/imgs/61.jpg\n../../assets/videos/62.mp4\n../../assets/imgs/62.jpg\n../../assets/videos/63.mp4\n../../assets/imgs/63.jpg\n../../assets/videos/64.mp4\n../../assets/imgs/64.jpg\n../../assets/videos/65.mp4\n../../assets/imgs/65.jpg\n../../assets/videos/66.mp4\n../../assets/imgs/66.jpg\n../../assets/videos/67.mp4\n../../assets/imgs/67.jpg\n../../assets/videos/68.mp4\n../../assets/imgs/68.jpg\n../../assets/videos/69.mp4\n../../assets/imgs/69.jpg\n../../assets/videos/7.mp4\n../../assets/imgs/7.jpg\n../../assets/videos/70.mp4\n../../assets/imgs/70.jpg\n../../assets/videos/71.mp4\n../../assets/imgs/71.jpg\n../../assets/videos/72.mp4\n../../assets/imgs/72.jpg\n../../assets/videos/73.mp4\n../../assets/imgs/73.jpg\n../../assets/videos/74.mp4\n../../assets/imgs/74.jpg\n../../assets/videos/75.mp4\n../../assets/imgs/75.jpg\n../../assets/videos/76.mp4\n../../assets/imgs/76.jpg\n../../assets/videos/77.mp4\n../../assets/imgs/77.jpg\n../../assets/videos/78.mp4\n../../assets/imgs/78.jpg\n../../assets/videos/79.mp4\n../../assets/imgs/79.jpg\n../../assets/videos/8.mp4\n../../assets/imgs/8.jpg\n../../assets/videos/80.mp4\n../../assets/imgs/80.jpg\n../../assets/videos/81.mp4\n../../assets/imgs/81.jpg\n../
../assets/videos/82.mp4\n../../assets/imgs/82.jpg\n../../assets/videos/83.mp4\n../../assets/imgs/83.jpg\n../../assets/videos/84.mp4\n../../assets/imgs/84.jpg\n../../assets/videos/85.mp4\n../../assets/imgs/85.jpg\n../../assets/videos/86.mp4\n../../assets/imgs/86.jpg\n../../assets/videos/87.mp4\n../../assets/imgs/87.jpg\n../../assets/videos/88.mp4\n../../assets/imgs/88.jpg\n../../assets/videos/89.mp4\n../../assets/imgs/89.jpg\n../../assets/videos/9.mp4\n../../assets/imgs/9.jpg\n../../assets/videos/90.mp4\n../../assets/imgs/90.jpg\n../../assets/videos/91.mp4\n../../assets/imgs/91.jpg\n../../assets/videos/92.mp4\n../../assets/imgs/92.jpg\n../../assets/videos/93.mp4\n../../assets/imgs/93.jpg\n../../assets/videos/94.mp4\n../../assets/imgs/94.jpg\n../../assets/videos/95.mp4\n../../assets/imgs/95.jpg\n../../assets/videos/96.mp4\n../../assets/imgs/96.jpg\n../../assets/videos/97.mp4\n../../assets/imgs/97.jpg\n../../assets/videos/98.mp4\n../../assets/imgs/98.jpg\n../../assets/videos/99.mp4\n../../assets/imgs/99.jpg\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
4a70559976e86b7617a4478545c4e7e7d0ebc0d8
2,527
ipynb
Jupyter Notebook
selenium/selenium_datepicker.ipynb
barnwalp/jupyter_notebook
025fd532f6a334a64063e2f66fba6104cc4f48aa
[ "MIT" ]
1
2020-08-20T02:40:24.000Z
2020-08-20T02:40:24.000Z
selenium/selenium_datepicker.ipynb
barnwalp/jupyter_notebook
025fd532f6a334a64063e2f66fba6104cc4f48aa
[ "MIT" ]
null
null
null
selenium/selenium_datepicker.ipynb
barnwalp/jupyter_notebook
025fd532f6a334a64063e2f66fba6104cc4f48aa
[ "MIT" ]
null
null
null
28.077778
101
0.56114
[ [ [ "from selenium import webdriver", "_____no_output_____" ], [ "driver = webdriver.Chrome()\ndriver.get('https://www.nseindia.com/products/content/derivatives/equities/archieve_fo.htm')\nprint(f'Page Title is: {driver.title}')\n\n# entering the text directly in the input box\n# -------------------------------------------\n# driver.find_element_by_css_selector('#date.hasDatepicker').send_keys(\"10-04-2018\")\n\ndriver.implicitly_wait(2)\n\n# selecting date using datepicker calendar\n# ----------------------------------------\ndatepicker = driver.find_element_by_css_selector('#date.hasDatepicker')\ndatepicker.click()\n\nselectMonth = driver.find_element_by_css_selector('.ui-datepicker-month')\n# selecting all option tags and iterating over it untill correct month is found\nfor option in selectMonth.find_elements_by_tag_name('option'):\n if option.text == 'Mar':\n option.click()\n break\n\nselectYear = driver.find_element_by_css_selector('.ui-datepicker-year')\n# selecting all option tags and iterating over it untill correct year is found\nfor option in selectYear.find_elements_by_tag_name('option'):\n if option.text == '2017':\n option.click()\n break\n \n# selecting all elements with the class 'ui-state-defualt'\ndays = driver.find_elements_by_css_selector('.ui-state-default')\ndays[4].click()", "Page Title is: NSE - National Stock Exchange of India Ltd.\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]