hexsha: stringlengths 40 .. 40
size: int64 6 .. 14.9M
ext: stringclasses 1 value
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 6 .. 260
max_stars_repo_name: stringlengths 6 .. 119
max_stars_repo_head_hexsha: stringlengths 40 .. 41
max_stars_repo_licenses: list
max_stars_count: int64 1 .. 191k
max_stars_repo_stars_event_min_datetime: stringlengths 24 .. 24
max_stars_repo_stars_event_max_datetime: stringlengths 24 .. 24
max_issues_repo_path: stringlengths 6 .. 260
max_issues_repo_name: stringlengths 6 .. 119
max_issues_repo_head_hexsha: stringlengths 40 .. 41
max_issues_repo_licenses: list
max_issues_count: int64 1 .. 67k
max_issues_repo_issues_event_min_datetime: stringlengths 24 .. 24
max_issues_repo_issues_event_max_datetime: stringlengths 24 .. 24
max_forks_repo_path: stringlengths 6 .. 260
max_forks_repo_name: stringlengths 6 .. 119
max_forks_repo_head_hexsha: stringlengths 40 .. 41
max_forks_repo_licenses: list
max_forks_count: int64 1 .. 105k
max_forks_repo_forks_event_min_datetime: stringlengths 24 .. 24
max_forks_repo_forks_event_max_datetime: stringlengths 24 .. 24
avg_line_length: float64 2 .. 1.04M
max_line_length: int64 2 .. 11.2M
alphanum_fraction: float64 0 .. 1
cells: list
cell_types: list
cell_type_groups: list
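The rows that follow are raw records under this schema, one notebook file per record. As a rough illustration of how such a dump can be inspected, here is a minimal pandas sketch; the parquet storage format and the file name "notebooks.parquet" are assumptions made for the example, and only the column names are taken from the schema above.

# Minimal sketch for inspecting a dump with the columns listed above.
# Assumptions (not stated in the source): the rows are available as a local
# parquet file named "notebooks.parquet"; only the column names are real.
import pandas as pd

df = pd.read_parquet("notebooks.parquet")

# Per-notebook metadata columns
meta_cols = ["max_stars_repo_name", "max_stars_repo_path", "size",
             "max_stars_count", "avg_line_length", "alphanum_fraction"]
print(df[meta_cols].head())

# Each row also carries the notebook content itself
row = df.iloc[0]
print("licenses:", row["max_stars_repo_licenses"])
print("cell type sequence:", row["cell_types"])
print("number of cell groups:", len(row["cells"]))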

hexsha: cbc2b6e4f77e90575a32d20bee2c7f978f768528
size: 2,411
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Prelim.ipynb
max_stars_repo_name: cocolleen/OOP-1-2
max_stars_repo_head_hexsha: c8a1580de9d6992e692851e6543a8863a9ecc348
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Prelim.ipynb
max_issues_repo_name: cocolleen/OOP-1-2
max_issues_repo_head_hexsha: c8a1580de9d6992e692851e6543a8863a9ecc348
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Prelim.ipynb
max_forks_repo_name: cocolleen/OOP-1-2
max_forks_repo_head_hexsha: c8a1580de9d6992e692851e6543a8863a9ecc348
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 30.1375
max_line_length: 220
alphanum_fraction: 0.486105
cells / cell_types / cell_type_groups:
[ [ [ "<a href=\"https://colab.research.google.com/github/cocolleen/OOP-1-2/blob/main/Prelim.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "class Student:\n def __init__(self, Name, Student_No, Age, School, Course):\n self.Name = Name\n self.Student_No = Student_No\n self.Age = Age\n self.School = School\n self.Course = Course\n \n def Info(self):\n print(\"\\n\", \"Hi, I am\", self.Name, \"and my student number is\", self.Student_No,\n \"\\n\", \"I am currently\", self.Age, \"\\n\",\n \"Studying at\", self.School, \"taking\", self.Course)\n\nMyself = Student(\"Colleen M. Quijano\", \"202102070\", \"19 years old\", \"Cavite State Univeristy - Main Campus\", \"Bachelor of Science in Computer Engineering\") \nMyself.Info()\n", "\n Hi, I am Colleen M. Quijano and my student number is 202102070 \n I am currently 19 years old \n Studying at Cavite State Univeristy - Main Campus taking Bachelor of Science in Computer Engineering\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]

hexsha: cbc2ba11652a9d023d50abb0a1d4a283b0a03892
size: 21,775
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: 2D/Testing_Time.ipynb
max_stars_repo_name: mtcarilli/CME_approximations
max_stars_repo_head_hexsha: 1ffd1cc0bd17679116964ee33634c0d76c50064e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 2D/Testing_Time.ipynb
max_issues_repo_name: mtcarilli/CME_approximations
max_issues_repo_head_hexsha: 1ffd1cc0bd17679116964ee33634c0d76c50064e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 2D/Testing_Time.ipynb
max_forks_repo_name: mtcarilli/CME_approximations
max_forks_repo_head_hexsha: 1ffd1cc0bd17679116964ee33634c0d76c50064e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 27.563291
max_line_length: 262
alphanum_fraction: 0.493226
cells / cell_types / cell_type_groups:
[ [ [ "import numpy as np\nimport time\nimport torch\n\nimport scipy.stats as stats\nfrom scipy.special import gammaln\n\n\nimport train_2D_rt as tr", "_____no_output_____" ] ], [ [ "# Testing the Timing of exact CME vs. NN prediction\n\n\nFirst, generate some number of parameters to use in timing. I'll start with 15 but maybe increase this? \n\n", "_____no_output_____" ] ], [ [ "set_size = 1\nnum_files = 15\nN = num_files*set_size\n\n\nparams = tr.generate_param_vectors(N)", "_____no_output_____" ] ], [ [ "-----\n\n\n## Timing for exact CME\n\nNow, define the calculate exact CME function and get_moments. Get moments now accepts a multiple of sigma over which to calculate the solution.", "_____no_output_____" ] ], [ [ "def get_moments(p,N):\n b,beta,gamma=p\n \n r = torch.tensor([1/beta, 1/gamma])\n MU = b*r\n VAR = MU*torch.tensor([1+b,1+b*beta/(beta+gamma)])\n STD = torch.sqrt(VAR)\n xmax = torch.ceil(MU)\n xmax = torch.ceil(xmax + N*STD)\n xmax = torch.clip(xmax,30,np.inf).int()\n return MU, VAR, STD, xmax\n\ndef calculate_exact_cme(p,method,N):\n \n '''Given parameter vector p, calculate the exact probabilites using CME integrator.'''\n p1 = torch.from_numpy(p).float()\n p1 = 10**p1\n \n MU, VAR, STD, xmaxc = get_moments(p1,N)\n\n \n xmaxc = np.array([int(xmaxc[0]),int(xmaxc[1])])\n \n y = tr.cme_integrator(np.array(p1),xmaxc+1,method=method)\n \n return(xmaxc[0]*xmaxc[1])", "_____no_output_____" ] ], [ [ "----\n\n## Increasing the State Space of Each Grid (multiple sigmas)\n\n\n### Quad_vec ", "_____no_output_____" ] ], [ [ "P = 15 \n\nsigmas = [1,2,3,5,10,15,25,50]\nstate_spaces = []\n\ntime_sigmas_fixedquad = []\n\nfor sig in sigmas:\n print(sig)\n t1 = time.time()\n\n state_spaces_ = np.zeros(P)\n\n for i in range(P):\n\n s_ = calculate_exact_cme(params[i], method = 'fixed_quad',N=sig)\n state_spaces_[i] = s_\n \n state_spaces.append(state_spaces_)\n t2 = time.time()\n \n time_sigmas_fixedquad.append(t2-t1)", "_____no_output_____" ], [ "P = 15 \n\nsigmas = [1,2,3,5,10,15,25,50]\nstate_spaces = []\n\ntime_sigmas_quadvec = []\n\nfor sig in sigmas:\n print(sig)\n t1 = time.time()\n\n state_spaces_ = np.zeros(P)\n\n for i in range(P):\n\n s_ = calculate_exact_cme(params[i], method = 'quad_vec',N=sig)\n state_spaces_[i] = s_\n \n state_spaces.append(state_spaces_)\n t2 = time.time()\n \n time_sigmas_quadvec.append(t2-t1)", "_____no_output_____" ] ], [ [ "------\n# Increasing the Number of P vectors ", "_____no_output_____" ] ], [ [ "P = 15 \n\np_vecs = [1,2,3,5,10,15,25]\n\ntime_repeatP_fixedquad = []\n\nfor p in p_vecs:\n print(p)\n param_list = list(params)\n params_ = np.array(p*list(params))\n \n t1 = time.time()\n\n\n for i in range(P*p):\n\n s_ = calculate_exact_cme(params_[i], method = 'fixed_quad',N=1)\n\n t2 = time.time()\n \n time_repeatP_fixedquad.append(t2-t1)", "_____no_output_____" ], [ "P = 15 \n\np_vecs = [1,2,3,5,10,15,25]\n\ntime_repeatP_quadvec = []\n\nfor p in p_vecs:\n print(p)\n param_list = list(params)\n params_ = np.array(p*list(params))\n \n t1 = time.time()\n\n\n for i in range(P*p):\n\n s_ = calculate_exact_cme(params_[i], method = 'quad_vec',N=1)\n\n t2 = time.time()\n \n time_repeatP_quadvec.append(t2-t1)", "_____no_output_____" ] ], [ [ "### Nice.\n\n\nGreat, we now have the timings for 1) increasing the grid size over which we integrate the exact CME and 2) increasing the number of parameters we use (kinda the same as increasing grid sizes, just in chunks? i think?) 
for 1) fixed_quad and 2) quad_vec.\n\n\nLet's do the same timing tests for the NN, with several different generating basis functions.", "_____no_output_____" ], [ "------\n# Timing for NN \n\nFirst, I'll define the grid and get_ypred_at_RT functions! ", "_____no_output_____" ] ], [ [ "def generate_grid(npdf,VAR,MU,quantiles=None):\n if quantiles=='PRESET':\n logstd = torch.sqrt(np.log((VAR/MU**2)+1))\n logmean = torch.log(MU**2/np.sqrt(VAR+MU**2))\n translin_0 = torch.exp(logmean[0]+logstd[0]*NORM_nas)\n translin_1 = torch.exp(logmean[1]+logstd[1]*NORM_mat)\n return translin_0,translin_1\n return(translin)\n\ndef get_ypred_at_RT(p,npdf,w,N,hyp=2.4,quantiles='PRESET',\n first_special=False,special_std='tail_prob'):\n p = 10**p\n MU, VAR, STD, xmax = get_moments(p,N)\n \n #two separate variables. a bit ugly and leaves room for error. \n grid_nas,grid_mat = generate_grid(npdf,VAR,MU,quantiles=quantiles) \n # no zs implementation yet. not sure i want to implement it.\n\n s_nas = torch.zeros(npdf[0])\n s_mat = torch.zeros(npdf[1])\n\n spec = 0 if first_special else -1\n if first_special:\n s_nas[1:] = torch.diff(grid_nas)\n s_mat[1:] = torch.diff(grid_mat)\n else: #last special... for now\n s_nas[:-1] = torch.diff(grid_nas)\n s_mat[:-1] = torch.diff(grid_mat)\n \n if special_std == 'mean':\n s_nas[spec] = grid_nas[spec]\n s_mat[spec] = grid_mat[spec]\n elif special_std == 'neighbor': #assign_neighbor_to_special\n s_nas[spec] = s_nas[1] if first_special else s_nas[-2]\n s_mat[spec] = s_mat[1] if first_special else s_mat[-2]\n elif special_std == 'tail_prob':\n if first_special:\n print('If you are using this setting, you are doing something wrong.')\n t_max = torch.log(p[1]/p[2])/(p[1] - p[2])\n f = (torch.exp(-p[2]*t_max) - torch.exp(-p[1]*t_max)) * p[1]/(p[1] - p[2]) * p[0]\n tailratio = 1/(1+1/f) #the mature tail ratio\n s_mat[spec] = torch.sqrt(grid_mat[spec] / (1-tailratio))\n tailratio = p[0]/(1+p[0]) #the nascent tail ratio\n s_nas[spec] = torch.sqrt(grid_nas[spec] / (1-tailratio))\n else:\n print('did not specify a standard deviation convention!')\n \n s_nas *= hyp\n s_mat *= hyp\n v_nas = s_nas**2\n v_mat = s_mat**2\n\n r_nas = grid_nas**2/(v_nas-grid_nas)\n p_nas = 1-grid_nas/v_nas \n r_mat = grid_mat**2/(v_mat-grid_mat)\n p_mat = 1-grid_mat/v_mat \n \n xgrid_nas = torch.arange(xmax[0]+1)\n xgrid_mat = torch.arange(xmax[1]+1)\n \n gammaln_xgrid_nas = lnfactorial[1:(xmax[0]+2)]\n gammaln_xgrid_mat = lnfactorial[1:(xmax[1]+2)] \n\n Y = torch.zeros((xmax[0]+1,xmax[1]+1))\n \n for i in range(npdf[0]):\n lnas = -grid_nas[i] + xgrid_nas * torch.log(grid_nas[i]) - gammaln_xgrid_nas\n if p_nas[i] > 1e-10:\n lnas += torch.special.gammaln(xgrid_nas+r_nas[i]) - torch.special.gammaln(r_nas[i]) \\\n - xgrid_nas*torch.log(r_nas[i] + grid_nas[i]) + grid_nas[i] \\\n + r_nas[i]*torch.log(1-p_nas[i])\n for j in range(npdf[1]):\n lmat = - grid_mat[j] + xgrid_mat * torch.log(grid_mat[j]) - gammaln_xgrid_mat\n if p_mat[j] > 1e-10:\n lmat += torch.special.gammaln(xgrid_mat+r_mat[j]) - torch.special.gammaln(r_mat[j]) \\\n - xgrid_mat*torch.log(r_mat[j] + grid_mat[j]) + grid_mat[j] \\\n + r_mat[j]*torch.log(1-p_mat[j]) #wasteful: we're recomputing a lot of stuff.\n Y += w[i*npdf[1] + j] * torch.exp(lnas[:,None] + lmat[None,:])\n #note convention change. 
Y = the predicted PMF is now returned in the same shape as the original histogram.\n #this is fine bc Y is flattened anyway later on down the line.\n return Y", "_____no_output_____" ], [ "# define NORM and YPRED_FUN\n\ndef NORM_function(npdf):\n if npdf[0] == npdf[1]:\n n = np.arange(npdf[0])\n q = np.flip((np.cos((2*(n+1)-1)/(2*npdf)*np.pi)+1)/2)\n NORM = stats.norm.ppf(q)\n NORM_nas = torch.tensor(NORM)\n NORM_mat = NORM_nas\n else:\n n = np.arange(npdf[0])\n q = np.flip((np.cos((2*(n+1)-1)/(2*npdf[0])*np.pi)+1)/2)\n #print(q)\n NORM_nas = torch.tensor(stats.norm.ppf(q))\n n = np.arange(npdf[1])\n q = np.flip((np.cos((2*(n+1)-1)/(2*npdf[1])*np.pi)+1)/2)\n #print(q)\n NORM_mat = torch.tensor(stats.norm.ppf(q))\n \n\n n_n = np.linspace(0,1,npdf[0]+2)[1:-1]\n n_m = np.linspace(0,1,npdf[1]+2)[1:-1]\n NORM_nas = stats.norm.ppf(n_n)\n NORM_mat = stats.norm.ppf(n_m)\n #print(NORM_nas)\n return(NORM_nas,NORM_mat)\n\nlnfactorial = torch.special.gammaln(torch.arange(10000000))\n \n\nYPRED_FUN = lambda p, npdf, w, N: get_ypred_at_RT(p=p,npdf=npdf,w=w,N=N,hyp=2.4,\n quantiles='PRESET')", "_____no_output_____" ], [ "def get_predicted_PMF(p_list,npdf,N,position,model,get_ypred_at_RT):\n '''Returns predicted histogram for p given current state of model.'''\n model.eval()\n\n p1 = p_list[position:position+1]\n w_p1 = model(p1)[0]\n p1 = p1[0]\n predicted_y1 = get_ypred_at_RT(p1,npdf,w_p1,N)\n \n return(predicted_y1)", "_____no_output_____" ] ], [ [ "The next thing to do is load in the models. :)\n\n\nI'll try for models with the following number of basis functions:\n1. [10,11]\n2. [20,21]\n3. [30,31]\n4. [50,51]", "_____no_output_____" ] ], [ [ "npdf = [10,11]\nmodel_10 = tr.my_MLP1(3,npdf[0]*npdf[1])\nmodel_10.load_state_dict(torch.load('./quadvec_models/10npdf_256params_qlin_MODEL'))\nmodel_10.eval();", "_____no_output_____" ], [ "npdf = [20,21]\n# pre-loaded model\nmodel_20 = tr.my_MLP1(3,npdf[0]*npdf[1])\nmodel_20.load_state_dict(torch.load('./quadvec_models/07032022_20npdf_1train_qlin_15epochs_MODEL'))\nmodel_20.eval();", "_____no_output_____" ], [ "npdf = [30,31]\n# pre-loaded model\nmodel_30 = tr.my_MLP1(3,npdf[0]*npdf[1])\nmodel_30.load_state_dict(torch.load('./quadvec_models/30npdf_256params_qlin_MODEL'))\nmodel_30.eval();", "_____no_output_____" ], [ "npdf = [50,51]\n# pre-loaded model\nmodel_50 = tr.my_MLP1(3,npdf[0]*npdf[1])\nmodel_50.load_state_dict(torch.load('./quadvec_models/50npdf_256params_qlin_MODEL'))\nmodel_50.eval();", "_____no_output_____" ], [ "npdf = [30,31]\n# pre-loaded model\nmodel_30 = tr.my_MLP1(3,npdf[0]*npdf[1])\nmodel_30.load_state_dict(torch.load('./quadvec_models/30npdf_256params_qlin_MODEL'))\nmodel_30.eval();", "_____no_output_____" ] ], [ [ "# Increasing Sigma (grid size) ", "_____no_output_____" ] ], [ [ "# need to work with tensors now!\n\nparams_tensor = torch.from_numpy(params).float()", "_____no_output_____" ], [ "# def get_predicted_PMF(p_list,npdf,position,model,get_ypred_at_RT)\n\nP = 15 \nsigmas = [1,2,3,5,10,15,25,50]\n\n\nnpdf = [10,11]\ntime_sigmas_NN_10 = []\n\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor sig in sigmas:\n print(sig)\n t1 = time.time()\n\n\n for i in range(P):\n\n s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_10,\n YPRED_FUN)\n t2 = time.time()\n \n time_sigmas_NN_10.append(t2-t1)\n ", "_____no_output_____" ], [ "P = 15 \nsigmas = [1,2,3,5,10,15,25,50]\n\n\nnpdf = [20,21]\ntime_sigmas_NN_20 = []\n\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor sig in sigmas:\n print(sig)\n t1 = time.time()\n\n\n for 
i in range(P):\n\n s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_20,\n YPRED_FUN)\n t2 = time.time()\n \n time_sigmas_NN_20.append(t2-t1)", "_____no_output_____" ], [ "P = 15 \nsigmas = [1,2,3,5,10,15,25,50]\n\n\nnpdf = [30,31]\ntime_sigmas_NN_30 = []\n\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor sig in sigmas:\n print(sig)\n t1 = time.time()\n\n\n for i in range(P):\n\n s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_30,\n YPRED_FUN)\n t2 = time.time()\n \n time_sigmas_NN_30.append(t2-t1)", "_____no_output_____" ] ], [ [ "-----\n\n# Calculating with increasing P vectors", "_____no_output_____" ] ], [ [ "time_repeatP_NN_10 = []\n\nnpdf = [10,11]\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor p in p_vecs:\n print(p)\n param_list = list(params)\n params_ = np.array(p*list(params))\n params_ = torch.from_numpy(params_).float()\n \n t1 = time.time()\n\n\n for i in range(P*p):\n\n ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_10,\n YPRED_FUN)\n\n t2 = time.time()\n \n time_repeatP_NN_10.append(t2-t1)", "_____no_output_____" ], [ "time_repeatP_NN_20 = []\n\nnpdf = [20,21]\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor p in p_vecs:\n print(p)\n param_list = list(params)\n params_ = p*list(params)\n params_ = torch.from_numpy(params_).float()\n \n t1 = time.time()\n\n\n for i in range(P*p):\n\n ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_20,\n YPRED_FUN)\n\n t2 = time.time()\n \n time_repeatP_NN_20.append(t2-t1)", "_____no_output_____" ], [ "time_repeatP_NN_30 = []\n\nnpdf = [30,31]\nNORM_nas,NORM_mat = NORM_function(np.array(npdf))\n\nfor p in p_vecs:\n print(p)\n param_list = list(params)\n params_ = p*list(params)\n params_ = torch.from_numpy(params_).float()\n \n t1 = time.time()\n\n\n for i in range(P*p):\n\n ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_30,\n YPRED_FUN)\n\n t2 = time.time()\n \n time_repeatP_NN_30.append(t2-t1)", "_____no_output_____" ] ], [ [ "Amaxing! We now have the timing for various state spaces and generating methods.\n\nLet's see how the timing looks.\n\nThis should be fairly interesting.\n\n\n----\n\n# Plotting\n\n## Increasing Sigma", "_____no_output_____" ] ], [ [ "sigma_state_space = [np.sum(a) for a in state_spaces]\n\n\n\nplt.plot(sigma_state_space,time_sigmas_quadvec,c='red',label='Quad Vec')\nplt.plot(sigma_state_space,time_sigmas_fixedquad,c='green',label='Fixed Quad')\n\nplt.plot(sigma_state_space,time_sigmas_NN_10,c='turquoise',label='NN, 10 basis')\nplt.plot(sigma_state_space,time_sigmas_NN_20,c='teal',label='NN, 10 basis')\nplt.plot(sigma_state_space,time_sigmas_NN_30,c='blue',label='NN, 10 basis')\n\n\n\nplt.xlabel('State Space')\nplt.ylabel('Generating Time')\nplt.legend()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]

hexsha: cbc2cc1f0ad6dd65774c72af04967e82183985e1
size: 9,787
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Not My Work just references/Data-Structures-using-Python-master/Arrays/Arrays.ipynb
max_stars_repo_name: knaik/PythonPractice
max_stars_repo_head_hexsha: 0340758f9ff2b409d92f8890799852a15eabc7c6
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Not My Work just references/Data-Structures-using-Python-master/Arrays/Arrays.ipynb
max_issues_repo_name: knaik/PythonPractice
max_issues_repo_head_hexsha: 0340758f9ff2b409d92f8890799852a15eabc7c6
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Not My Work just references/Data-Structures-using-Python-master/Arrays/Arrays.ipynb
max_forks_repo_name: knaik/PythonPractice
max_forks_repo_head_hexsha: 0340758f9ff2b409d92f8890799852a15eabc7c6
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 27.803977
max_line_length: 337
alphanum_fraction: 0.535711
cells / cell_types / cell_type_groups:
[ [ [ "#### Author: OMKAR PATHAK", "_____no_output_____" ], [ "# Arrays", "_____no_output_____" ], [ "## What is an Array?", "_____no_output_____" ], [ "* Array is a data structure used to store homogeneous elements at contiguous locations.\n* One memory block is allocated for the entire array to hold the elements of the array. The array elements can be accessed in constant time by using the index of the parliculnr element as the subscript.", "_____no_output_____" ], [ "## Properties of Arrays:", "_____no_output_____" ], [ "* Arrays stores similar data types. That is, array can hold data of same data type values. This is one of the limitations of arrays compared to other data structures.\n\n* Each value stored, in an array, is known as an element and all elements are indexed. The first element added, by default, gets 0 index. That is, the 5th element added gets an index number of 4.\n\n* Elements can be retrieved by their index number. (__random access__)\n\n* Array elements are stored in contiguous (continuous) memory locations.\n\n* One array name can represent multiple values. Array is the easiest way to store a large quantity of data of same data types. For example, to store the salary of 100 employees, it is required to declare 100 variables. But with arrays, with one array name all the 100 employees salaries can be stored.\n\n* At the time of creation itself, array size should be declared (array initialization does not require size).", "_____no_output_____" ], [ "## Arrays in Python:", "_____no_output_____" ], [ "Python does not have a native support for arrays, but has a more generic data structure called LIST. List provides all the options as array with more functionality.\nBut with few tweaks we can implement Array data structure in Python.\nWe will be seeing how to do this.", "_____no_output_____" ], [ "### Creating an array:", "_____no_output_____" ] ], [ [ "class Array(object):\n ''' sizeOfArray: denotes the total size of the array to be initialized\n arrayType: denotes the data type of the array(as all the elements of the array have same data type)\n arrayItems: values at each position of array\n '''\n def __init__(self, sizeOfArray, arrayType = int):\n self.sizeOfArray = len(list(map(arrayType, range(sizeOfArray))))\n self.arrayItems =[arrayType(0)] * sizeOfArray # initialize array with zeroes\n \n def __str__(self):\n return ' '.join([str(i) for i in self.arrayItems])\n \n # function for search\n def search(self, keyToSearch):\n for i in range(self.sizeOfArray):\n if (self.arrayItems[i] == keyToSearch): # brute-forcing\n return i # index at which element/ key was found\n \n return -1 # if key not found, return -1\n \n # function for inserting an element\n def insert(self, keyToInsert, position):\n if(self.sizeOfArray > position):\n for i in range(self.sizeOfArray - 2, position - 1, -1):\n self.arrayItems[i + 1] = self.arrayItems[i]\n self.arrayItems[position] = keyToInsert\n else:\n print('Array size is:', self.sizeOfArray)\n \n # function to delete an element\n def delete(self, keyToDelete, position):\n if(self.sizeOfArray > position):\n for i in range(position, self.sizeOfArray - 1):\n self.arrayItems[i] = self.arrayItems[i + 1]\n else:\n print('Array size is:', self.sizeOfArray)\n \na = Array(10, int)\nprint(a)", "0 0 0 0 0 0 0 0 0 0\n" ] ], [ [ "### Common array operations:", "_____no_output_____" ], [ "* Search\n* Insert\n* Delete\n\n__Time complexity__:\n\n* Search: O(n)\n* Insert: O(n)\n* Delete: O(n)\n* Indexing: O(1)", "_____no_output_____" ], [ "### Search 
Operation on Array:", "_____no_output_____" ] ], [ [ "a = Array(10, int)\nindex = a.search(0)\nprint('Element found at:', index)", "Element found at: 0\n" ] ], [ [ "### Insert Operation:", "_____no_output_____" ] ], [ [ "a = Array(10, int)\na.insert(1, 2)\na.insert(2,3)\na.insert(3,4)\nprint(a)", "0 0 1 2 3 0 0 0 0 0\n" ] ], [ [ "### Delete Operation:", "_____no_output_____" ] ], [ [ "a = Array(10, int)\na.insert(1, 2)\na.insert(2,3)\na.insert(3,4)\na.delete(3, 4)\nprint(a)\nindex = a.search(1)\nprint('Element found at:',index)", "0 0 1 2 0 0 0 0 0 0\nElement found at: 2\n" ] ], [ [ "#### These were the basics of how to implement Array using Python. Now we will see how to use Python built-in module 'array'.\n\n", "_____no_output_____" ], [ "Syntax: array(dataType, valueList)", "_____no_output_____" ] ], [ [ "# importing 'array' module \nimport array\n\n# initializing array\narr = array.array('i', [1, 2, 3, 4, 5]) # initialize array with integers ('i')\n\n# printing original array\nprint (\"The new created array is : \",end=\"\")\nfor i in range (0, 5):\n print (arr[i], end=\" \")\n\n# using append() to insert new value at end\narr.append(6);\n\n# printing appended array\nprint (\"\\nThe appended array is : \", end=\"\")\nfor i in range (0, 6):\n print (arr[i], end=\" \")\n\n# using insert() to insert value at specific position\n# inserts 5 at 2nd position\narr.insert(2, 5)\n\n# printing array after insertion\nprint (\"\\nThe array after insertion is : \", end=\"\")\nfor i in range (0, 7):\n print (arr[i], end=\" \")\n \narr.remove(1)\n\n# deleting a value from array\nprint (\"\\nThe array after deletion is : \", end=\"\")\nfor i in range (0, 6):\n print (arr[i], end=\" \")\n", "The new created array is : 1 2 3 4 5 \nThe appended array is : 1 2 3 4 5 6 \nThe array after insertion is : 1 2 5 3 4 5 6 \nThe array after deletion is : 2 5 3 4 5 6 " ] ], [ [ "### Disadvantages of Array", "_____no_output_____" ], [ "* __Fixed size__: The size of the array is static (specify the array size before using it, this can be overcome using Dynamic Arrays).\n* __One block allocation__: To allocate the array itself at the beginning, sometimes it may not be possible to get the memory for the complete array (if the array size is big).\n* __Complex position-based insertion__: To insert an element at a given position, we may need to shift the existing elements. This will create a position for us to insert the new element at the desired position. If the position at which we want to add an element is at the beginning, then the shifting operation is more expensive .", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]

hexsha: cbc2d8efbba593a580c698368d071ce604d4c365
size: 8,244
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/utils/jetutil.ipynb
max_stars_repo_name: naisy/jetbot
max_stars_repo_head_hexsha: 49235c9219b048e4c30ffd183e4bea8774d2e43f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-07-28T07:51:52.000Z
max_stars_repo_stars_event_max_datetime: 2021-07-28T07:51:52.000Z
max_issues_repo_path: notebooks/utils/jetutil.ipynb
max_issues_repo_name: naisy/jetbot
max_issues_repo_head_hexsha: 49235c9219b048e4c30ffd183e4bea8774d2e43f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/utils/jetutil.ipynb
max_forks_repo_name: naisy/jetbot
max_forks_repo_head_hexsha: 49235c9219b048e4c30ffd183e4bea8774d2e43f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 24.682635
max_line_length: 145
alphanum_fraction: 0.51492
cells / cell_types / cell_type_groups:
[ [ [ "# I2Cの認識確認\nI2Cの認識状況を確認する", "_____no_output_____" ] ], [ [ "!i2cdetect -r -y 1", " 0 1 2 3 4 5 6 7 8 9 a b c d e f\n00: -- -- -- -- -- -- -- -- -- -- -- -- -- \n10: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n20: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n30: -- -- -- -- -- -- -- -- -- -- -- -- 3c -- -- -- \n40: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n50: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n60: 60 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n70: 70 -- -- -- -- -- -- -- \n" ] ], [ [ "# カメラの認識確認\nカメラの認識を確認する", "_____no_output_____" ] ], [ [ "!dmesg | grep imx219", "[ 3.510518] imx219 6-0010: tegracam sensor driver:imx219_v2.0.6\n[ 5.030429] vi 54080000.vi: subdev imx219 6-0010 bound\n" ] ], [ [ "# カメラ関連のデーモンの再起動 \n\nカメラ関連デーモンの再起動", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S systemctl restart nvargus-daemon", "[sudo] password for jetbot: " ] ], [ [ "# カメラ関連のデーモンのログ確認\nカメラ関連のデーモンのログを確認する。ログの確認が終わったら、プロセスが戻ってこないので、■でユーザにより停止させる", "_____no_output_____" ] ], [ [ "!journalctl -u nvargus-daemon.service -f", "-- Logs begin at Sun 2018-01-28 07:58:17 PST. --\nNov 11 19:45:48 jetbot systemd[1]: Started Argus daemon.\n^C\n" ] ], [ [ "# 電力モードの変更(5Wモード)\n\nCPUが2つ起動", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S nvpmodel -m 1", "[sudo] password for jetbot: " ] ], [ [ "# 電力モードの変更(10Wモード)\nCPUが4つ起動", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S nvpmodel -m 0", "[sudo] password for jetbot: " ] ], [ [ "# 電力モードの確認\n電力モードの確認", "_____no_output_____" ] ], [ [ "!nvpmodel -q --verbose", "NVPM VERB: Config file: /etc/nvpmodel.conf\nNVPM VERB: parsing done for /etc/nvpmodel.conf\nNVPM VERB: Current mode: NV Power Mode: 5W\n1\nNVPM VERB: PARAM CPU_ONLINE: ARG CORE_0: PATH /sys/devices/system/cpu/cpu0/online: REAL_VAL: 1 CONF_VAL: 1\nNVPM VERB: PARAM CPU_ONLINE: ARG CORE_1: PATH /sys/devices/system/cpu/cpu1/online: REAL_VAL: 1 CONF_VAL: 1\nNVPM VERB: PARAM CPU_ONLINE: ARG CORE_2: PATH /sys/devices/system/cpu/cpu2/online: REAL_VAL: 0 CONF_VAL: 0\nNVPM VERB: PARAM CPU_ONLINE: ARG CORE_3: PATH /sys/devices/system/cpu/cpu3/online: REAL_VAL: 0 CONF_VAL: 0\nNVPM VERB: PARAM CPU_A57: ARG MIN_FREQ: PATH /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq: REAL_VAL: 102000 CONF_VAL: 0\nNVPM VERB: PARAM CPU_A57: ARG MAX_FREQ: PATH /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq: REAL_VAL: 921600 CONF_VAL: 918000\nNVPM VERB: PARAM GPU_POWER_CONTROL_ENABLE: ARG GPU_PWR_CNTL_EN: PATH /sys/devices/gpu.0/power/control: REAL_VAL: auto CONF_VAL: on\nNVPM VERB: PARAM GPU: ARG MIN_FREQ: PATH /sys/devices/gpu.0/devfreq/57000000.gpu/min_freq: REAL_VAL: 76800000 CONF_VAL: 0\nNVPM VERB: PARAM GPU: ARG MAX_FREQ: PATH /sys/devices/gpu.0/devfreq/57000000.gpu/max_freq: REAL_VAL: 614400000 CONF_VAL: 640000000\nNVPM VERB: PARAM GPU_POWER_CONTROL_DISABLE: ARG GPU_PWR_CNTL_DIS: PATH /sys/devices/gpu.0/power/control: REAL_VAL: auto CONF_VAL: auto\nNVPM ERROR: Error opening /sys/kernel/nvpmodel_emc_cap/emc_iso_cap: 13\nNVPM ERROR: failed to read PARAM EMC: ARG MAX_FREQ: PATH /sys/kernel/nvpmodel_emc_cap/emc_iso_cap\n" ] ], [ [ "# メモリ使用量の確認\n\nfreeコマンドでメモリ使用量を確認", "_____no_output_____" ] ], [ [ "!free -h", " total used free shared buff/cache available\nMem: 3.9G 1.1G 1.6G 31M 1.2G 2.5G\nSwap: 4.0G 0B 4.0G\n" ] ], [ [ "# クロックの高速化\nクロックを高速化します。", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S jetson_clocks", "[sudo] password for jetbot: " ] ], [ [ "# クロックの状態の確認\n\nクロックの状態を表示します。", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S sudo jetson_clocks --show", 
"[sudo] password for jetbot: SOC family:tegra210 Machine:jetson-nano\nOnline CPUs: 0-3\nCPU Cluster Switching: Disabled\ncpu0: Online=1 Governor=schedutil MinFreq=921600 MaxFreq=921600 CurrentFreq=921600 IdleStates: WFI=0 c7=0 \ncpu1: Online=1 Governor=schedutil MinFreq=921600 MaxFreq=921600 CurrentFreq=921600 IdleStates: WFI=0 c7=0 \ncpu2: Online=1 Governor=schedutil MinFreq=921600 MaxFreq=921600 CurrentFreq=921600 IdleStates: WFI=0 c7=0 \ncpu3: Online=1 Governor=schedutil MinFreq=921600 MaxFreq=921600 CurrentFreq=921600 IdleStates: WFI=0 c7=0 \nGPU MinFreq=614400000 MaxFreq=614400000 CurrentFreq=614400000\nEMC MinFreq=204000000 MaxFreq=1600000000 CurrentFreq=1600000000 FreqOverride=1\nFan: speed=255\nNV Power Mode: 5W\n" ] ], [ [ "# Wifiへの接続", "_____no_output_____" ] ], [ [ "!echo jetbot | sudo -S nmcli device wifi connect 'アクセスポイント名' password 'パスワード' ifname wlan0", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]

hexsha: cbc2e3f643295d9257beb3a5abfb25d4492e6589
size: 3,050
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/.ipynb_checkpoints/vmd_beta_color_test-checkpoint.ipynb
max_stars_repo_name: alayah2626517/enmspring
max_stars_repo_head_hexsha: c77c86de2254177863c344c566ef0791306a5112
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: notebooks/.ipynb_checkpoints/vmd_beta_color_test-checkpoint.ipynb
max_issues_repo_name: alayah2626517/enmspring
max_issues_repo_head_hexsha: c77c86de2254177863c344c566ef0791306a5112
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/.ipynb_checkpoints/vmd_beta_color_test-checkpoint.ipynb
max_forks_repo_name: alayah2626517/enmspring
max_forks_repo_head_hexsha: c77c86de2254177863c344c566ef0791306a5112
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 19.805195
max_line_length: 93
alphanum_fraction: 0.524918
cells / cell_types / cell_type_groups:
[ [ [ "from os import path\nimport numpy as np\nfrom enmspring import PDB", "_____no_output_____" ] ], [ [ "### Part 1: Set the location of the test pdb", "_____no_output_____" ] ], [ [ "rootfolder = '/home/yizaochen/codes/dna_rna/enmspring/pdb_gro'\npdb_in = path.join(rootfolder, 'g_tract_21mer.perfect.pdb')\nprint(f'vmd -pdb {pdb_in}')\nprint(f'vim {pdb_in}')", "vmd -pdb /home/yizaochen/codes/dna_rna/enmspring/pdb_gro/g_tract_21mer.perfect.pdb\nvim /home/yizaochen/codes/dna_rna/enmspring/pdb_gro/g_tract_21mer.perfect.pdb\n" ] ], [ [ "### Part 2: Read pdb in", "_____no_output_____" ] ], [ [ "reader = PDB.PDBReader(pdb_in, skip_header=4, skip_footer=2)", "_____no_output_____" ], [ "atom1 = reader.atomgroup[0]\natom1.tempFactor", "_____no_output_____" ] ], [ [ "### Part 3: Random generate tempFactor in [0,1]", "_____no_output_____" ] ], [ [ "for atom in reader.atomgroup:\n atom1.tempFactor = np.random.random()", "_____no_output_____" ] ], [ [ "### Part 4: Output PDB", "_____no_output_____" ] ], [ [ "pdb_out = path.join(rootfolder, 'tempfactor_test.pdb')\nwriter = PDB.PDBWriter(pdb_out, reader.atomgroup)\nwriter.write_pdb()\nprint(f'vmd -pdb {pdb_out}')\nprint(f'vim {pdb_out}')", "_____no_output_____" ] ], [ [ "### Reference:\nhttps://www.mdanalysis.org/MDAnalysisTutorial/writing.html", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]

hexsha: cbc2ed44ec00a7fb3146a9a9b2d7334eb17fccf9
size: 60,536
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Under fitting and Over fitting.ipynb
max_stars_repo_name: AdicherlaVenkataSai/leet-challenge
max_stars_repo_head_hexsha: 5e1491e198c429c9e3be2784ca44ea2610346a37
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2020-08-28T19:50:34.000Z
max_stars_repo_stars_event_max_datetime: 2021-05-19T18:41:40.000Z
max_issues_repo_path: Under fitting and Over fitting.ipynb
max_issues_repo_name: AdicherlaVenkataSai/leet-challenge
max_issues_repo_head_hexsha: 5e1491e198c429c9e3be2784ca44ea2610346a37
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Under fitting and Over fitting.ipynb
max_forks_repo_name: AdicherlaVenkataSai/leet-challenge
max_forks_repo_head_hexsha: 5e1491e198c429c9e3be2784ca44ea2610346a37
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2020-11-09T10:45:24.000Z
max_forks_repo_forks_event_max_datetime: 2020-11-12T12:03:25.000Z
avg_line_length: 77.709884
max_line_length: 23,240
alphanum_fraction: 0.762108
cells / cell_types / cell_type_groups:
[ [ [ "# Underfitting and Overfitting demo using KNN", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "data = pd.read_csv('data_knn_classification_cleaned_titanic.csv')", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "x = data.drop(['Survived'], axis=1)\ny = data['Survived']", "_____no_output_____" ], [ "#Scaling the data\nfrom sklearn.preprocessing import StandardScaler\nss = StandardScaler()\nx = ss.fit_transform(x)", "_____no_output_____" ], [ "#split the data\nfrom sklearn.model_selection import train_test_split\ntrain_x, test_x, train_y, test_y = train_test_split(x, y, random_state=96, stratify=y)", "_____no_output_____" ] ], [ [ "# implementing KNN", "_____no_output_____" ] ], [ [ "#imporing KNN classifier and f1 score\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.metrics import f1_score", "_____no_output_____" ], [ "#creating an instance of KNN\nclf = KNN(n_neighbors = 12)\nclf.fit(train_x, train_y)\n\ntrain_predict = clf.predict(train_x)\nk1 = f1_score(train_predict, train_y)\nprint(\"training: \",k1)\n\ntest_predict = clf.predict(test_x)\nk = f1_score(test_predict, test_y) \nprint(\"testing: \",k)", "training: 0.7194570135746606\ntesting: 0.6950354609929079\n" ], [ "def f1score(k):\n train_f1 = []\n test_f1 = []\n \n for i in k:\n clf = KNN(n_neighbors = i)\n clf.fit(train_x, train_y)\n\n train_predict = clf.predict(train_x)\n k1 = f1_score(train_predict, train_y)\n train_f1.append(k1)\n\n test_predict = clf.predict(test_x)\n k = f1_score(test_predict, test_y) \n test_f1.append(k)\n return train_f1, test_f1\n ", "_____no_output_____" ], [ "k = range(1,50)\ntrain_f1, test_f1 = f1score(k)\ntrain_f1, test_f1", "_____no_output_____" ], [ "score = pd.DataFrame({'train score': train_f1, 'test_score':test_f1}, index = k)\nscore", "_____no_output_____" ], [ "#visulaising \nplt.plot(k, test_f1, color ='red', label ='test')\nplt.plot(k, train_f1, color ='green', label ='train')\nplt.xlabel('K Neighbors')\nplt.ylabel('F1 score')\nplt.title('f1 curve')\nplt.ylim(0,4,1)\nplt.legend()", "_____no_output_____" ], [ "#split the data\nfrom sklearn.model_selection import train_test_split\ntrain_x, test_x, train_y, test_y = train_test_split(x, y, random_state=42, stratify=y)", "_____no_output_____" ], [ "k = range(1,50)\ntrain_f1, test_f1 = f1score(k)", "_____no_output_____" ], [ "#visulaising \nplt.plot(k, test_f1, color ='red', label ='test')\nplt.plot(k, train_f1, color ='green', label ='train')\nplt.xlabel('K Neighbors')\nplt.ylabel('F1 score')\nplt.title('f1 curve')\n#plt.ylim(0,4,1)\nplt.legend()", "_____no_output_____" ], [ "'''\nhere the value of k is decided by using both train and test data\n, instead of (testset) that we can use validation set\n\n\ntypes:\n\n1. Hold-out validation\n as we directly divide the data into praprotions, there might be a \n case where the validation set is biased to only one class\n (which mean validation set might have data of only one class,\n these results in set have no idea about the other class)\n \n in this we have different distributions\n \n2. Stratified hold out \n \n in this we have equal distributions\n\nin the hold out scenario we need good amount of data to maintain,\nso we need to train with lot data. if the dataset is small?\nand we want to bulid the complex relations out of them? 
\n'''", "_____no_output_____" ] ], [ [ "# Bias Variance Tradeoff", "_____no_output_____" ] ], [ [ "'''\nif variance is high then bias is low\nif bias is high then variance is low \n\n\nerror high bias high variance optimally in btw\n\nfit underfit overfit bestfit\nk range 21<k k<11 12<k<21\ncomplexity low high optimum\n\n\nGeneralization error : defines the optimum model btw high bias and high varaince\n\n\nHigh variance refers to overfitting whereas high bias \nrefers to underfitting and we do not want both of these scenarios. \nSo, the best model is said to have low bias and low variance.\n'''", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]

hexsha: cbc2f6449737c3d28b0001d5ad71d26fc387072a
size: 4,940
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Notebooks/ML.Net - StopWords.ipynb
max_stars_repo_name: DaviRamos/ML.Net
max_stars_repo_head_hexsha: 3b73599bddc49cda2692edcd9e3657ed4d224dc4
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Notebooks/ML.Net - StopWords.ipynb
max_issues_repo_name: DaviRamos/ML.Net
max_issues_repo_head_hexsha: 3b73599bddc49cda2692edcd9e3657ed4d224dc4
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Notebooks/ML.Net - StopWords.ipynb
max_forks_repo_name: DaviRamos/ML.Net
max_forks_repo_head_hexsha: 3b73599bddc49cda2692edcd9e3657ed4d224dc4
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 25.463918
max_line_length: 207
alphanum_fraction: 0.536842
cells / cell_types / cell_type_groups:
[ [ [ "# ML.Net - StopWords", "_____no_output_____" ], [ "## Davi Ramos -> Cientista de Dados 👋\n([email protected])\n\n[![Linkedin Badge](https://img.shields.io/badge/-LinkedIn-blue?style=flat-square&logo=Linkedin&logoColor=white&link=https://www.linkedin.com/in/davi-ramos/)](https://www.linkedin.com/in/davi-ramos/)\n[![Twitter Badge](https://img.shields.io/badge/-Twitter-1DA1F2?style=flat-square&logo=Twitter&logoColor=white&link=https://twitter.com/Daviinfo/)](https://twitter.com/Daviinfo/)\n<a href=\"https://github.com/DaviRamos\"><img src=\"https://img.shields.io/github/followers/DaviRamos.svg?label=GitHub&style=social\" alt=\"GitHub\"></a>", "_____no_output_____" ] ], [ [ "// ML.NET Nuget packages installation\n//#r \"nuget:Microsoft.ML,1.3.1\" \n#r \"nuget:Microsoft.ML\" ", "_____no_output_____" ] ], [ [ "## Using C# Class", "_____no_output_____" ] ], [ [ "using Microsoft.ML;\nusing Microsoft.ML.Data;\nusing System;\nusing System.Collections.Generic;\nusing System.Text;", "_____no_output_____" ] ], [ [ "## Declare data-classes for input data and predictions", "_____no_output_____" ] ], [ [ "public class TextData\n{\n public string Text { get; set; }\n}\n \npublic class TextTokens\n{\n public string[] Tokens { get; set; }\n}", "_____no_output_____" ] ], [ [ "## Função Auxiliar para Imprimir os Tokens", "_____no_output_____" ] ], [ [ "private static void PrintTokens(TextTokens tokens)\n{\n Console.WriteLine(Environment.NewLine);\n\n var sb = new StringBuilder();\n\n foreach (var token in tokens.Tokens)\n {\n sb.AppendLine(token);\n }\n\n Console.WriteLine(sb.ToString());\n}", "_____no_output_____" ], [ "var context = new MLContext();\n\nvar emptyData = new List<TextData>();\n\nvar data = context.Data.LoadFromEnumerable(emptyData);\n\nvar tokenization = context.Transforms.Text.TokenizeIntoWords(\"Tokens\", \"Text\", separators: new[] { ' ', '.', ',' })\n .Append(context.Transforms.Text.RemoveDefaultStopWords(\"Tokens\", \"Tokens\",\n Microsoft.ML.Transforms.Text.StopWordsRemovingEstimator.Language.English));\n\nvar stopWordsModel = tokenization.Fit(data);\n\nvar engine = context.Model.CreatePredictionEngine<TextData, TextTokens>(stopWordsModel);\n\nvar newText = engine.Predict(new TextData { Text = \"This is a test sentence, and it is a long one.\" });\n\nPrintTokens(newText);\n\nvar customTokenization = context.Transforms.Text.TokenizeIntoWords(\"Tokens\", \"Text\", separators: new[] { ' ', '.', ',' })\n .Append(context.Transforms.Text.RemoveStopWords(\"Tokens\", \"Tokens\", new[] { \"and\", \"a\" }));\n\nvar customStopWordsModel = customTokenization.Fit(data);\n\nvar customEngine = context.Model.CreatePredictionEngine<TextData, TextTokens>(customStopWordsModel);\n\nvar newCustomText = customEngine.Predict(new TextData { Text = \"This is a test sentence, and it is a long one.\" });\n\nPrintTokens(newCustomText);\n\nConsole.ReadLine();", "\n\ntest\nsentence\nlong\n\n\n\nThis\nis\ntest\nsentence\nit\nis\nlong\none\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]

hexsha: cbc302017ad8526def5c7014f8dee7a8941a70ca
size: 1,080
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Data-Science-HYD-2k19/Daily Task/Day 2.ipynb
max_stars_repo_name: Sanjay9921/Python
max_stars_repo_head_hexsha: 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Data-Science-HYD-2k19/Daily Task/Day 2.ipynb
max_issues_repo_name: Sanjay9921/Python
max_issues_repo_head_hexsha: 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Data-Science-HYD-2k19/Daily Task/Day 2.ipynb
max_forks_repo_name: Sanjay9921/Python
max_forks_repo_head_hexsha: 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 16.119403
max_line_length: 34
alphanum_fraction: 0.49537
cells / cell_types / cell_type_groups:
[ [ [ "import time", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "import datetime", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]

hexsha: cbc3189f428b99cd41a96a498898980f223d42d7
size: 171,761
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Lectures/Lecture17 -- Ice albedo feedback in the EBM.ipynb
max_stars_repo_name: gavin971/ClimateModeling_courseware
max_stars_repo_head_hexsha: 9c8b446d6a274d88868c24570155f50c32d27b89
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2017-12-06T04:36:30.000Z
max_stars_repo_stars_event_max_datetime: 2020-12-02T13:16:02.000Z
max_issues_repo_path: Lectures/Lecture17 -- Ice albedo feedback in the EBM.ipynb
max_issues_repo_name: gavin971/ClimateModeling_courseware
max_issues_repo_head_hexsha: 9c8b446d6a274d88868c24570155f50c32d27b89
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Lectures/Lecture17 -- Ice albedo feedback in the EBM.ipynb
max_forks_repo_name: gavin971/ClimateModeling_courseware
max_forks_repo_head_hexsha: 9c8b446d6a274d88868c24570155f50c32d27b89
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 4
max_forks_repo_forks_event_min_datetime: 2018-08-09T04:03:45.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-20T11:28:17.000Z
avg_line_length: 183.505342
max_line_length: 51,876
alphanum_fraction: 0.891594
cells / cell_types / cell_type_groups:
[ [ [ "# [ATM 623: Climate Modeling](../index.ipynb)\n[Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany\n# Lecture 17: Ice albedo feedback in the EBM", "_____no_output_____" ], [ "### About these notes:\n\nThis document uses the interactive [`IPython notebook`](http://ipython.org/notebook.html) format (now also called [`Jupyter`](https://jupyter.org)). The notes can be accessed in several different ways:\n\n- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware\n- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)\n- A complete snapshot of the notes as of May 2015 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).\n\nMany of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab", "_____no_output_____" ], [ "## Contents\n\n1. [Interactive snow and ice line in the EBM](#section1)\n2. [Polar-amplified warming in the EBM](#section2)\n3. [Effects of diffusivity in the annual mean EBM with albedo feedback](#section3)\n4. [Diffusive response to a point source of energy](#section4)", "_____no_output_____" ], [ "____________\n<a id='section1'></a>\n\n## 1. Interactive snow and ice line in the EBM\n____________\n", "_____no_output_____" ], [ "### The annual mean EBM\n\nthe equation is\n\n$$ C(\\phi) \\frac{\\partial T_s}{\\partial t} = (1-\\alpha) ~ Q - \\left( A + B~T_s \\right) + \\frac{D}{\\cos⁡\\phi } \\frac{\\partial }{\\partial \\phi} \\left( \\cos⁡\\phi ~ \\frac{\\partial T_s}{\\partial \\phi} \\right) $$\n\n\n", "_____no_output_____" ], [ "### Temperature-dependent ice line\n\nLet the surface albedo be larger wherever the temperature is below some threshold $T_f$:\n\n$$ \\alpha\\left(\\phi, T(\\phi) \\right) = \\left\\{\\begin{array}{ccc} \n\\alpha_0 + \\alpha_2 P_2(\\sin\\phi) & ~ & T(\\phi) > T_f \\\\\na_i & ~ & T(\\phi) \\le T_f \\\\\n\\end{array} \\right. $$\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport climlab", "_____no_output_____" ], [ "# for convenience, set up a dictionary with our reference parameters\nparam = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.}\nmodel1 = climlab.EBM_annual( num_lat=180, D=0.55, **param )\nprint model1", "climlab Process of type <class 'climlab.model.ebm.EBM_annual'>. \nState variables and domain shapes: \n Ts: (180, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.ebm.EBM_annual'>\n diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>\n LW: <class 'climlab.radiation.AplusBT.AplusBT'>\n albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>\n iceline: <class 'climlab.surface.albedo.Iceline'>\n warm albedo: <class 'climlab.surface.albedo.P2Albedo'>\n cold albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>\n insolation: <class 'climlab.radiation.insolation.AnnualMeanInsolation'>\n\n" ] ], [ [ "Because we provided a parameter `ai` for the icy albedo, our model now contains several sub-processes contained within the process called `albedo`. 
Together these implement the step-function formula above.\n\nThe process called `iceline` simply looks for grid cells with temperature below $T_f$.", "_____no_output_____" ] ], [ [ "print model1.param", "{'A': 210, 'B': 2, 'D': 0.55, 'ai': 0.62, 'timestep': 350632.51200000005, 'S0': 1365.2, 'a0': 0.3, 'a2': 0.078, 'Tf': -10.0, 'water_depth': 10.0}\n" ], [ "def ebm_plot( model, figsize=(8,12), show=True ):\n '''This function makes a plot of the current state of the model,\n including temperature, energy budget, and heat transport.'''\n templimits = -30,35\n radlimits = -340, 340\n htlimits = -7,7\n latlimits = -90,90\n lat_ticks = np.arange(-90,90,30)\n \n fig = plt.figure(figsize=figsize)\n \n ax1 = fig.add_subplot(3,1,1)\n ax1.plot(model.lat, model.Ts)\n ax1.set_xlim(latlimits)\n ax1.set_ylim(templimits)\n ax1.set_ylabel('Temperature (deg C)')\n ax1.set_xticks( lat_ticks )\n ax1.grid()\n \n ax2 = fig.add_subplot(3,1,2)\n ax2.plot(model.lat, model.diagnostics['ASR'], 'k--', label='SW' )\n ax2.plot(model.lat, -model.diagnostics['OLR'], 'r--', label='LW' )\n ax2.plot(model.lat, model.diagnostics['net_radiation'], 'c-', label='net rad' )\n ax2.plot(model.lat, model.heat_transport_convergence(), 'g--', label='dyn' )\n ax2.plot(model.lat, model.diagnostics['net_radiation'].squeeze() \n + model.heat_transport_convergence(), 'b-', label='total' )\n ax2.set_xlim(latlimits)\n ax2.set_ylim(radlimits)\n ax2.set_ylabel('Energy budget (W m$^{-2}$)')\n ax2.set_xticks( lat_ticks )\n ax2.grid()\n ax2.legend()\n \n ax3 = fig.add_subplot(3,1,3)\n ax3.plot(model.lat_bounds, model.heat_transport() )\n ax3.set_xlim(latlimits)\n ax3.set_ylim(htlimits)\n ax3.set_ylabel('Heat transport (PW)')\n ax3.set_xlabel('Latitude')\n ax3.set_xticks( lat_ticks )\n ax3.grid()\n\n return fig", "_____no_output_____" ], [ "model1.integrate_years(5)\nf = ebm_plot(model1)", "Integrating for 450 steps, 1826.211 days, or 5 years.\nTotal elapsed time is 5.0 years.\n" ], [ "model1.diagnostics['icelat']", "_____no_output_____" ] ], [ [ "____________\n<a id='section2'></a>\n\n## 2. Polar-amplified warming in the EBM\n____________\n\n", "_____no_output_____" ], [ "### Add a small radiative forcing\n\nThe equivalent of doubling CO2 in this model is something like \n\n$$ A \\rightarrow A - \\delta A $$\n\nwhere $\\delta A = 4$ W m$^{-2}$.\n", "_____no_output_____" ] ], [ [ "deltaA = 4.\n\nmodel2 = climlab.process_like(model1)\nmodel2.subprocess['LW'].A = param['A'] - deltaA\nmodel2.integrate_years(5, verbose=False)\n\nplt.plot(model1.lat, model1.Ts)\nplt.plot(model2.lat, model2.Ts)", "_____no_output_____" ] ], [ [ "The warming is polar-amplified: more warming at the poles than elsewhere.\n\nWhy?\n\nAlso, the current ice line is now:", "_____no_output_____" ] ], [ [ "model2.diagnostics['icelat']", "_____no_output_____" ] ], [ [ "There is no ice left!\n\nLet's do some more greenhouse warming:", "_____no_output_____" ] ], [ [ "model3 = climlab.process_like(model1)\nmodel3.subprocess['LW'].A = param['A'] - 2*deltaA\nmodel3.integrate_years(5, verbose=False)\n\nplt.plot(model1.lat, model1.Ts)\nplt.plot(model2.lat, model2.Ts)\nplt.plot(model3.lat, model3.Ts)\nplt.xlim(-90, 90)\nplt.grid()", "_____no_output_____" ] ], [ [ "In the ice-free regime, there is no polar-amplified warming. A uniform radiative forcing produces a uniform warming.", "_____no_output_____" ], [ "____________\n<a id='section3'></a>\n\n## 3. 
Effects of diffusivity in the annual mean EBM with albedo feedback\n____________\n\n", "_____no_output_____" ], [ "### In-class investigation:\n\nWe will repeat the exercise from Lecture 14, but this time with albedo feedback included in our model.\n\n- Solve the annual-mean EBM (integrate out to equilibrium) over a range of different diffusivity parameters.\n- Make three plots:\n - Global-mean temperature as a function of $D$\n - Equator-to-pole temperature difference $\\Delta T$ as a function of $D$\n - Poleward heat transport across 35 degrees $\\mathcal{H}_{max}$ as a function of $D$\n- Choose a value of $D$ that gives a reasonable approximation to observations:\n - $\\Delta T \\approx 45$ ºC\n\nUse these parameter values:", "_____no_output_____" ] ], [ [ "param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.}\nprint param", "{'A': 210, 'B': 2, 'ai': 0.62, 'a0': 0.3, 'a2': 0.078, 'Tf': -10.0}\n" ] ], [ [ "### One possible way to do this:", "_____no_output_____" ] ], [ [ "Darray = np.arange(0., 2.05, 0.05)", "_____no_output_____" ], [ "model_list = []\nTmean_list = []\ndeltaT_list = []\nHmax_list = []\nfor D in Darray:\n ebm = climlab.EBM_annual(num_lat=360, D=D, **param )\n #ebm.subprocess['insolation'].s2 = -0.473\n ebm.integrate_years(5., verbose=False)\n Tmean = ebm.global_mean_temperature()\n deltaT = np.max(ebm.Ts) - np.min(ebm.Ts)\n HT = ebm.heat_transport()\n #Hmax = np.max(np.abs(HT))\n ind = np.where(ebm.lat_bounds==35.5)[0]\n Hmax = HT[ind]\n model_list.append(ebm)\n Tmean_list.append(Tmean)\n deltaT_list.append(deltaT)\n Hmax_list.append(Hmax)", "_____no_output_____" ], [ "color1 = 'b'\ncolor2 = 'r'\n\nfig = plt.figure(figsize=(8,6))\nax1 = fig.add_subplot(111)\nax1.plot(Darray, deltaT_list, color=color1, label='$\\Delta T$')\nax1.plot(Darray, Tmean_list, '--', color=color1, label='$\\overline{T}$')\nax1.set_xlabel('D (W m$^{-2}$ K$^{-1}$)', fontsize=14)\nax1.set_xticks(np.arange(Darray[0], Darray[-1], 0.2))\nax1.set_ylabel('Temperature ($^\\circ$C)', fontsize=14, color=color1)\nfor tl in ax1.get_yticklabels():\n tl.set_color(color1)\nax1.legend(loc='center right')\nax2 = ax1.twinx()\nax2.plot(Darray, Hmax_list, color=color2)\nax2.set_ylabel('Poleward heat transport across 35.5$^\\circ$ (PW)', fontsize=14, color=color2)\nfor tl in ax2.get_yticklabels():\n tl.set_color(color2)\nax1.set_title('Effect of diffusivity on EBM with albedo feedback', fontsize=16)\nax1.grid()\n", "_____no_output_____" ] ], [ [ "____________\n<a id='section4'></a>\n\n## 4. Diffusive response to a point source of energy\n____________\n\nLet's add a point heat source to the EBM and see what sets the spatial structure of the response.\n\nWe will add a heat source at about 45º latitude.\n\nFirst, we will calculate the response in a model **without albedo feedback**.", "_____no_output_____" ] ], [ [ "param_noalb = {'A': 210, 'B': 2, 'D': 0.55, 'Tf': -10.0, 'a0': 0.3, 'a2': 0.078}\nm1 = climlab.EBM_annual(num_lat=180, **param_noalb)\nprint m1", "climlab Process of type <class 'climlab.model.ebm.EBM_annual'>. 
\nState variables and domain shapes: \n Ts: (180, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.ebm.EBM_annual'>\n diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>\n LW: <class 'climlab.radiation.AplusBT.AplusBT'>\n albedo: <class 'climlab.surface.albedo.P2Albedo'>\n insolation: <class 'climlab.radiation.insolation.AnnualMeanInsolation'>\n\n" ], [ "m1.integrate_years(5.)", "Integrating for 450 steps, 1826.211 days, or 5.0 years.\nTotal elapsed time is 5.0 years.\n" ], [ "m2 = climlab.process_like(m1)", "_____no_output_____" ], [ "point_source = climlab.process.energy_budget.ExternalEnergySource(state=m2.state)\nind = np.where(m2.lat == 45.5)\npoint_source.heating_rate['Ts'][ind] = 100.\n\nm2.add_subprocess('point source', point_source)\nprint m2", "climlab Process of type <class 'climlab.model.ebm.EBM_annual'>. \nState variables and domain shapes: \n Ts: (180, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.ebm.EBM_annual'>\n diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>\n LW: <class 'climlab.radiation.AplusBT.AplusBT'>\n albedo: <class 'climlab.surface.albedo.P2Albedo'>\n insolation: <class 'climlab.radiation.insolation.AnnualMeanInsolation'>\n point source: <class 'climlab.process.energy_budget.ExternalEnergySource'>\n\n" ], [ "m2.integrate_years(5.)", "Integrating for 450 steps, 1826.211 days, or 5.0 years.\nTotal elapsed time is 10.0 years.\n" ], [ "plt.plot(m2.lat, m2.Ts - m1.Ts)\nplt.xlim(-90,90)\nplt.grid()", "_____no_output_____" ] ], [ [ "The warming effects of our point source are felt **at all latitudes** but the effects decay away from the heat source.\n\nSome analysis will show that the length scale of the warming is proportional to \n\n$$ \\sqrt{\\frac{D}{B}} $$\n\nso increases with the diffusivity.", "_____no_output_____" ], [ "Now repeat this calculate **with ice albedo feedback**", "_____no_output_____" ] ], [ [ "m3 = climlab.EBM_annual(num_lat=180, **param)\nm3.integrate_years(5.)\nm4 = climlab.process_like(m3)\npoint_source = climlab.process.energy_budget.ExternalEnergySource(state=m4.state)\npoint_source.heating_rate['Ts'][ind] = 100.\nm4.add_subprocess('point source', point_source)\nm4.integrate_years(5.)", "Integrating for 450 steps, 1826.211 days, or 5.0 years.\nTotal elapsed time is 5.0 years.\nIntegrating for 450 steps, 1826.211 days, or 5.0 years.\nTotal elapsed time is 10.0 years.\n" ], [ "plt.plot(m4.lat, m4.Ts - m3.Ts)\nplt.xlim(-90,90)\nplt.grid()", "_____no_output_____" ] ], [ [ "Now the maximum warming **does not coincide with the heat source at 45º**!\n\nOur heat source has led to melting of snow and ice, which induces an additional heat source in the high northern latitudes.\n\n**Heat transport communicates the external warming to the ice cap, and also commuicates the increased shortwave absorption due to ice melt globally!**", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\n[Back to ATM 623 notebook home](../index.ipynb)\n</div>", "_____no_output_____" ], [ "____________\n## Credits\n\nThe author of this notebook is [Brian E. J. 
Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.\n\nIt was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php), offered in Spring 2015.\n____________", "_____no_output_____" ], [ "____________\n## Version information\n____________\n", "_____no_output_____" ] ], [ [ "%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py\n%load_ext version_information\n%version_information numpy, climlab", "Installed version_information.py. To use it, type:\n %load_ext version_information\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]

hexsha: cbc33e653b5816accfe0eabb10fff538afdf00a9
size: 18,356
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: code/notebook/test_dev.ipynb
max_stars_repo_name: NicolasMakaroff/deep_stoping_time
max_stars_repo_head_hexsha: 8cab26173253db9a0c2239f1b8974cf9dbe3177f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: code/notebook/test_dev.ipynb
max_issues_repo_name: NicolasMakaroff/deep_stoping_time
max_issues_repo_head_hexsha: 8cab26173253db9a0c2239f1b8974cf9dbe3177f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: code/notebook/test_dev.ipynb
max_forks_repo_name: NicolasMakaroff/deep_stoping_time
max_forks_repo_head_hexsha: 8cab26173253db9a0c2239f1b8974cf9dbe3177f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 55.456193
max_line_length: 8,347
alphanum_fraction: 0.687459
cells / cell_types / cell_type_groups:
[ [ [ "\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.integrate import quad\nfrom scipy.optimize import root", "_____no_output_____" ], [ "\"\"\"\n%% Summary of CJK_Func.m %%\nThe function generates the value of CJK representation. \nNote that, this function is not used directly, it is used to solve for Bt\n--------------------------------------------------------------------------\nInput:\n Bt - the value of boundary at t\n Bs - the value of boundary at s\n k - strike price\n r - risk-free interest rate\n q - continuously compounded dividend rate\n vol - annualized volatility\n T - maturity\n t - current time\n--------------------------------------------------------------------------\nOutput:\n y - value of CJK reprentation\n--------------------------------------------------------------------------\nAuthor:\n Nattapong Kongmuang\n [email protected]\n MSc Financial Engineering, ICMA Centre, Henley Business School, \n University of Reading, UK\n 24 July 2015\n--------------------------------------------------------------------------\n\"\"\"\n\ndef d1(x,y,z,b,vol):\n return (np.log(x/y)+(b+0.5*vol**2)*z)/(vol*np.sqrt(z)) \n\ndef d2(x,y,z,b,vol):\n return d1(x,y,z,b,vol)-vol*np.sqrt(z) \n\ndef CJK_Func( Bt,Bs,k,r,q,vol,T,t ):\n \n T1 = T-t\n b=r-q\n term = np.zeros(5)\n term[0] = Bt\n term[1] = -k\n term[2] = k*np.exp(-r*T1)*norm.cdf(-d2(Bt,k,T1,b,vol))\n term[3] = -Bt*np.exp(-q*T1)*norm.cdf(-d1(Bt,k,T1,b,vol))\n integralFun = lambda s: r*k*np.exp(-r*(s))*norm.cdf(-d2(Bt,Bs,(s),b,vol)) - q*Bt*np.exp(-q*(s))*norm.cdf(-d1(Bt,Bs,(s),b,vol))\n term[4] = quad(integralFun,t,T)[0]\n y = np.sum(term)\n return y\n", "_____no_output_____" ], [ "\"\"\"\n%% Summary of Boundary.m %%\nThe function generates the early exercise boundary and spot of time by \nCJK representation\n--------------------------------------------------------------------------\nInput:\n k - strike price\n r - risk-free interest rate\n q - continuously compounded dividend rate\n vol - annualized volatility\n T - time to maturity\n steps - a number of time steps in the calculation\n--------------------------------------------------------------------------\nOutput:\n B - the values of early exercise boundary \n time - the point of time that each B-value is calculated\n--------------------------------------------------------------------------\nAuthor:\n Nattapong Kongmuang\n [email protected]\n MSc Financial Engineering, ICMA Centre, Henley Business School, \n University of Reading, UK\n 24 July 2015\n--------------------------------------------------------------------------\n\"\"\"\ndef Boundary( k,r,q,vol,T,steps ):\n dt=T/steps\n t=T\n B = np.zeros(steps+1)\n time = np.linspace(0,T,np.floor(dt).astype(np.int))\n for i in range(steps,1,-1):\n if i==steps:\n if q == 0:\n B[i]=np.min(k,k*r/q)\n else:\n B[i]=k\n else:\n t=t-dt\n res=root(lambda Bt: CJK_Func(Bt,B[i+1],k,r,q,vol,T,t) ,k)\n B[i] = res.x\n \n return B", "_____no_output_____" ], [ "s0 = 100\nk = 100\nr = 0.07\nq = 0.03\nvol = 0.25\nT=1\n#paths = 100000\nsteps = 10000\ndt=T/steps\n \nB = Boundary( k,r,q,vol,T,steps) ", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\nplt.plot(np.linspace(0,1,10001),B)", "_____no_output_____" ], [ "from scipy.stats.distributions import norm, lognorm, rv_frozen\nclass GeometricBrownianMotion:\n '''Geometric Brownian Motion.(with optional drift).'''\n def __init__(self, mu: float=0.0, sigma: float=1.0):\n self.mu = mu\n self.sigma = sigma\n\n def simulate(self, t: np.array, n: int, rnd: np.random.RandomState) \\\n 
-> np.array:\n assert t.ndim == 1, 'One dimensional time vector required'\n assert t.size > 0, 'At least one time point is required'\n dt = np.concatenate((t[0:1], np.diff(t)))\n assert (dt >= 0).all(), 'Increasing time vector required'\n # transposed simulation for automatic broadcasting\n dW = (rnd.normal(size=(t.size, n)).T * np.sqrt(dt)).T\n W = np.cumsum(dW, axis=0)\n return np.exp(self.sigma * W.T + (self.mu - self.sigma**2 / 2) * t).T\n\n def distribution(self, t: float) -> rv_frozen:\n mu_t = (self.mu - self.sigma**2/2) * t\n sigma_t = self.sigma * np.sqrt(t)\n return lognorm(scale=np.exp(mu_t), s=sigma_t)", "_____no_output_____" ], [ "from scipy.optimize import newton\nclass LS:\n def __init__(self, X, t, r, strike):\n self.X = X\n self.t = t\n self.r = r\n self.strike = strike\n\n def _ls_american_option_quadratic_iter(self, X, t, r, strike):\n # given no prior exercise we just receive the payoff of a European option\n cashflow = np.maximum(strike - X[-1, :], 0.0)\n # iterating backwards in time\n for i in reversed(range(1, X.shape[1] - 1)):\n # discount factor between t[i] and t[i+1]\n df = np.exp(-r * (t[i+1]-t[i]))\n # discount cashflows from next period\n cashflow = cashflow * df\n x = X[:, i]\n # exercise value for time t[i]\n exercise = np.maximum(strike - x, 0.0)\n # boolean index of all in-the-money paths\n itm = exercise > 0\n # fit polynomial of degree 2\n fitted = Polynomial.fit(x[itm], cashflow[itm], 2)\n # approximate continuation value\n continuation = fitted(x)\n # boolean index where exercise is beneficial\n ex_idx = itm & (exercise > continuation)\n # update cashflows with early exercises\n cashflow[ex_idx] = exercise[ex_idx]\n\n func = cashflow - strike\n res = newton(func,strike)\n yield res,cashflow, x, fitted, continuation, exercise, ex_idx\n\n def simulate(self):\n for res,cashflow, *_ in self._ls_american_option_quadratic_iter(self.X, self.t, self.r, self.strike):\n pass\n return res,cashflow.mean(axis=0) * np.exp(-self.r * (self.t[1] - self.t[0]))", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc348942552f031a29a13916f73181df7e7a7de
3,677
ipynb
Jupyter Notebook
Scratch.ipynb
antoinehirtz/jupyterWorkflow
85720f2a939b29f876cc50029475842611fec470
[ "MIT" ]
null
null
null
Scratch.ipynb
antoinehirtz/jupyterWorkflow
85720f2a939b29f876cc50029475842611fec470
[ "MIT" ]
null
null
null
Scratch.ipynb
antoinehirtz/jupyterWorkflow
85720f2a939b29f876cc50029475842611fec470
[ "MIT" ]
null
null
null
28.726563
84
0.460974
[ [ [ "from jupyterworkflow.data import get_fremont_data\nimport pandas as pd\n\ndef test_fremont_data():\n data = get_fremont_data()\n assert all(data.columns == ['West', 'East', 'Total'])\n assert isinstance(data.index, pd.DatetimeIndex)", "_____no_output_____" ], [ "test_fremont_data()", "_____no_output_____" ], [ "data = pd.read_csv('Fremont.csv', index_col='Date')\n\npd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p')", "_____no_output_____" ], [ "data = pd.read_csv('Fremont.csv', index_col='Date')\npd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cbc34e49c6d1d49a53c2498ba786c318fc7dacef
24,464
ipynb
Jupyter Notebook
Test.ipynb
sayanb3/python_for_datascience
47dab4e99d81307e6e5c9aa24beb6442b007e0f4
[ "Apache-2.0" ]
null
null
null
Test.ipynb
sayanb3/python_for_datascience
47dab4e99d81307e6e5c9aa24beb6442b007e0f4
[ "Apache-2.0" ]
null
null
null
Test.ipynb
sayanb3/python_for_datascience
47dab4e99d81307e6e5c9aa24beb6442b007e0f4
[ "Apache-2.0" ]
null
null
null
30.277228
164
0.356524
[ [ [ "! pip install pandas --upgrade", "Requirement already up-to-date: pandas in c:\\programdata\\anaconda3\\lib\\site-packages (1.2.1)\nRequirement already satisfied, skipping upgrade: pytz>=2017.3 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (2020.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.16.5 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (1.19.2)\nRequirement already satisfied, skipping upgrade: python-dateutil>=2.7.3 in c:\\programdata\\anaconda3\\lib\\site-packages (from pandas) (2.8.1)\nRequirement already satisfied, skipping upgrade: six>=1.5 in c:\\programdata\\anaconda3\\lib\\site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)\n" ], [ "! easy_install bokeh", "Searching for bokeh" ], [ "from datetime import datetime\nStarttime =datetime.now()\nStarttime", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "diamonds =pd.read_csv(\"https://vincentarelbundock.github.io/Rdatasets/csv/ggplot2/diamonds.csv\")", "_____no_output_____" ], [ "diamonds.columns #Single Line Comment starts with # \n# name of variables is given by columns. In R we would use the command names(object)\n# Note also R uses the FUNCTION(OBJECTNAME) syntax while Python uses OBJECTNAME.FUNCTION", "_____no_output_____" ], [ "len(diamonds)", "_____no_output_____" ], [ "diamonds.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 53940 entries, 0 to 53939\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 53940 non-null int64 \n 1 carat 53940 non-null float64\n 2 cut 53940 non-null object \n 3 color 53940 non-null object \n 4 clarity 53940 non-null object \n 5 depth 53940 non-null float64\n 6 table 53940 non-null float64\n 7 price 53940 non-null int64 \n 8 x 53940 non-null float64\n 9 y 53940 non-null float64\n 10 z 53940 non-null float64\ndtypes: float64(6), int64(2), object(3)\nmemory usage: 4.5+ MB\n" ], [ "diamonds.head(20)", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "rows = np.random.choice(diamonds.index.values, round(0.0001*len(diamonds)))\nrows", "_____no_output_____" ], [ "diamonds.describe()", "_____no_output_____" ], [ "diamonds.price.describe()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc361a74a844dc242ae4fa1094b7f84950b4cd1
73,669
ipynb
Jupyter Notebook
__writing/.ipynb_checkpoints/__writing-optimal_probabilistic_clustering_part2_test-checkpoint.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
__writing/.ipynb_checkpoints/__writing-optimal_probabilistic_clustering_part2_test-checkpoint.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
__writing/.ipynb_checkpoints/__writing-optimal_probabilistic_clustering_part2_test-checkpoint.ipynb
joaodmrodrigues/elements-financial-machine-learning
9e3dea6ac558576db94d926c94e46bfe8ff42e6f
[ "Apache-2.0" ]
null
null
null
92.08625
32,812
0.842675
[ [ [ "# Optimal probabilistic clustering - Part II\n> ...\n\n- toc: true\n- branch: master\n- badges: true\n- comments: true\n- categories: [Clustering, Entropy, Membership Entropy]\n- image: images/post_image_optimal_clustering.png\n- hide: false\n- search_exclude: false\n- author: Joao Rodrigues", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy.linalg import norm\nfrom scipy.spatial.distance import cdist\n\nclass OPC:\n def __init__(self, n_clusters=10, max_iter=150, m=2, error=1e-5, random_state=42):\n self.u, self.centers = None, None\n self.n_clusters = n_clusters\n self.max_iter = max_iter\n self.m = m\n self.error = error\n self.random_state = random_state\n\n def fit(self, X, initial_centers=None):\n N = X.shape[0]\n C = self.n_clusters\n centers = initial_centers\n\n # u = np.random.dirichlet(np.ones(C), size=N)\n r = np.random.RandomState(self.random_state)\n u = r.rand(N,C)\n u = u / np.tile(u.sum(axis=1)[np.newaxis].T,C)\n\n iteration = 0\n while iteration < self.max_iter:\n u2 = u.copy()\n \n if iteration==0 and not centers is None:\n centers = centers\n print(centers.shape)\n print(\"-------------------------------------------\")\n else:\n centers = self.next_centers(X, u)\n \n u = self.next_u(X, centers)\n iteration += 1\n\n # Stopping rule\n if norm(u - u2) < self.error:\n break\n\n self.u = u\n self.centers = centers\n return self\n\n def next_centers(self, X, u):\n um = u ** self.m\n return (X.T @ um / np.sum(um, axis=0)).T\n\n def next_u(self, X, centers):\n return self._predict(X, centers)\n\n def _predict(self, X, centers):\n power = float(2 / (self.m - 1))\n temp = cdist(X, centers) ** power\n denominator_ = temp.reshape((X.shape[0], 1, -1)).repeat(temp.shape[-1], axis=1)\n denominator_ = temp[:, :, np.newaxis] / denominator_\n\n return 1 / denominator_.sum(2)\n\n def predict(self, X):\n if len(X.shape) == 1:\n X = np.expand_dims(X, axis=0)\n\n u = self._predict(X, self.centers)\n return np.argmax(u, axis=-1)", "_____no_output_____" ], [ "######################################## Part I\n#from fcmeans import FCM\n\ndef run_cluster(n_clusters, features, initial_centers=None, random_state=42):\n # membership probabilities\n model = OPC(n_clusters=n_clusters, random_state=random_state, max_iter=1000, error=1e-9)\n model = model.fit(features, initial_centers=initial_centers)\n p = model.u\n centers = model.centers\n # representative cluster\n representative_cluster = np.argmax(p, 1)\n # membership entropy\n Sx = -np.sum(p*np.log(p), 1) / np.log(n_clusters)\n # total membership entropy (across the entire feature space)\n S = np.sum(Sx) \n \n return centers, p, representative_cluster, Sx, S\n\n", "_____no_output_____" ] ], [ [ "Check if I'm introducing a regularization in inferring the optimal number of clusters", "_____no_output_____" ] ], [ [ "regularization = 1.0", "_____no_output_____" ] ], [ [ "## Experimental results", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "(n,k,m) n observations, k clusters, at least m observations per cluster", "_____no_output_____" ] ], [ [ "def construct_random_partition(n, k, m, seed=None):\n rand = np.random.RandomState(seed=seed)\n parts = rand.choice(range(1, n-k*(m-1)), k-1, replace=False)\n parts.sort()\n parts = np.append(parts, n-k*(m-1))\n parts = np.append(parts[0], np.diff(parts)) - 1 + m\n \n return parts", "_____no_output_____" ], [ "partition = construct_random_partition(n=200, k=5, m=2, seed=40)\nprint(partition)", "[ 7 31 62 21 79]\n" ] ], [ [ "**Generation of random 
datasets**", "_____no_output_____" ] ], [ [ "def generate_random_dataset(partition, n_features, std, seed):\n random_state = np.random.RandomState(seed=seed)\n dataset = list()\n for n in partition:\n # cluster centre coordinates\n cluster_centre = random_state.uniform(-1, 1, n_features)\n # observation coordinates\n for observation in range(0, n):\n dataset.append(cluster_centre+std*random_state.standard_normal(n_features))\n dataset = np.array(dataset)\n # shuffles the observations\n dataset = dataset[random_state.permutation(dataset.shape[0]), :]\n \n return np.array(dataset)", "_____no_output_____" ], [ "dataset = generate_random_dataset(partition=partition, n_features=2, std=0.05, seed=42)", "_____no_output_____" ] ], [ [ "We will, at each iteration, collect the mean-intracluster entropy", "_____no_output_____" ] ], [ [ "Si = list()\niteration = 0\ncenters = None\nn_clusters_trials = np.arange(2, 10, 1)", "_____no_output_____" ] ], [ [ "Some helpful functions", "_____no_output_____" ] ], [ [ "### Minimization of membership entropy\ndef minimize_membership_entropy(n_clusters_trials, dataset, regularization=0, random_state=42):\n total_entropies = list()\n for trial in n_clusters_trials:\n _, _, _, _, total_entropy = run_cluster(n_clusters=trial, \n features=dataset, \n random_state=random_state)\n total_entropies.append(total_entropy+regularization*trial)\n \n optimal_nclusters = n_clusters_trials[np.argmin(total_entropies)]\n \n return optimal_nclusters, total_entropies\n \n \n### Cluster quality\ndef calculate_cluster_quality(p, representative_cluster, PRINT=True):\n Si = dict()\n for clust in set(representative_cluster):\n probs = p[np.argmax(p, 1)==clust, :]\n entropy = -np.sum(probs*np.log(probs), 1) / np.log(probs.shape[1])\n Si.update({clust: np.mean(entropy)})\n\n if PRINT:\n [print(\"Mean membership entropy across cluster {0} = {1}\".format(i, np.round(Si[i], 3))) for i in Si.keys()]\n \n return Si", "_____no_output_____" ] ], [ [ "### Iteration 1", "_____no_output_____" ], [ "**1.1) Minimization of membership entropy**", "_____no_output_____" ] ], [ [ "optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset, regularization)\nprint(\"Optimal number of clusters =\", optimal_nclusters)", "Optimal number of clusters = 4\n" ] ], [ [ "**1.2) Clustering**", "_____no_output_____" ] ], [ [ "centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset)", "_____no_output_____" ] ], [ [ "**1.3) Cluster quality**", "_____no_output_____" ] ], [ [ "Si.append(calculate_cluster_quality(p, representative_cluster))", "Mean membership entropy across cluster 0 = 0.293\nMean membership entropy across cluster 1 = 0.181\nMean membership entropy across cluster 2 = 0.172\nMean membership entropy across cluster 3 = 0.106\n" ] ], [ [ "**1.4) Plot**", "_____no_output_____" ] ], [ [ "import matplotlib\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n\ndef make_rgb_transparent(rgb, alpha):\n bg_rgb = [1, 1, 1]\n return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)]\ncolormap = cm.get_cmap('Accent')\n\nedgecolors = list()\nfacecolors = list()\nfor i in range(0, optimal_nclusters):\n edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1))\n facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65))", "_____no_output_____" ], [ "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n\naxes[0].plot([optimal_nclusters, optimal_nclusters], [0, 
np.max(total_entropies)], color=(0.8,0.6,0.6), linewidth=2)\naxes[0].plot(n_clusters_trials, total_entropies, color=(0.46,0.46,0.46), linewidth=2)\naxes[0].set_xlabel('Number of clusters')\naxes[0].set_ylabel('Total membership entropy')\n\ncolor_seq = list()\nfor j in range(0, dataset.shape[0]):\n color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j]))\nfor i in range(0, optimal_nclusters):\n axes[1].scatter([], [], label=str(i), color=edgecolors[i])\naxes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq)\naxes[1].scatter(centers[:,0], centers[:,1], color=(0.8,0.2,0.2, 0.8), marker=\"v\")\naxes[1].set_xlabel('X')\naxes[1].set_ylabel('Y')\naxes[1].set_xlim(-1.2,1.2)\naxes[1].set_ylim(-1.2,1.2)\naxes[1].legend(loc=\"best\")\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "**1.5) Finds clusters with an below-average mean membership entropy**", "_____no_output_____" ] ], [ [ "print(\"Intra-cluster mean membership entropy\")\nSi[iteration]", "Intra-cluster mean membership entropy\n" ], [ "bad_clusters = np.array(list(Si[iteration].keys()))[list(Si[iteration].values()) > np.mean(list(Si[iteration].values()))]\nprint(\"Clusters with above-average membership entropy\")\nbad_clusters", "Clusters with above-average membership entropy\n" ], [ "good_clusters = np.array(list(set(Si[iteration].keys()).difference(set(bad_clusters))))\ngood_clusters", "_____no_output_____" ], [ "centers_good_clusters = centers[good_clusters,:]", "_____no_output_____" ] ], [ [ "**1.6) Collects observations in the above selected clusters**", "_____no_output_____" ] ], [ [ "inds = []\nfor cluster in bad_clusters:\n inds += list(np.where(representative_cluster==cluster)[0])\ninds = np.squeeze(np.array(inds))\n\ndataset_bad_clusters = dataset[inds,:]", "_____no_output_____" ], [ "optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset_bad_clusters, regularization)\nprint(\"Optimal number of clusters =\", optimal_nclusters)", "Optimal number of clusters = 2\n" ], [ "new_centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset)", "_____no_output_____" ], [ "trial_centers = np.vstack((centers_good_clusters, new_centers))", "_____no_output_____" ], [ "centers, p, representative_cluster, Sx, S = run_cluster(centers.shape[0], dataset, initial_centers=trial_centers)", "(5, 2)\n-------------------------------------------\n" ], [ "optimal_nclusters = centers.shape[0]\n\nedgecolors = list()\nfacecolors = list()\nfor i in range(0, optimal_nclusters):\n edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1))\n facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65))\n \n \nfig, axes = plt.subplots(1, 2, figsize=(10, 4))\n\ncolor_seq = list()\nfor j in range(0, dataset.shape[0]):\n color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j]))\nfor i in range(0, optimal_nclusters):\n axes[1].scatter([], [], label=str(i), color=edgecolors[i])\naxes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq)\naxes[1].scatter(centers[:,0], trial_centers[:,1], color=(0.8,0.2,0.2, 0.8), marker=\"v\")\naxes[1].set_xlabel('X')\naxes[1].set_ylabel('Y')\naxes[1].set_xlim(-1.2,1.2)\naxes[1].set_ylim(-1.2,1.2)\naxes[1].legend(loc=\"best\")\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "len(edgecolors)", "_____no_output_____" ] ], [ [ 
"Initialize fcmeans with different seeds the do statistics would probably help\n\n", "_____no_output_____" ], [ "**References:**", "_____no_output_____" ], [ "{% bibliography --cited %}", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cbc361e904a4ba9d275adca484db1684ac357194
61,439
ipynb
Jupyter Notebook
deep_learning_pytorch/Part 2 - Neural Networks in PyTorch.ipynb
amitbcp/machine_learning_with_Scikit_Learn_and_TensorFlow
37dda063e316503d53ac45f3b104a5cf1aaa4d78
[ "MIT" ]
11
2019-12-19T08:55:52.000Z
2021-10-01T13:07:13.000Z
deep_learning_pytorch/Part 2 - Neural Networks in PyTorch.ipynb
amitbcp/sckit-learn-examples
0c26f9178a0cf96ff79faf3b9b250dd5b8f6c49a
[ "MIT" ]
5
2019-10-09T01:41:19.000Z
2022-02-10T00:19:01.000Z
deep_learning_pytorch/Part 2 - Neural Networks in PyTorch.ipynb
amitbcp/sckit-learn-examples
0c26f9178a0cf96ff79faf3b9b250dd5b8f6c49a
[ "MIT" ]
7
2019-10-08T06:10:14.000Z
2020-12-01T07:49:21.000Z
115.270169
16,728
0.827129
[ [ [ "# Neural networks with PyTorch\n\nNext I'll show you how to build a neural network with PyTorch.", "_____no_output_____" ] ], [ [ "# Import things like usual\n\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport torch\n\nimport helper\n\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms", "_____no_output_____" ] ], [ [ "First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.", "_____no_output_____" ] ], [ [ "# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n# Download and load the training data\ntrainset = datasets.MNIST('MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.MNIST('MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)", "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\nProcessing...\nDone!\n" ], [ "dataiter = iter(trainloader)\nimages, labels = dataiter.next()", "_____no_output_____" ] ], [ [ "We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. We'd use this to loop through the dataset for training, but here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size (64, 1, 28, 28). So, 64 images per batch, 1 color channel, and 28x28 images.", "_____no_output_____" ] ], [ [ "plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');", "_____no_output_____" ] ], [ [ "## Building networks with PyTorch\n\nHere I'll use PyTorch to build a simple feedfoward network to classify the MNIST images. That is, the network will receive a digit image as input and predict the digit in the image.\n\n<img src=\"assets/mlp_mnist.png\" width=600px>\n\nTo build a neural network with PyTorch, you use the `torch.nn` module. The network itself is a class inheriting from `torch.nn.Module`. You define each of the operations separately, like `nn.Linear(784, 128)` for a fully connected linear layer with 784 inputs and 128 units.\n\nThe class needs to include a `forward` method that implements the forward pass through the network. In this method, you pass some input tensor `x` through each of the operations you defined earlier. The `torch.nn` module also has functional equivalents for things like ReLUs in `torch.nn.functional`. This module is usually imported as `F`. Then to use a ReLU activation on some layer (which is just a tensor), you'd do `F.relu(x)`. Below are a few different commonly used activation functions.\n\n<img src=\"assets/activation.png\" width=700px>\n\nSo, for this network, I'll build it with three fully connected layers, then a softmax output for predicting classes. 
The softmax function is similar to the sigmoid in that it squashes inputs between 0 and 1, but it's also normalized so that all the values sum to one like a proper probability distribution.", "_____no_output_____" ] ], [ [ "from torch import nn\nfrom torch import optim\nimport torch.nn.functional as F", "_____no_output_____" ], [ "class Network(nn.Module):\n def __init__(self):\n super().__init__()\n # Defining the layers, 128, 64, 10 units each\n self.fc1 = nn.Linear(784, 128)\n self.fc2 = nn.Linear(128, 64)\n # Output layer, 10 units - one for each digit\n self.fc3 = nn.Linear(64, 10)\n \n def forward(self, x):\n ''' Forward pass through the network, returns the output logits '''\n \n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n x = F.softmax(x, dim=1)\n \n return x\n\nmodel = Network()\nmodel", "_____no_output_____" ] ], [ [ "### Initializing weights and biases\n\nThe weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.", "_____no_output_____" ] ], [ [ "print(model.fc1.weight)\nprint(model.fc1.bias)", "Parameter containing:\ntensor([[-2.6058e-02, 3.1424e-02, 2.1148e-03, ..., 1.1542e-02,\n -2.6522e-02, -3.4025e-02],\n [ 3.0572e-02, 3.3338e-02, -2.6931e-02, ..., 5.7425e-05,\n 2.1721e-02, -3.3412e-02],\n [ 3.4925e-03, 2.9037e-02, 1.0347e-02, ..., -2.9559e-02,\n 3.1794e-03, 3.4490e-02],\n ...,\n [-3.1773e-02, -3.4363e-02, -1.7279e-02, ..., -7.3001e-03,\n -7.4463e-03, 2.4893e-02],\n [-6.8170e-03, -1.5293e-02, -1.0804e-02, ..., -1.8655e-02,\n 1.4348e-02, -1.1746e-02],\n [ 2.5862e-02, -1.2672e-03, -1.0714e-02, ..., -1.4800e-02,\n -1.5675e-03, -1.3676e-02]])\nParameter containing:\ntensor(1.00000e-02 *\n [ 0.1686, -1.9158, -0.6910, -2.2148, 2.7405, 1.6166, 3.5038,\n -3.0429, 0.6742, 2.0730, 1.5177, 3.3484, 0.6643, 1.6872,\n 2.6293, 3.4846, -3.0008, 0.6693, 2.5440, -2.4925, -0.8574,\n 0.4322, 1.2354, -0.9304, 0.2392, -3.3509, 3.3629, 0.6310,\n 2.3125, 1.7735, -1.2755, -2.9089, 2.4392, 0.8636, -1.0626,\n 3.4954, 0.9678, -0.4400, -1.8525, -0.8659, 0.9357, 2.5460,\n -3.5023, -3.3836, -1.6945, 1.7945, 1.5075, -1.4474, 1.1298,\n -0.8535, -0.1105, 1.7625, 2.5759, 0.3475, -0.3717, 1.9920,\n -2.9399, 3.0557, -1.3507, 2.5886, -1.6885, -3.2012, -0.3042,\n -1.7932, 2.3024, 2.2674, -1.4366, 2.2976, 0.3026, -0.6845,\n -1.7643, -2.2432, 1.1491, 2.7507, 0.4540, -0.3610, -0.4619,\n 1.1470, 3.0486, -0.2853, 0.7022, 1.0666, -2.9304, 2.0395,\n 0.7392, 0.7528, 1.5047, -0.2269, -1.0421, 2.0012, 2.8392,\n -3.0624, 1.1166, -2.2294, -2.3319, 2.0527, 1.9425, 3.5627,\n -2.1015, -3.5139, -2.4443, -3.4681, 1.9482, -1.8649, -1.2089,\n -2.4046, 1.7744, -2.6773, -2.4332, -0.7620, 1.1949, -2.8041,\n -2.2364, -0.4441, -2.2495, -2.6019, 2.7152, 1.7264, -2.2761,\n 1.2306, -1.0942, 2.7313, 0.5079, -0.8688, 3.3801, 2.6146,\n -2.5973, -0.5548])\n" ] ], [ [ "For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. 
Once we have the tensors, we can fill them with zeros (for biases) or random normal values.", "_____no_output_____" ] ], [ [ "# Set biases to all zeros\nmodel.fc1.bias.data.fill_(0)", "_____no_output_____" ], [ "# sample from random normal with standard dev = 0.01\nmodel.fc1.weight.data.normal_(std=0.01)", "_____no_output_____" ] ], [ [ "### Forward pass\n\nNow that we have a network, let's see what happens when we pass in an image. This is called the forward pass. We're going to convert the image data into a tensor, then pass it through the operations defined by the network architecture.", "_____no_output_____" ] ], [ [ "# Grab some data \ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\n# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels) \nimages.resize_(64, 1, 784)\n# or images.resize_(images.shape[0], 1, 784) to not automatically get batch size\n\n# Forward pass through the network\nimg_idx = 0\nps = model.forward(images[img_idx,:])\n\nimg = images[img_idx]\nhelper.view_classify(img.view(1, 28, 28), ps)", "_____no_output_____" ] ], [ [ "As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!\n\nPyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:", "_____no_output_____" ] ], [ [ "# Hyperparameters for our network\ninput_size = 784\nhidden_sizes = [128, 64]\noutput_size = 10\n\n# Build a feed-forward network\nmodel = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),\n nn.ReLU(),\n nn.Linear(hidden_sizes[0], hidden_sizes[1]),\n nn.ReLU(),\n nn.Linear(hidden_sizes[1], output_size),\n nn.Softmax(dim=1))\nprint(model)\n\n# Forward pass through the network and display output\nimages, labels = next(iter(trainloader))\nimages.resize_(images.shape[0], 1, 784)\nps = model.forward(images[0,:])\nhelper.view_classify(images[0].view(1, 28, 28), ps)", "Sequential(\n (0): Linear(in_features=784, out_features=128, bias=True)\n (1): ReLU()\n (2): Linear(in_features=128, out_features=64, bias=True)\n (3): ReLU()\n (4): Linear(in_features=64, out_features=10, bias=True)\n (5): Softmax()\n)\n" ] ], [ [ "You can also pass in an `OrderedDict` to name the individual layers and operations. Note that a dictionary keys must be unique, so _each operation must have a different name_.", "_____no_output_____" ] ], [ [ "from collections import OrderedDict\nmodel = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(input_size, hidden_sizes[0])),\n ('relu1', nn.ReLU()),\n ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),\n ('relu2', nn.ReLU()),\n ('output', nn.Linear(hidden_sizes[1], output_size)),\n ('softmax', nn.Softmax(dim=1))]))\nmodel", "_____no_output_____" ] ], [ [ "Now it's your turn to build a simple network, use any method I've covered so far. In the next notebook, you'll learn how to train a network so it can make good predictions.\n\n>**Exercise:** Build a network to classify the MNIST images with _three_ hidden layers. Use 400 units in the first hidden layer, 200 units in the second layer, and 100 units in the third layer. Each hidden layer should have a ReLU activation function, and use softmax on the output layer. 
", "_____no_output_____" ] ], [ [ "## TODO: Your network here", "_____no_output_____" ], [ "## Run this cell with your model to make sure it works ##\n# Forward pass through the network and display output\nimages, labels = next(iter(trainloader))\nimages.resize_(images.shape[0], 1, 784)\nps = model.forward(images[0,:])\nhelper.view_classify(images[0].view(1, 28, 28), ps)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cbc361fbfd42072c243fbb3e2a9e928b66f0900d
11,815
ipynb
Jupyter Notebook
tutorials/notebook/cx_site_chart_examples/boxplot_11.ipynb
docinfosci/canvasxpress-python
532a981b04d0f50bbde1852c695117a6220f4589
[ "MIT" ]
4
2021-03-18T17:23:40.000Z
2022-02-01T19:07:01.000Z
tutorials/notebook/cx_site_chart_examples/boxplot_11.ipynb
docinfosci/canvasxpress-python
532a981b04d0f50bbde1852c695117a6220f4589
[ "MIT" ]
8
2021-04-30T20:46:57.000Z
2022-03-10T07:25:31.000Z
tutorials/notebook/cx_site_chart_examples/boxplot_11.ipynb
docinfosci/canvasxpress-python
532a981b04d0f50bbde1852c695117a6220f4589
[ "MIT" ]
1
2022-02-03T00:35:14.000Z
2022-02-03T00:35:14.000Z
11,815
11,815
0.228692
[ [ [ "# Example: CanvasXpress boxplot Chart No. 11\n\nThis example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:\n\nhttps://www.canvasxpress.org/examples/boxplot-11.html\n\nThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.\n\nEverything required for the chart to render is included in the code below. Simply run the code block.", "_____no_output_____" ] ], [ [ "from canvasxpress.canvas import CanvasXpress \nfrom canvasxpress.js.collection import CXEvents \nfrom canvasxpress.render.jupyter import CXNoteBook \n\ncx = CanvasXpress(\n render_to=\"boxplot11\",\n data={\n \"y\": {\n \"smps\": [\n \"Var1\",\n \"Var2\",\n \"Var3\",\n \"Var4\",\n \"Var5\",\n \"Var6\",\n \"Var7\",\n \"Var8\",\n \"Var9\",\n \"Var10\",\n \"Var11\",\n \"Var12\",\n \"Var13\",\n \"Var14\",\n \"Var15\",\n \"Var16\",\n \"Var17\",\n \"Var18\",\n \"Var19\",\n \"Var20\",\n \"Var21\",\n \"Var22\",\n \"Var23\",\n \"Var24\",\n \"Var25\",\n \"Var26\",\n \"Var27\",\n \"Var28\",\n \"Var29\",\n \"Var30\",\n \"Var31\",\n \"Var32\",\n \"Var33\",\n \"Var34\",\n \"Var35\",\n \"Var36\",\n \"Var37\",\n \"Var38\",\n \"Var39\",\n \"Var40\",\n \"Var41\",\n \"Var42\",\n \"Var43\",\n \"Var44\",\n \"Var45\",\n \"Var46\",\n \"Var47\",\n \"Var48\",\n \"Var49\",\n \"Var50\",\n \"Var51\",\n \"Var52\",\n \"Var53\",\n \"Var54\",\n \"Var55\",\n \"Var56\",\n \"Var57\",\n \"Var58\",\n \"Var59\",\n \"Var60\"\n ],\n \"data\": [\n [\n 4.2,\n 11.5,\n 7.3,\n 5.8,\n 6.4,\n 10,\n 11.2,\n 11.2,\n 5.2,\n 7,\n 16.5,\n 16.5,\n 15.2,\n 17.3,\n 22.5,\n 17.3,\n 13.6,\n 14.5,\n 18.8,\n 15.5,\n 23.6,\n 18.5,\n 33.9,\n 25.5,\n 26.4,\n 32.5,\n 26.7,\n 21.5,\n 23.3,\n 29.5,\n 15.2,\n 21.5,\n 17.6,\n 9.7,\n 14.5,\n 10,\n 8.2,\n 9.4,\n 16.5,\n 9.7,\n 19.7,\n 23.3,\n 23.6,\n 26.4,\n 20,\n 25.2,\n 25.8,\n 21.2,\n 14.5,\n 27.3,\n 25.5,\n 26.4,\n 22.4,\n 24.5,\n 24.8,\n 30.9,\n 26.4,\n 27.3,\n 29.4,\n 23\n ]\n ],\n \"vars\": [\n \"len\"\n ]\n },\n \"x\": {\n \"supp\": [\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"VC\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\",\n \"OJ\"\n ],\n \"order\": [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10\n ],\n \"dose\": [\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2,\n 2\n ]\n }\n },\n config={\n \"axisAlgorithm\": \"rPretty\",\n \"axisTickScaleFontFactor\": 1.8,\n \"axisTitleFontStyle\": \"bold\",\n 
\"axisTitleScaleFontFactor\": 1.8,\n \"colorBy\": \"dose\",\n \"graphOrientation\": \"vertical\",\n \"graphType\": \"Boxplot\",\n \"groupingFactors\": [\n \"dose\"\n ],\n \"legendScaleFontFactor\": 1.8,\n \"showLegend\": True,\n \"smpLabelRotate\": 90,\n \"smpLabelScaleFontFactor\": 1.8,\n \"smpTitle\": \"dose\",\n \"smpTitleFontStyle\": \"bold\",\n \"smpTitleScaleFontFactor\": 1.8,\n \"stringSampleFactors\": [\n \"dose\"\n ],\n \"theme\": \"CanvasXpress\",\n \"title\": \"The Effect of Vitamin C on Tooth Growth in Guinea Pigs\",\n \"xAxis2Show\": False,\n \"xAxisMinorTicks\": False,\n \"xAxisTitle\": \"len\"\n },\n width=613,\n height=613,\n events=CXEvents(),\n after_render=[\n [\n \"switchNumericToString\",\n [\n \"dose\",\n True\n ]\n ]\n ],\n other_init_params={\n \"version\": 35,\n \"events\": False,\n \"info\": False,\n \"afterRenderInit\": False,\n \"noValidate\": True\n }\n)\n\ndisplay = CXNoteBook(cx) \ndisplay.render(output_file=\"boxplot_11.html\") \n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
cbc3634bf6de28f778ca8cee92ff908d6ca9eb01
6,046
ipynb
Jupyter Notebook
Colab_ArteMaisComp.ipynb
KinsleyDavis/Novo
4fcd5e112a5bd9fef7c1b0a8956c18122c928613
[ "MIT" ]
null
null
null
Colab_ArteMaisComp.ipynb
KinsleyDavis/Novo
4fcd5e112a5bd9fef7c1b0a8956c18122c928613
[ "MIT" ]
null
null
null
Colab_ArteMaisComp.ipynb
KinsleyDavis/Novo
4fcd5e112a5bd9fef7c1b0a8956c18122c928613
[ "MIT" ]
null
null
null
34.352273
232
0.523817
[ [ [ "<a href=\"https://colab.research.google.com/github/KinsleyDavis/Novo/blob/main/Colab_ArteMaisComp.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "def escolher_arquivo():\n import ipywidgets as widgets\n from IPython.display import clear_output\n import os\n import matplotlib.pyplot as plt\n !pip install chainer &> /dev/null \n !pip install cupy-cuda101==7.7.0\n \n !git clone https://github.com/artemaiscomp/chainer-fast-neuralstyle &> /dev/null\n from google.colab import files\n content_img = files.upload() \n \n CONTENT_IMAGE_FN = list(content_img)[0]\n CONTENT_IMAGE_FN_temp = CONTENT_IMAGE_FN.strip().replace(\" \", \"_\")\n \n if CONTENT_IMAGE_FN != CONTENT_IMAGE_FN_temp:\n os.rename(CONTENT_IMAGE_FN, CONTENT_IMAGE_FN_temp)\n CONTENT_IMAGE_FN = CONTENT_IMAGE_FN_temp\n \n #print(\"Nome do arquivo da imagem :\", CONTENT_IMAGE_FN)\n %matplotlib inline\n \n \n fig = plt.figure(figsize=(10, 10)) \n img = plt.imread(CONTENT_IMAGE_FN) \n plt.axis('off')\n #plt.title('Content image')\n plt.imshow(img)", "_____no_output_____" ], [ "#@title Clique no Play e após em \"Escolher Arquivo\" para escolher sua imagem a ser estilizada \nimport ipywidgets as widgets\nfrom IPython.display import clear_output\nimport os\nimport matplotlib.pyplot as plt\n!pip install chainer &> /dev/null\n!pip install cupy-cuda101 &> /dev/null\n \n!git clone https://github.com/artemaiscomp/chainer-fast-neuralstyle &> /dev/null\nfrom google.colab import files\ncontent_img = files.upload() \n \nCONTENT_IMAGE_FN = list(content_img)[0]\nCONTENT_IMAGE_FN_temp = CONTENT_IMAGE_FN.strip().replace(\" \", \"_\")\n \nif CONTENT_IMAGE_FN != CONTENT_IMAGE_FN_temp:\n os.rename(CONTENT_IMAGE_FN, CONTENT_IMAGE_FN_temp)\n CONTENT_IMAGE_FN = CONTENT_IMAGE_FN_temp\n \n #print(\"Nome do arquivo da imagem :\", CONTENT_IMAGE_FN)\n%matplotlib inline\n \n \nfig = plt.figure(figsize=(10, 10)) \nimg = plt.imread(CONTENT_IMAGE_FN) \nplt.axis('off')\n#plt.title('Content image')\nplt.imshow(img)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "#@title Selecione a arte a ser aplicada.\nimport os, ipywidgets as widgets\nfrom IPython.display import clear_output\nmodel_files = [f for f in os.listdir('/content/chainer-fast-neuralstyle/models') if f.endswith('.model')]\nmodel=widgets.Dropdown(\noptions=model_files,\nvalue='hokusai.model',\ndescription='Modelo:',\ndisabled=False,\n)\nmodel", "_____no_output_____" ], [ "#@title Clique no botão Play e abaixo no botao OK para converter a imagem com o estilo escolhido.\nclear_output()\n#@title Clique no botão Play e abaixo no botao OK para converter a imagem com o estilo escolhido.\nfrom IPython.display import clear_output\nbutton = widgets.Button(description='OK')\n!pip install chainer &> /dev/null\n!pip install cupy-cuda101==7.7.0 &> /dev/null\nclear_output() \nout = widgets.Output()\ndef on_button_clicked(_):\n # \"linkar funcão com saída\"\n with out: \n !python chainer-fast-neuralstyle/generate.py $CONTENT_IMAGE_FN unique -m chainer-fast-neuralstyle/models/$model.value -o output.jpg --gpu 0 &> /dev/null\n fig = plt.figure(figsize=(10, 10))\n img = plt.imread('output.jpg')\n plt.axis('off')\n plt.title('imagem estilizada')\n plt.imshow(img)\n# unir butão e funcão juntos usando um métodos no butão\nbutton.on_click(on_button_clicked)\n# mostrar butão e sua saída juntos\nwidgets.VBox([button,out])", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cbc365658d94b4b2d9c2cc34703ae66a0f65df4a
258,048
ipynb
Jupyter Notebook
lectures/Arbitrage Pricing Theory.ipynb
NunoEdgarGFlowHub/research_public
824bcc0a2abcc1651031caf2d224c1c7e42328ee
[ "CC-BY-4.0" ]
null
null
null
lectures/Arbitrage Pricing Theory.ipynb
NunoEdgarGFlowHub/research_public
824bcc0a2abcc1651031caf2d224c1c7e42328ee
[ "CC-BY-4.0" ]
null
null
null
lectures/Arbitrage Pricing Theory.ipynb
NunoEdgarGFlowHub/research_public
824bcc0a2abcc1651031caf2d224c1c7e42328ee
[ "CC-BY-4.0" ]
null
null
null
526.628571
72,120
0.929129
[ [ [ "# Arbitrage Pricing Theory\n\nBy Evgenia \"Jenny\" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot.\n\nPart of the Quantopian Lecture Series:\n\n* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)\n* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)\n\nNotebook released under the Creative Commons Attribution 4.0 License.\n\n---\n\nArbitrage pricing theory is a major asset pricing theory that relies on expressing the returns using a linear factor model:\n\n$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \\ldots + b_{iK} F_K + \\epsilon_i$$\n\nThis theory states that if we have modelled our rate of return as above, then the expected returns obey\n\n$$ E(R_i) = R_F + b_{i1} \\lambda_1 + b_{i2} \\lambda_2 + \\ldots + b_{iK} \\lambda_K $$\n\nwhere $R_F$ is the risk-free rate, and $\\lambda_j$ is the risk premium - the return in excess of the risk-free rate - for factor $j$. This premium arises because investors require higher returns to compensate them for incurring risk. This generalizes the capital asset pricing model (CAPM), which uses the return on the market as its only factor.\n\nWe can compute $\\lambda_j$ by constructing a portfolio that has a sensitivity of 1 to factor $j$ and 0 to all others (called a <i>pure factor portfolio</i> for factor $j$), and measure its return in excess of the risk-free rate. Alternatively, we could compute the factor sensitivities for $K$ well-diversified (no asset-specific risk, i.e. $\\epsilon_p = 0$) portfolios, and then solve the resulting system of linear equations.", "_____no_output_____" ], [ "## Arbitrage\n\nThere are generally many, many securities in our universe. If we use different ones to compute the $\\lambda$s, will our results be consistent? If our results are inconsistent, there is an <i>arbitrage opportunity</i> (in expectation). Arbitrage is an operation that earns a profit without incurring risk and with no net investment of money, and an arbitrage opportunity is an opportunity to conduct such an operation. In this case, we mean that there is a risk-free operation with <i>expected</i> positive return that requires no net investment. It occurs when expectations of returns are inconsistent, i.e. risk is not priced consistently across securities.\n\nFor instance, there is an arbitrage opportunity in the following case: say there is an asset with expected rate of return 0.2 for the next year and a $\\beta$ of 1.2 with the market, while the market is expected to have a rate of return of 0.1, and the risk-free rate on 1-year bonds is 0.05. Then the APT model tells us that the expected rate of return on the asset should be\n\n$$ R_F + \\beta \\lambda = 0.05 + 1.2 (0.1 - 0.05) = 0.11$$\n\nThis does not agree with the prediction that the asset will have a rate of return of 0.2. So, if we buy \\$100 of our asset, short \\$120 of the market, and buy \\$20 of bonds, we will have invested no net money and are not exposed to any systematic risk (we are market-neutral), but we expect to earn $0.2 \\cdot 100 - 0.1 \\cdot 120 + 20 \\cdot 0.05 = 9$ dollars at the end of the year.\n\nThe APT assumes that these opportunities will be taken advantage of until prices shift and the arbitrage opportunities disappear. That is, it assumes that there are arbitrageurs who have sufficient amounts of patience and capital. 
This provides a justification for the use of empirical factor models in pricing securities: if the model were inconsistent, there would be an arbitrage opportunity, and so the prices would adjust.", "_____no_output_____" ], [ "##Goes Both Ways\n\nOften knowing $E(R_i)$ is incredibly difficult, but notice that this model tells us what the expected returns should be if the market is fully arbitraged. This lays the groundwork for long-short equity strategies based on factor model ranking systems. If you know what the expected return of an asset is given that the market is arbitraged, and you hypothesize that the market will be mostly arbitraged over the timeframe on which you are trading, then you can construct a ranking.\n\n##Long-Short Equity\n\nTo do this, estimate the expected return for each asset on the market, then rank them. Long the top percentile and short the bottom percentile, and you will make money on the difference in returns. Said another way, if the assets at the top of the ranking on average tend to make $5\\%$ more per year than the market, and assets at the bottom tend to make $5\\%$ less, then you will make $(M + 0.05) - (M - 0.05) = 0.10$ or $10\\%$ percent per year, where $M$ is the market return that gets canceled out.\n\nLong-short equity accepts that any individual asset is very difficult to model, relies on broad trends holding true. We can't accurately predict expected returns for an asset, but we can predict the expected returns for a group of 1000 assets as the errors average out.\n\nWe will have a full lecture on long-short models later.\n", "_____no_output_____" ], [ "##How many factors do you want?\n\nAs discussed in other lectures, noteably Overfitting, having more factors will explain more and more of your returns, but at the cost of being more and more fit to noise in your data. 
Do discover true signals and make good predictions going forward, you want to select as few parameters as possible that still explain a large amount of the variance in returns.", "_____no_output_____" ], [ "##Example: Computing Expected Returns for Two Assets", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom statsmodels import regression\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Let's get some data.", "_____no_output_____" ] ], [ [ "start_date = '2014-06-30'\nend_date = '2015-06-30'\n\n# We will look at the returns of an asset one-month into the future to model future returns.\noffset_start_date = '2014-07-31'\noffset_end_date = '2015-07-31'\n\n# Get returns data for our assets\nasset1 = get_pricing('HSC', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]\nasset2 = get_pricing('MSFT', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]\n# Get returns for the market\nbench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]\n# Use an ETF that tracks 3-month T-bills as our risk-free rate of return\ntreasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]", "_____no_output_____" ], [ "# Define a constant to compute intercept\nconstant = pd.TimeSeries(np.ones(len(asset1.index)), index=asset1.index)\n\ndf = pd.DataFrame({'R1': asset1,\n 'R2': asset2,\n 'SPY': bench,\n 'RF': treasury_ret,\n 'Constant': constant})\ndf = df.dropna()", "_____no_output_____" ] ], [ [ "We'll start by computing static regressions over the whole time period.", "_____no_output_____" ] ], [ [ "OLS_model = regression.linear_model.OLS(df['R1'], df[['SPY', 'RF', 'Constant']])\nfitted_model = OLS_model.fit()\nprint 'p-value', fitted_model.f_pvalue\nprint fitted_model.params\nR1_params = fitted_model.params\n\nOLS_model = regression.linear_model.OLS(df['R2'], df[['SPY', 'RF', 'Constant']])\nfitted_model = OLS_model.fit()\nprint 'p-value', fitted_model.f_pvalue\nprint fitted_model.params\nR2_params = fitted_model.params", "p-value 6.68669273225e-26\nSPY 1.768275\nRF -8.594705\nConstant -0.002203\ndtype: float64\np-value 6.48439859144e-23\nSPY 1.208441\nRF 5.352250\nConstant -0.000133\ndtype: float64\n" ] ], [ [ "As we've said before in other lectures, these numbers don't tell us too much by themselves. We need to look at the distribution of estimated coefficients and whether it's stable. 
Let's look at the rolling 100-day regression to see how it looks.", "_____no_output_____" ] ], [ [ "model = pd.stats.ols.MovingOLS(y = df['R1'], x=df[['SPY', 'RF']], \n window_type='rolling', \n window=100)\nrolling_parameter_estimates = model.beta\nrolling_parameter_estimates.plot();\n\nplt.hlines(R1_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')\nplt.hlines(R1_params['RF'], df.index[0], df.index[-1], linestyles='dashed', colors='green')\nplt.hlines(R1_params['Constant'], df.index[0], df.index[-1], linestyles='dashed', colors='red')\n\nplt.title('Asset1 Computed Betas');\nplt.legend(['Market Beta', 'Risk Free Beta', 'Intercept', 'Market Beta Static', 'Risk Free Beta Static', 'Intercept Static']);", "_____no_output_____" ], [ "model = pd.stats.ols.MovingOLS(y = df['R2'], x=df[['SPY', 'RF']], \n window_type='rolling', \n window=100)\nrolling_parameter_estimates = model.beta\nrolling_parameter_estimates.plot();\n\nplt.hlines(R2_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')\nplt.hlines(R2_params['RF'], df.index[0], df.index[-1], linestyles='dashed', colors='green')\nplt.hlines(R2_params['Constant'], df.index[0], df.index[-1], linestyles='dashed', colors='red')\n\nplt.title('Asset2 Computed Betas');\nplt.legend(['Market Beta', 'Risk Free Beta', 'Intercept', 'Market Beta Static', 'Risk Free Beta Static', 'Intercept Static']);", "_____no_output_____" ] ], [ [ "It might seem like the market betas are stable here, but let's zoom in to check.", "_____no_output_____" ] ], [ [ "model = pd.stats.ols.MovingOLS(y = df['R2'], x=df[['SPY', 'RF']], \n window_type='rolling', \n window=100)\nrolling_parameter_estimates = model.beta\nrolling_parameter_estimates['SPY'].plot();\n\nplt.hlines(R2_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')\n\nplt.title('Asset2 Computed Betas');\nplt.legend(['Market Beta', 'Market Beta Static']);", "_____no_output_____" ] ], [ [ "As you can see, the plot scale massively affects how we perceive estimate quality.", "_____no_output_____" ], [ "##Predicting the Future\n\nLet's use this model to predict future prices for these assets.", "_____no_output_____" ] ], [ [ "start_date = '2014-07-25'\nend_date = '2015-07-25'\n\n# We will look at the returns of an asset one-month into the future to model future returns.\noffset_start_date = '2014-08-25'\noffset_end_date = '2015-08-25'\n\n# Get returns data for our assets\nasset1 = get_pricing('HSC', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]\n# Get returns for the market\nbench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]\n# Use an ETF that tracks 3-month T-bills as our risk-free rate of return\ntreasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]\n\n\n# Define a constant to compute intercept\nconstant = pd.TimeSeries(np.ones(len(asset1.index)), index=asset1.index)\n\ndf = pd.DataFrame({'R1': asset1,\n 'SPY': bench,\n 'RF': treasury_ret,\n 'Constant': constant})\ndf = df.dropna()", "_____no_output_____" ] ], [ [ "We'll perform a historical regression to get our model parameter estimates.", "_____no_output_____" ] ], [ [ "OLS_model = regression.linear_model.OLS(df['R1'], df[['SPY', 'RF', 'Constant']])\nfitted_model = OLS_model.fit()\nprint 'p-value', fitted_model.f_pvalue\nprint fitted_model.params\n\nb_SPY = fitted_model.params['SPY']\nb_RF = fitted_model.params['RF']\na = 
fitted_model.params['Constant']", "p-value 3.74649506793e-24\nSPY 1.738003\nRF -7.382430\nConstant -0.002555\ndtype: float64\n" ] ], [ [ "Get the factor data for the last month so we can predict the next month.", "_____no_output_____" ] ], [ [ "start_date = '2015-07-25'\nend_date = '2015-08-25'\n\n# Get returns for the market\nlast_month_bench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]\n# Use an ETF that tracks 3-month T-bills as our risk-free rate of return\nlast_month_treasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]", "_____no_output_____" ] ], [ [ "Make our predictions.", "_____no_output_____" ] ], [ [ "predictions = b_SPY * last_month_bench + b_RF * last_month_treasury_ret + a\npredictions.index = predictions.index + pd.DateOffset(months=1)", "_____no_output_____" ], [ "plt.plot(asset1.index[-30:], asset1.values[-30:], 'b-')\nplt.plot(predictions.index, predictions, 'b--')\nplt.ylabel('Returns')\nplt.legend(['Actual', 'Predicted']);", "_____no_output_____" ] ], [ [ "Of course, this analysis hasn't yet told us anything about the quality of our predictions. To check the quality of our predictions we need to use techniques such as out of sample testing or cross-validation. For the purposes of long-short equity ranking systems, the Spearman Correlation lecture details a way to check the quality of a ranking system.\n\n##Important Note!\n\nAgain, any of these individual predictions will probably be inaccurate. Industry-quality modeling makes predictions for thousands of assets and relies on broad tends holding. If I told you that I have a predictive model with a 51% success rate, you would not make one prediction and bet all your money on it. You would make thousands of predictions and divide your money between them.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cbc373c788956418e6f967d1486e82937e7ac2b4
298,176
ipynb
Jupyter Notebook
week7/week7.ipynb
suhas1999/EE2703
e508f61d7af0c2445c6b30c465eca3fad455f853
[ "MIT" ]
null
null
null
week7/week7.ipynb
suhas1999/EE2703
e508f61d7af0c2445c6b30c465eca3fad455f853
[ "MIT" ]
null
null
null
week7/week7.ipynb
suhas1999/EE2703
e508f61d7af0c2445c6b30c465eca3fad455f853
[ "MIT" ]
null
null
null
318.904813
59,688
0.925883
[ [ [ "<h1 align=\"center\"> Circuit Analysis Using Sympy</h1>\n<h2 align=\"center\"> Assignment 7</h2>\n<h3 align=\"center\"> M V A Suhas kumar,EE17B109</h3>\n<h4 align=\"center\">March 16,2019 </h4>", "_____no_output_____" ], [ "# Introduction\nIn this assignment, we use Sympy to analytically solve a matrix equation governing an analog circuit. We look at two circuits, an active low pass filter and an active high pass filter. We create matrices using node equations for the circuits in sympy, and then solve the equations analytically. We then convert the resulting sympy solution into a numpy function which can be called. We then use the signals toolbox we studied in the last assignment to understand the responses of the two circuits to various inputs.", "_____no_output_____" ], [ "Importing required packages", "_____no_output_____" ] ], [ [ "\nfrom sympy import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as sp\nfrom pylab import *\nfrom IPython.display import *\n\n\n", "_____no_output_____" ] ], [ [ "# Low pass Filter\n", "_____no_output_____" ], [ "![Circuit1](circuit1.png)\n", "_____no_output_____" ], [ "where G =1.586 and R1 = R2 = 10kΩ and C1=C2=10pF. This gives a 3dB Butter-worth filter with cutoff frequency of 1/2πMHz.\n\nCircuit Equations are as follows:\n$$V_{m}=\\frac{V_{o}}{G}$$ \n$$ V_{p} =V_{1} \\frac{1}{1+s R_{2}C_{2}}$$\n$$ V_{o} = G(V_{p} - V_{m})$$\n$$\\frac{V_{i}-V_{1}}{R_{1}} + \\frac{V_{p}-V_{1}}{R_{2}} + s C_{1}(V_{0}-V_{1}) = 0$$\nSolving the above equations with approxmtion gives\n\n$$ V_{o} \\approx \\frac{V_{i}}{s R_{1} C_{1}}$$\n\nWe would like to solve this in Python and also get (and plot) the exact result. For this we need the sympy module.", "_____no_output_____" ], [ "To solve the equtions exactly we use matrix method of solving:\n", "_____no_output_____" ] ], [ [ "init_printing()\nR1,R2,C1,C2,G = symbols(\"R1 R2 C1 C2 G\")\nV1,Vp,Vm,Vo,Vi = symbols(\"V1 Vp Vm Vo Vi\")\ns = symbols(\"s\")\nA = Matrix([[0,0,1,-1/G],\n [-1/(1+s*R2*C2),1,0,0],\n [0,-G,G,1],\n [-1/R1-1/R2-s*C1,1/R2,0,s*C1]])\nM = Matrix([V1,Vp,Vm,Vo])\nb = Matrix([0,0,0,Vi/R1])\ndisplay(Eq(MatMul(A,M),b))", "_____no_output_____" ] ], [ [ "Solving the above matrix yield exact result", "_____no_output_____" ], [ "Function defining low pass filter:", "_____no_output_____" ] ], [ [ "def lowpass(R1=10**4,R2=10**4,C1=10**-11,C2=10**-11,G=1.586,Vi=1):\n s=symbols(\"s\")\n A=Matrix([[0,0,1,-1/G],\n [-1/(1+s*R2*C2),1,0,0],\n [0,-G,G,1],\n [-1/R1-1/R2-s*C1,1/R2,0,s*C1]])\n b=Matrix([0,0,0,Vi/R1])\n V = A.inv()*b\n return(A,b,V)\n ", "_____no_output_____" ] ], [ [ "Function which can take input in laplace domain or time domain and give the output of low pass filter:", "_____no_output_____" ] ], [ [ "def low_pass_output(laplace_fn = None,time_fn=None,t=np.linspace(0,1e-5,1e5),C=10**-11):\n A,b,V = lowpass(C1=C,C2=C)\n v_low_pass = V[-1]\n temp = expand(simplify(v_low_pass))\n n,d = fraction(temp)\n n,d = Poly(n,s),Poly(d,s)\n num,den = n.all_coeffs(),d.all_coeffs()\n H_v_low_pass = sp.lti([-float(f) for f in num],[float(f) for f in den])\n if laplace_fn !=None:\n temp = expand(simplify(laplace_fn))\n n,d = fraction(temp)\n n,d = Poly(n,s),Poly(d,s)\n num,den = n.all_coeffs(),d.all_coeffs()\n lap = sp.lti([float(f) for f in num],[float(f) for f in den])\n t,u = sp.impulse(lap,None,t)\n else:\n u = time_fn\n t,V_out,svec = sp.lsim(H_v_low_pass,u,t)\n return (t,V_out)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: DeprecationWarning: object of type 
<class 'float'> cannot be safely interpreted as an integer.\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "# High pass filter", "_____no_output_____" ], [ "![High pass filter](high.png)", "_____no_output_____" ], [ "values you can use are R1=R3=10kΩ, C1=C2=1nF, and G=1.586\n\nCircuit Equations are as follows:\n$$V_{n}=\\frac{V_{o}}{G}$$ \n$$ V_{p} =V_{1} \\frac{s R_{3}C_{2}}{1+s R_{3}C_{2}}$$\n$$ V_{o} = G(V_{p} - V_{n})$$\n$$(V_{1}-V_{i})sC_{1} + \\frac{(V_{1}-V_{o})}{R_{1}} + (V_{i}-V_{p})sC_{2} = 0 $$", "_____no_output_____" ] ], [ [ "R1, R3, C1, C2, G, Vi = symbols('R_1 R_3 C_1 C_2 G V_i')\nV1,Vn,Vp,Vo = symbols('V_1 V_n V_p V_o')\nx=Matrix([V1,Vn,Vp,Vo])\n\nA=Matrix([[0,-1,0,1/G],\n [s*C2*R3/(s*C2*R3+1),0,-1,0],\n [0,G,-G,1],\n [-s*C2-1/R1-s*C1,0,s*C2,1/R1]])\n\nb=Matrix([0,0,0,-Vi*s*C1])\ninit_printing\ndisplay(Eq(MatMul(A,x),b))", "_____no_output_____" ] ], [ [ "Function defining high pass filter:", "_____no_output_____" ] ], [ [ "def highpass(R1=10**4,R3=10**4,C1=10**-9,C2=10**-9,G=1.586,Vi=1):\n s= symbols(\"s\")\n A=Matrix([[0,-1,0,1/G],\n [s*C2*R3/(s*C2*R3+1),0,-1,0],\n [0,G,-G,1],\n [-s*C2-1/R1-s*C1,0,s*C2,1/R1]])\n\n b=Matrix([0,0,0,-Vi*s*C1])\n V =A.inv() * b\n return (A,b,V)\n ", "_____no_output_____" ] ], [ [ "Function which can take input in laplace domain or time domain and give the output of high pass filter:", "_____no_output_____" ] ], [ [ "\ndef high_pass_output(laplace_fn = None,time_fn=None,t=np.linspace(0,1e-4,1e5),C=10**-11):\n A,b,V = highpass(C1=C,C2=C)\n v_high_pass = V[-1]\n temp = expand(simplify(v_high_pass))\n n,d = fraction(temp)\n n,d = Poly(n,s),Poly(d,s)\n num,den = n.all_coeffs(),d.all_coeffs()\n H_v_high_pass = sp.lti([float(f) for f in num],[float(f) for f in den])\n if laplace_fn !=None:\n temp = expand(simplify(laplace_fn))\n n,d = fraction(temp)\n n,d = Poly(n,s),Poly(d,s)\n num,den = n.all_coeffs(),d.all_coeffs()\n lap = sp.lti([float(f) for f in num],[float(f) for f in den])\n t,u = sp.impulse(lap,None,t)\n else:\n u = time_fn\n t,V_out,svec = sp.lsim(H_v_high_pass,u,t)\n return (t,V_out)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ] ], [ [ "# Question1", "_____no_output_____" ], [ "Step Response for low pass filter", "_____no_output_____" ] ], [ [ "t,V_low_step = low_pass_output(laplace_fn=1/s)", "_____no_output_____" ], [ "plt.plot(t,V_low_step)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$Step\\ Response\\ V_{o}(t)$\",size=14)\nplt.title(\"Step Response When Capacitance = 10pF in low pass filter\")\nplt.show()", "_____no_output_____" ] ], [ [ "Step response is starting from zero and reaching 0.793 at steady state.This is because DC gain oftransfer function is 0.793.Initial value is 0 because AC gain of low pass filter is zero(impulse can be assumed as High frequency signal and we know low pass filter dosen't pass high frequency signal). 
", "_____no_output_____" ], [ "# Question2", "_____no_output_____" ], [ "Finding Output when input signal is $$(sin(2000πt)+cos(2×106πt))u_{o}(t)$$", "_____no_output_____" ] ], [ [ "t = np.linspace(0,1e-3,1e5)\nplt.plot(t,np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t))\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{i}(t)$\",size=14)\nplt.title(\"Mixed frequency input\")\nplt.show()", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "Band is high frequency wave and envolope is the low frequency wave", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-5,1e5)\nt,vout = low_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for mixed frequency Sinusoid in lowpass filter in transient time\")\nplt.show()", "_____no_output_____" ] ], [ [ "From above we can clearly see that Output is superposition of High Amplitude low frequency wave and Low amplitude High frquency wave(Since Low pass filter attenuates the High frequencies) ", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-5,1e5)\nt,vout = high_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for mixed frequency Sinusoid in High pass filter in transient time\")\nplt.show()", "_____no_output_____" ] ], [ [ "The plot which is appearing to be band(closely placed lines) is superposition of High Amplitude High frequency wave and Low amplitude Low frquency wave(Since High pass filter attenuates the Low frequencies) which inturn appears to be non distorted sine wave.", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-3,1e5)\nt,vout = low_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for mixed frequency Sinusoid in lowpass filter in steady time\")\nplt.show()", "_____no_output_____" ] ], [ [ "From graph we can see frequency is close to 1000Hz(which is low frquency input)", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-4,1e5)\nt,vout = high_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for mixed frequency Sinusoid in High pass filter in steay time\")\nplt.show()", "_____no_output_____" ] ], [ [ "From graph we can see 
frequency is close to 1000KHz(which is high frquency input)", "_____no_output_____" ], [ "# Question 3,4", "_____no_output_____" ], [ "Damped Sinusoid -----> $exp(-300t)sin(10^{6}t)$", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-3,1e6)\nf = np.exp(-3000*t) * np.sin(10**6 *t)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.title(\"High frequency damped sinusoid\")\nplt.xlabel(\"$t$\")\nplt.ylabel(\"$v_i(t)$\",size=20)\nplt.plot(t,f)\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "\nt = linspace(0,1e-3,1e6)\nt,vout = high_pass_output(time_fn=f,t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for High frequency damped input in High pass filter\")\nplt.show()", "_____no_output_____" ] ], [ [ "From above graph we can clearly see that High pass filter passed high frequency sinusoid with out attenuating much.(Since property of high pass filter)", "_____no_output_____" ] ], [ [ "\nt = linspace(0,1e-3,1e6)\nt,vout = low_pass_output(time_fn=f,t=t,C=10**-9)", "/home/suhas/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n \n" ], [ "plt.plot(t,vout)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$V_{o}(t)$\",size=14)\nplt.title(\"Output for High frequency damped input in low pass filter\")\nplt.show()", "_____no_output_____" ] ], [ [ "From above graph Low pass filter quickly attenuated the High frequency Sinusoid and gives distorted Output", "_____no_output_____" ], [ "# Question 5", "_____no_output_____" ] ], [ [ "t,V_high_step = high_pass_output(laplace_fn=1/s,C=10**-9)", "_____no_output_____" ], [ "plt.plot(t,V_high_step)\nplt.grid(True)\nplt.xlabel(\"t ------>\",size=14)\nplt.ylabel(r\"$Step\\ Response\\ V_{o}(t)$\",size=14)\nplt.title(\"Step Response When Capacitance = 1nF in high pass filter\")\nplt.show()", "_____no_output_____" ] ], [ [ "Step response here saturates at zero and this is because DC gain of High pass filter is 0. We can clearly see from graph that it starts from 0.793 and this because AC gain of transfer function at high frequencies is 0.793(Step can assumed as infinite frequency signal and we know high pass filter only allows high frequency signals)\n\nstep response overshoots the steady state value of 0, reaches an\nextremum, then settles back to 0, unlike the response of the low pass filter which steadily\napproaches the steady state value with no extrema. This occurs because of the presence of\nzeros at the origin in the transfer function of the high pass filter(which imply that the DC\ngain is 0). Since the steady state value of the step response is 0, the total signed area under\nthe curve of the impulse response must also be 0. This means that the impulse response must\nequal zero at one or more time instants. 
Since the impulse response is the derivative of the\nstep response, this therefore means that the step response must have at least one extremum.\nThis explains the behaviour of the step response of the high pass filter.", "_____no_output_____" ], [ "# Conclusions:", "_____no_output_____" ], [ "The low pass filter responds by letting the low frequency sinusoid pass through without\nmuch additional attenuation. The output decays as the input also decays.\n\nThe high pass filter responds by quickly attenuating the input. Notice that the time scales\nshow that the high pass filter response is orders of magnitude faster than the low pass\nresponse. This is because the input frequency is below the cutoff frequency, so the output\ngoes to 0 very fast.\n\nIn conclusion, the sympy module has allowed us to analyse quite complicated circuits by\nanalytically solving their node equations. We then interpreted the solutions by plotting time\ndomain responses using the signals toolbox. Thus, sympy combined with the scipy.signal\nmodule is a very useful toolbox for analyzing complicated systems like the active filters in\nthis assignment.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cbc37c653be119bc37e215611881e832d3296d75
4,598
ipynb
Jupyter Notebook
docs/interpret_docs/pdp.ipynb
eddy-geek/interpret
d75e1b1f53b6fbd78611a08b856bfe59dc1d8e2f
[ "MIT" ]
2,674
2019-10-03T14:14:35.000Z
2022-03-31T13:40:49.000Z
docs/interpret_docs/pdp.ipynb
eddy-geek/interpret
d75e1b1f53b6fbd78611a08b856bfe59dc1d8e2f
[ "MIT" ]
257
2019-11-08T19:22:56.000Z
2022-03-29T20:09:07.000Z
docs/interpret_docs/pdp.ipynb
eddy-geek/interpret
d75e1b1f53b6fbd78611a08b856bfe59dc1d8e2f
[ "MIT" ]
367
2019-10-31T15:33:21.000Z
2022-03-31T13:40:50.000Z
30.653333
415
0.624837
[ [ [ "# Partial Dependence Plot\n\n## Summary\n\nPartial dependence plots visualize the dependence between the response and a set of target features (usually one or two), marginalizing over all the other features. For a perturbation-based interpretability method, it is relatively quick. PDP assumes independence between the features, and can be misleading interpretability-wise when this is not met (e.g. when the model has many high order interactions).\n\n## How it Works\n\nThe PDP module for `scikit-learn` {cite}`pedregosa2011scikit` provides a succinct description of the algorithm [here](https://scikit-learn.org/stable/modules/partial_dependence.html).\n\nChristoph Molnar's \"Interpretable Machine Learning\" e-book {cite}`molnar2020interpretable` has an excellent overview on partial dependence that can be found [here](https://christophm.github.io/interpretable-ml-book/pdp.html).\n\nThe conceiving paper \"Greedy Function Approximation: A Gradient Boosting Machine\" {cite}`friedman2001greedy` provides a good motivation and definition.", "_____no_output_____" ], [ "## Code Example\n\nThe following code will train a blackbox pipeline for the breast cancer dataset. Aftewards it will interpret the pipeline and its decisions with Partial Dependence Plots. The visualizations provided will be for global explanations.", "_____no_output_____" ] ], [ [ "from interpret import set_visualize_provider\nfrom interpret.provider import InlineProvider\nset_visualize_provider(InlineProvider())", "_____no_output_____" ], [ "from sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\n\nfrom interpret import show\nfrom interpret.blackbox import PartialDependence\n\nseed = 1\nX, y = load_breast_cancer(return_X_y=True, as_frame=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)\n\npca = PCA()\nrf = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n\nblackbox_model = Pipeline([('pca', pca), ('rf', rf)])\nblackbox_model.fit(X_train, y_train)\n\npdp = PartialDependence(predict_fn=blackbox_model.predict_proba, data=X_train)\npdp_global = pdp.explain_global()\n\nshow(pdp_global)", "_____no_output_____" ] ], [ [ "## Further Resources\n\n- [Paper link to conceiving paper](https://projecteuclid.org/download/pdf_1/euclid.aos/1013203451)\n- [scikit-learn on their PDP module](https://scikit-learn.org/stable/modules/partial_dependence.html)", "_____no_output_____" ], [ "## Bibliography\n\n```{bibliography} references.bib\n:style: unsrt\n:filter: docname in docnames\n```", "_____no_output_____" ], [ "## API\n\n### PartialDependence\n\n```{eval-rst}\n.. autoclass:: interpret.blackbox.PartialDependence\n :members:\n :inherited-members:\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cbc37efb1fed206fa4c0380106d7dbcf1b3853bc
12,030
ipynb
Jupyter Notebook
1_Descente de gradient.ipynb
fradav/Cours-Deep-Learning
3e4daceecb97f57d1d5df6fb174eef117967e17c
[ "MIT" ]
null
null
null
1_Descente de gradient.ipynb
fradav/Cours-Deep-Learning
3e4daceecb97f57d1d5df6fb174eef117967e17c
[ "MIT" ]
null
null
null
1_Descente de gradient.ipynb
fradav/Cours-Deep-Learning
3e4daceecb97f57d1d5df6fb174eef117967e17c
[ "MIT" ]
null
null
null
22.113971
287
0.512219
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Descente-de-Gradient\" data-toc-modified-id=\"Descente-de-Gradient-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Descente de Gradient</a></span></li></ul></div>", "_____no_output_____" ], [ "Descente de Gradient\n================\n\nL'[algorithme de la descente de gradient](http://en.wikipedia.org/wiki/Gradient_descent) est un algorithme d'optimisation pour trouver un minimum local d'une fonction scalaire à partir d'un point donné, en effectuant de pas successifs dans la direction de l'inverse du gradient.\n\nPour une fonction $f: \\mathbb{R}^n \\to \\mathbb{R}$, partant d'un point $\\mathbf{x}_0$, la méthode calcule les points successifs dans le domaine de la fonction\n\n$$\n \\mathbf{x}_{n + 1} = \\mathbf{x}_n - \\eta \\left( \\nabla f \\right)_{\\mathbf{x}_n} \\; ,\n$$", "_____no_output_____" ], [ "où \n\n$\\eta > 0$ est une taille de /pas/ suffisamment petite et and $\\left( \\nabla f \\right)_{\\mathbf{x}_n}$ est le [gradient](http://en.wikipedia.org/wiki/Gradient) de $f$ évaluée au point $\\mathbf{x}_n$. Les valeurs successives de la fonction \n\n$$\n f(\\mathbf{x}_0) \\ge f(\\mathbf{x}_1) \\ge f(\\mathbf{x}_2) \\ge \\dots\n$$\n\nvont décroître globalement et la séquence $\\mathbf{x}_n$ converge habituellement vers un minimum local.", "_____no_output_____" ], [ "En pratique utiliser un pas de taille fixe $\\eta$ est particulièrement inefficace et la plupart des algorithmes vont plutôt chercher à l'adapter à chaque itération.\n\nLe code suivant implémente la descente de gradient avec un pas de taille fixe s'arrétant quand la [norme](http://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) du gradient descend en dessous d'un certain seuil.", "_____no_output_____" ], [ "Attention par défaut, pytorch *accumule* les gradients à chaque passe inverse!\nC'est pourquoi il faut le remettre à zéro à chaque itération.", "_____no_output_____" ], [ "Commençons par importer les suspects usuels", "_____no_output_____" ] ], [ [ "import torch\nimport numpy as np\nimport math", "_____no_output_____" ] ], [ [ "Illustrons l'accumulation du gradient", "_____no_output_____" ] ], [ [ "x1 = torch.empty(2, requires_grad=True)\nx1", "_____no_output_____" ], [ "f1 = torch.pow(x1[0],2)\nf1", "_____no_output_____" ], [ "# x1.grad.zero_()\nf1.backward(retain_graph=True)\nx1.grad", "_____no_output_____" ], [ "x1.data.sub_(torch.ones(2))", "_____no_output_____" ] ], [ [ "Maintenant essayons d'implémenter une descente de gradient pour la fonction\n$f(X) = sin(x_1) + cos(x_2)$ ", "_____no_output_____" ] ], [ [ "x0 = torch.ones(2,requires_grad=True)", "_____no_output_____" ], [ "f = torch.sin(x0[0]) + torch.cos(x0[1])\nf", "_____no_output_____" ] ], [ [ "On va avoir besoin de :\n```python\nf.backward(...) 
# Pour le calcul du gradient proprement dit\nx.grad.data.zero_() # pour la remise à zéro du gradient après une itération\nnp.linalg.norm(x.grad.numpy()) # pour contrôler la convergence (norme l2)\n```\n\nOn veut une fonction gd qui prend en argument $f, x, \\eta, \\epsilon$", "_____no_output_____" ] ], [ [ "def gd(f, x, eta, epsilon):\n while 1:\n f.backward(retain_graph=True)\n# print(np.linalg.norm(x.grad.numpy()))\n if (torch.norm(x.grad) < epsilon): \n break\n else:\n x.data.sub_(eta * x.grad.data)\n x.grad.data.zero_()", "_____no_output_____" ], [ "gd(f, x0, 0.9, 0.00001)", "_____no_output_____" ], [ "print(x0.data)\nprint(f.data)", "tensor([-1.5708, 3.1416])\ntensor(1.3818)\n" ] ], [ [ "Cette fonction ne permet pas d'avoir la valeur de $f$ directement sur le résultat. Il vaut mieux utiliser une fonction qu'un noeud de notre graphe comme argument de notre descente de gradient.", "_____no_output_____" ] ], [ [ "x0 = torch.ones(2,requires_grad=True)\nx0", "_____no_output_____" ], [ "def f(x):\n return x[0].sin() + x[1].cos()", "_____no_output_____" ], [ "def gd(f, x, eta, epsilon):\n fval = f(x)\n while 1: \n fval.backward(retain_graph=True) # On a pas besoin de recalculer f(x) dans ce cas\n # seul le gradient nous intéresse ici.\n # notez qu'en pratique ce n'est pratiquement \n # jamais le cas.\n if (torch.norm(x.grad) < epsilon):\n break\n else:\n x.data.sub_(eta * x.grad.data)\n x.grad.data.zero_()", "_____no_output_____" ], [ "gd(f, x0, 0.9, 0.00001)", "_____no_output_____" ], [ "print(x0)\nprint(f(x0))", "tensor([-1.5708, 3.1416], requires_grad=True)\ntensor(-2., grad_fn=<AddBackward0>)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cbc3868bc3e1c0a8d147b48ec77e38208c9533ee
25,554
ipynb
Jupyter Notebook
notebooks/tlh_1.01_clean_test.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
notebooks/tlh_1.01_clean_test.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
notebooks/tlh_1.01_clean_test.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
40.179245
196
0.387337
[ [ [ "import pandas as pd\nimport sys", "_____no_output_____" ], [ "test = pd.read_csv(\n '../data/raw/test.csv', \n parse_dates = ['timestamp'],\n)\nprint(test.dtypes)\nprint(sys.getsizeof(test) / 1024 / 1024)\nprint(test.memory_usage() / 1024 / 1024)\ntest", "event_id object\ngame_session object\ntimestamp datetime64[ns, UTC]\nevent_data object\ninstallation_id object\nevent_count int64\nevent_code int64\ngame_time int64\ntitle object\ntype object\nworld object\ndtype: object\n771.4811239242554\nIndex 0.000122\nevent_id 8.822739\ngame_session 8.822739\ntimestamp 8.822739\nevent_data 8.822739\ninstallation_id 8.822739\nevent_count 8.822739\nevent_code 8.822739\ngame_time 8.822739\ntitle 8.822739\ntype 8.822739\nworld 8.822739\ndtype: float64\n" ], [ "df = test.copy()\ndf = df.astype({'world': 'category', 'type': 'category', 'title': 'category', 'event_code': 'category', 'installation_id': 'category', 'game_session': 'category', 'event_id': 'category'})\ndf.loc[:, 'game_time'] = pd.to_numeric(df.game_time, downcast = 'integer')\ndf.loc[:, 'event_count'] = pd.to_numeric(df.event_count, downcast = 'integer')\nprint(sys.getsizeof(df) / 1024 / 1024)\nprint(df.memory_usage() / 1024 / 1024)\ndf", "315.6400394439697\nIndex 0.000122\nevent_id 2.218235\ngame_session 3.672703\ntimestamp 8.822739\nevent_data 8.822739\ninstallation_id 2.252377\nevent_count 2.205685\nevent_code 1.104383\ngame_time 4.411369\ntitle 1.104399\ntype 1.103025\nworld 1.103025\ndtype: float64\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cbc38de7771a979dec0dfc7438bd4492b41d1fea
17,882
ipynb
Jupyter Notebook
A - Using TorchText with Your Own Datasets.ipynb
Andrews2017/pytorch-sentiment-analysis
bc5b2e0221abc289aee789c02812bb57495be7ff
[ "MIT" ]
3,595
2018-03-26T19:44:39.000Z
2022-03-31T08:45:12.000Z
A - Using TorchText with Your Own Datasets.ipynb
v-mk-s/pytorch-sentiment-analysis
b4efbefa47672174394a8b6a27d4e7bc193bc224
[ "MIT" ]
105
2018-09-07T14:49:27.000Z
2022-03-01T05:43:11.000Z
A - Using TorchText with Your Own Datasets.ipynb
v-mk-s/pytorch-sentiment-analysis
b4efbefa47672174394a8b6a27d4e7bc193bc224
[ "MIT" ]
1,066
2018-06-12T00:58:01.000Z
2022-03-27T09:03:54.000Z
39.561947
531
0.562745
[ [ [ "# A - Using TorchText with Your Own Datasets\n\nIn this series we have used the IMDb dataset included as a dataset in TorchText. TorchText has many canonical datasets included for classification, language modelling, sequence tagging, etc. However, frequently you'll be wanting to use your own datasets. Luckily, TorchText has functions to help you to this.\n\nRecall in the series, we:\n- defined the `Field`s\n- loaded the dataset\n- created the splits\n\nAs a reminder, the code is shown below:\n\n```python\nTEXT = data.Field()\nLABEL = data.LabelField()\n\ntrain_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n\ntrain_data, valid_data = train_data.split()\n```", "_____no_output_____" ], [ "There are three data formats TorchText can read: `json`, `tsv` (tab separated values) and`csv` (comma separated values).\n\n**In my opinion, the best formatting for TorchText is `json`, which I'll explain later on.**\n\n## Reading JSON\n\nStarting with `json`, your data must be in the `json lines` format, i.e. it must be something like:\n\n```\n{\"name\": \"John\", \"location\": \"United Kingdom\", \"age\": 42, \"quote\": [\"i\", \"love\", \"the\", \"united kingdom\"]}\n{\"name\": \"Mary\", \"location\": \"United States\", \"age\": 36, \"quote\": [\"i\", \"want\", \"more\", \"telescopes\"]}\n```\n\nThat is, each line is a `json` object. See `data/train.json` for an example.\n\nWe then define the fields:", "_____no_output_____" ] ], [ [ "from torchtext.legacy import data\nfrom torchtext.legacy import datasets\n\nNAME = data.Field()\nSAYING = data.Field()\nPLACE = data.Field()", "_____no_output_____" ] ], [ [ "Next, we must tell TorchText which fields apply to which elements of the `json` object. \n\nFor `json` data, we must create a dictionary where:\n- the key matches the key of the `json` object\n- the value is a tuple where:\n - the first element becomes the batch object's attribute name\n - the second element is the name of the `Field`\n \nWhat do we mean when we say \"becomes the batch object's attribute name\"? Recall in the previous exercises where we accessed the `TEXT` and `LABEL` fields in the train/evaluation loop by using `batch.text` and `batch.label`, this is because TorchText sets the batch object to have a `text` and `label` attribute, each being a tensor containing either the text or the label.\n\nA few notes:\n\n* The order of the keys in the `fields` dictionary does not matter, as long as its keys match the `json` data keys.\n\n- The `Field` name does not have to match the key in the `json` object, e.g. we use `PLACE` for the `\"location\"` field.\n\n- When dealing with `json` data, not all of the keys have to be used, e.g. we did not use the `\"age\"` field.\n\n- Also, if the values of `json` field are a string then the `Fields` tokenization is applied (default is to split the string on spaces), however if the values are a list then no tokenization is applied. Usually it is a good idea for the data to already be tokenized into a list, this saves time as you don't have to wait for TorchText to do it.\n\n- The value of the `json` fields do not have to be the same type. Some examples can have their `\"quote\"` as a string, and some as a list. The tokenization will only get applied to the ones with their `\"quote\"` as a string.\n\n- If you are using a `json` field, every single example must have an instance of that field, e.g. in this example all examples must have a name, location and quote. 
However, as we are not using the age field, it does not matter if an example does not have it.", "_____no_output_____" ] ], [ [ "fields = {'name': ('n', NAME), 'location': ('p', PLACE), 'quote': ('s', SAYING)}", "_____no_output_____" ] ], [ [ "Now, in a training loop we can iterate over the data iterator and access the name via `batch.n`, the location via `batch.p`, and the quote via `batch.s`.\n\nWe then create our datasets (`train_data` and `test_data`) with the `TabularDataset.splits` function. \n\nThe `path` argument specifices the top level folder common among both datasets, and the `train` and `test` arguments specify the filename of each dataset, e.g. here the train dataset is located at `data/train.json`.\n\nWe tell the function we are using `json` data, and pass in our `fields` dictionary defined previously.", "_____no_output_____" ] ], [ [ "train_data, test_data = data.TabularDataset.splits(\n path = 'data',\n train = 'train.json',\n test = 'test.json',\n format = 'json',\n fields = fields\n)", "_____no_output_____" ] ], [ [ "If you already had a validation dataset, the location of this can be passed as the `validation` argument.", "_____no_output_____" ] ], [ [ "train_data, valid_data, test_data = data.TabularDataset.splits(\n path = 'data',\n train = 'train.json',\n validation = 'valid.json',\n test = 'test.json',\n format = 'json',\n fields = fields\n)", "_____no_output_____" ] ], [ [ "We can then view an example to make sure it has worked correctly.\n\nNotice how the field names (`n`, `p` and `s`) match up with what was defined in the `fields` dictionary.\n\nAlso notice how the word `\"United Kingdom\"` in `p` has been split by the tokenization, whereas the `\"united kingdom\"` in `s` has not. This is due to what was mentioned previously, where TorchText assumes that any `json` fields that are lists are already tokenized and no further tokenization is applied. ", "_____no_output_____" ] ], [ [ "print(vars(train_data[0]))", "{'n': ['John'], 'p': ['United', 'Kingdom'], 's': ['i', 'love', 'the', 'united kingdom']}\n" ] ], [ [ "We can now use `train_data`, `test_data` and `valid_data` to build a vocabulary and create iterators, as in the other notebooks. We can access all attributes by using `batch.n`, `batch.p` and `batch.s` for the names, places and sayings, respectively.\n\n## Reading CSV/TSV\n\n`csv` and `tsv` are very similar, except csv has elements separated by commas and tsv by tabs.\n\nUsing the same example above, our `tsv` data will be in the form of:\n\n```\nname\tlocation\tage\tquote\nJohn\tUnited Kingdom\t42\ti love the united kingdom\nMary\tUnited States\t36\ti want more telescopes\n```\n\nThat is, on each row the elements are separated by tabs and we have one example per row. The first row is usually a header (i.e. the name of each of the columns), but your data could have no header.\n\nYou cannot have lists within `tsv` or `csv` data.\n\nThe way the fields are defined is a bit different to `json`. We now use a list of tuples, where each element is also a tuple. The first element of these inner tuples will become the batch object's attribute name, second element is the `Field` name.\n\nUnlike the `json` data, the tuples have to be in the same order that they are within the `tsv` data. Due to this, when skipping a column of data a tuple of `None`s needs to be used, if not then our `SAYING` field will be applied to the `age` column of the `tsv` data and the `quote` column will not be used. 
\n\nHowever, if you only wanted to use the `name` and `age` column, you could just use two tuples as they are the first two columns.\n\nWe change our `TabularDataset` to read the correct `.tsv` files, and change the `format` argument to `'tsv'`.\n\nIf your data has a header, which ours does, it must be skipped by passing `skip_header = True`. If not, TorchText will think the header is an example. By default, `skip_header` will be `False`.", "_____no_output_____" ] ], [ [ "fields = [('n', NAME), ('p', PLACE), (None, None), ('s', SAYING)]", "_____no_output_____" ], [ "train_data, valid_data, test_data = data.TabularDataset.splits(\n path = 'data',\n train = 'train.tsv',\n validation = 'valid.tsv',\n test = 'test.tsv',\n format = 'tsv',\n fields = fields,\n skip_header = True\n)", "_____no_output_____" ], [ "print(vars(train_data[0]))", "{'n': ['John'], 'p': ['United', 'Kingdom'], 's': ['i', 'love', 'the', 'united', 'kingdom']}\n" ] ], [ [ "Finally, we'll cover `csv` files. \n\nThis is pretty much the exact same as the `tsv` files, expect with the `format` argument set to `'csv'`.", "_____no_output_____" ] ], [ [ "fields = [('n', NAME), ('p', PLACE), (None, None), ('s', SAYING)]", "_____no_output_____" ], [ "train_data, valid_data, test_data = data.TabularDataset.splits(\n path = 'data',\n train = 'train.csv',\n validation = 'valid.csv',\n test = 'test.csv',\n format = 'csv',\n fields = fields,\n skip_header = True\n)", "_____no_output_____" ], [ "print(vars(train_data[0]))", "{'n': ['John'], 'p': ['United', 'Kingdom'], 's': ['i', 'love', 'the', 'united', 'kingdom']}\n" ] ], [ [ "## Why JSON over CSV/TSV?\n\n1. Your `csv` or `tsv` data cannot be stored lists. This means data cannot be already be tokenized, thus everytime you run your Python script that reads this data via TorchText, it has to be tokenized. Using advanced tokenizers, such as the `spaCy` tokenizer, takes a non-negligible amount of time. Thus, it is better to tokenize your datasets and store them in the `json lines` format.\n\n2. If tabs appear in your `tsv` data, or commas appear in your `csv` data, TorchText will think they are delimiters between columns. This will cause your data to be parsed incorrectly. Worst of all TorchText will not alert you to this as it cannot tell the difference between a tab/comma in a field and a tab/comma as a delimiter. As `json` data is essentially a dictionary, you access the data within the fields via its key, so do not have to worry about \"surprise\" delimiters.", "_____no_output_____" ], [ "## Iterators \n\nUsing any of the above datasets, we can then build the vocab and create the iterators.", "_____no_output_____" ] ], [ [ "NAME.build_vocab(train_data)\nSAYING.build_vocab(train_data)\nPLACE.build_vocab(train_data)", "_____no_output_____" ] ], [ [ "Then, we can create the iterators after defining our batch size and device.\n\nBy default, the train data is shuffled each epoch, but the validation/test data is sorted. However, TorchText doesn't know what to use to sort our data and it would throw an error if we don't tell it. \n\nThere are two ways to handle this, you can either tell the iterator not to sort the validation/test data by passing `sort = False`, or you can tell it how to sort the data by passing a `sort_key`. A sort key is a function that returns a key on which to sort the data on. For example, `lambda x: x.s` will sort the examples by their `s` attribute, i.e their quote. 
Ideally, you want to use a sort key as the `BucketIterator` will then be able to sort your examples and then minimize the amount of padding within each batch.\n\nWe can then iterate over our iterator to get batches of data. Note how by default TorchText has the batch dimension second.", "_____no_output_____" ] ], [ [ "import torch\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nBATCH_SIZE = 1\n\ntrain_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, valid_data, test_data),\n sort = False, #don't sort test/validation data\n batch_size=BATCH_SIZE,\n device=device)\n\ntrain_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, valid_data, test_data),\n sort_key = lambda x: x.s, #sort by s attribute (quote)\n batch_size=BATCH_SIZE,\n device=device)\n\nprint('Train:')\nfor batch in train_iterator:\n print(batch)\n \nprint('Valid:')\nfor batch in valid_iterator:\n print(batch)\n \nprint('Test:')\nfor batch in test_iterator:\n print(batch)", "Train:\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 2x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 5x1 (GPU 0)]\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 2x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 4x1 (GPU 0)]\nValid:\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 2x1 (GPU 0)]\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 4x1 (GPU 0)]\nTest:\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 3x1 (GPU 0)]\n\n[torchtext.data.batch.Batch of size 1]\n\t[.n]:[torch.cuda.LongTensor of size 1x1 (GPU 0)]\n\t[.p]:[torch.cuda.LongTensor of size 2x1 (GPU 0)]\n\t[.s]:[torch.cuda.LongTensor of size 3x1 (GPU 0)]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc38e0fe8b150d8cdaef3faec55213a35e96a73
56,691
ipynb
Jupyter Notebook
.ipynb_checkpoints/data_exploration_heart_disease-checkpoint.ipynb
danhtaihoang/kidney-disease
3b5b7e5d7250b14a000359d9539ae8882cd0b754
[ "MIT" ]
1
2020-10-02T16:08:11.000Z
2020-10-02T16:08:11.000Z
.ipynb_checkpoints/data_exploration_heart_disease-checkpoint.ipynb
danhtaihoang/hiv1-protease
fa1d26d0013ce6ecf4127898c7853acc56939503
[ "MIT" ]
null
null
null
.ipynb_checkpoints/data_exploration_heart_disease-checkpoint.ipynb
danhtaihoang/hiv1-protease
fa1d26d0013ce6ecf4127898c7853acc56939503
[ "MIT" ]
null
null
null
89.136792
12,716
0.794182
[ [ [ "## Data Analysis", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "np.random.seed(1)", "_____no_output_____" ], [ "# load data\ndf = pd.read_csv('../input_data/heartdisease_data.csv',sep= ',')\ndf[0:10]", "_____no_output_____" ] ], [ [ "The data contains 13 features:<br/>\n0) age: Age (years) --> discrete <br/>\n1) sex: Sex (1: male, 0: female) --> categorical <br/>\n2) cp: Chest pain type (1: typical angina, 2: atypical angina, 3: non-anginal pain, 4: asymptomatic) --> categorical <br/>\n3) trestbps: Resting blood pressure (mm Hg on admission to the hospital) --> continuous <br/>\n4) chol: Cholesterol measurement (mg/dl) --> continuous <br/>\n5) fbs: Fasting blood sugar (0: <120 mg/dl, 1: > 120 mg/dl) --> categorical <br/>\n6) restecg: Resting electrocardiographic measurement (0: normal, 1: having ST-T wave abnormality, 2: showing probable or definite left ventricular hypertrophy by Estes' criteria) --> categorical <br/>\n7) thalach: Maximum heart rate achieved --> continuous<br/>\n8) exang: Exercise induced angina (1: yes; 0: no) --> categorical <br/>\n9) oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot) --> continuous<br/>\n10) slope: The slope of the peak exercise ST segment (1: upsloping, 2: flat, 3: downsloping) --> categorical<br/>\n11) ca: The number of major vessels (0-3) --> categorical <br/>\n12) thal: Thalassemia (a type of blood disorder) (3: normal; 6: fixed defect; 7: reversable defect) --> categorical <br/>\n\nand 1 target: Heart disease (0: no, 1: yes) <br/>", "_____no_output_____" ] ], [ [ "# select features and target:\ndf = np.array(df).astype(float)\n\n# features:\nX = df[:,:-1]\nl,n = X.shape\nprint(l,n)\n\n# target:\ny = df[:,-1]", "(303, 13)\n" ] ], [ [ "### Features", "_____no_output_____" ] ], [ [ "\"\"\"\nplt.figure(figsize=(11,6))\n\nfeatures = s[0,:8]\nfor j in range(2):\n for i in range(4):\n ii = j*4 + i\n plt.subplot2grid((2,4),(j,i))\n bins = np.linspace(min(X[:,ii]), max(X[:,ii]),10, endpoint=False)\n plt.hist(X[:,ii],bins,histtype='bar',rwidth=0.8,normed=True)\n plt.title('%s'%features[ii])\n plt.tight_layout(h_pad=1, w_pad=1.5)\n\"\"\" ", "_____no_output_____" ] ], [ [ "### Target", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(4,3))\nplt.bar(0,sum(y==0)/float(l),width=0.8,color='blue',label='non disease')\nplt.bar(1,sum(y==1)/float(l),width=0.8,color='red',label='disease')\nplt.xlabel('0: non disease, 1: disease')\nplt.title('target')", "_____no_output_____" ] ], [ [ "### 0) Age", "_____no_output_____" ] ], [ [ "ct = pd.crosstab(X[:,0], y)\nct.plot.bar(stacked=True,figsize=(12,3))\nplt.xlabel('age')", "_____no_output_____" ] ], [ [ "### 1) Sex", "_____no_output_____" ] ], [ [ "ct = pd.crosstab(X[:,1], y)\nct.plot.bar(stacked=True,figsize=(4,3))\nplt.xlabel('0: female, 1: male')", "_____no_output_____" ] ], [ [ "### 2) Chest pain type", "_____no_output_____" ] ], [ [ "ct = pd.crosstab(X[:,2], y)\nct.plot.bar(stacked=True,figsize=(8,3))\nplt.xlabel('Chest pain type')", "_____no_output_____" ] ], [ [ "### 3) Resting blood pressure", "_____no_output_____" ] ], [ [ "#ct = pd.crosstab(X[:,3], y)\n#ct.plot.histo(stacked=True,figsize=(10,3))\n#plt.xlabel('Resting blood pressure')", "_____no_output_____" ] ], [ [ "### 5) Fasting blood sugar", "_____no_output_____" ] ], [ [ "pd.crosstab(X[:,5], y).plot.bar(stacked=True,figsize=(4,3))\nplt.xlabel('0: <120 mg/dl, 1: > 
120 mg/dl')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc3ca5d409d2f52bd54f024f9b73bd9c7ec8d0e
49,401
ipynb
Jupyter Notebook
filtrado de datos pandas.ipynb
Quisutd3us/Python-Pandas
39f53ac98ca6a5fdcc8dcdf244ae3556ed6c2de2
[ "MIT" ]
null
null
null
filtrado de datos pandas.ipynb
Quisutd3us/Python-Pandas
39f53ac98ca6a5fdcc8dcdf244ae3556ed6c2de2
[ "MIT" ]
null
null
null
filtrado de datos pandas.ipynb
Quisutd3us/Python-Pandas
39f53ac98ca6a5fdcc8dcdf244ae3556ed6c2de2
[ "MIT" ]
null
null
null
34.116713
101
0.263274
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df=pd.read_csv(\"C:/Users/d3us/Desktop/cursoDataScienceLinkedIn/bd/2008.csv\", nrows=100000)\ndf.head()", "_____no_output_____" ], [ "df[['Year','Month','DayOfWeek','Cancelled']][:10]", "_____no_output_____" ], [ "#consultas\ndf[df['ArrDelay']>60].head(20)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df[((df['Year']==2008) & (df['Origin']=='LAX')) & (df['ArrDelay']>100)]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cbc3cfdd3f5e79536dc7bf6c24d001e5f33852f0
346,896
ipynb
Jupyter Notebook
examples/notebooks/Context.ipynb
gaunthan/light-weight-refinenet
3e947f1ba13a44f91e56eb44a12a0352831583f4
[ "BSD-2-Clause" ]
null
null
null
examples/notebooks/Context.ipynb
gaunthan/light-weight-refinenet
3e947f1ba13a44f91e56eb44a12a0352831583f4
[ "BSD-2-Clause" ]
null
null
null
examples/notebooks/Context.ipynb
gaunthan/light-weight-refinenet
3e947f1ba13a44f91e56eb44a12a0352831583f4
[ "BSD-2-Clause" ]
null
null
null
1,875.113514
342,240
0.960614
[ [ [ "# PASCAL Context - Light-Weight Refinenet\n\n## 59 semantic classes + background\n\n### Light-Weight RefineNet based on ResNet-101/152", "_____no_output_____" ] ], [ [ "import six\nimport sys\nsys.path.append('../../')\n\nfrom models.resnet import rf_lw101, rf_lw152", "_____no_output_____" ], [ "from utils.helpers import prepare_img", "_____no_output_____" ], [ "%matplotlib inline\n\nimport glob\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\nfrom PIL import Image", "_____no_output_____" ], [ "cmap = np.load('../../utils/cmap.npy')\nhas_cuda = torch.cuda.is_available()\nimg_dir = '../imgs/Context/'\nimgs = glob.glob('{}*.jpg'.format(img_dir))\nn_classes = 60", "_____no_output_____" ], [ "# Initialise models\nmodel_inits = { \n 'rf_lw101_context' : rf_lw101, # key / constructor\n 'rf_lw152_context' : rf_lw152,\n }\n\nmodels = dict()\nfor key,fun in six.iteritems(model_inits):\n net = fun(n_classes, pretrained=True).eval()\n if has_cuda:\n net = net.cuda()\n models[key] = net", "_____no_output_____" ], [ "# Figure 1 from the supplementary\nn_cols = len(models) + 2 # 1 - for image, 1 - for GT\nn_rows = len(imgs)\n\nplt.figure(figsize=(16, 12))\nidx = 1\n\nwith torch.no_grad():\n for img_path in imgs:\n img = np.array(Image.open(img_path))\n msk = np.array(Image.open(img_path.replace('jpg', 'png')))\n orig_size = img.shape[:2][::-1]\n \n img_inp = torch.tensor(prepare_img(img).transpose(2, 0, 1)[None]).float()\n if has_cuda:\n img_inp = img_inp.cuda()\n \n plt.subplot(n_rows, n_cols, idx)\n plt.imshow(img)\n plt.title('img')\n plt.axis('off')\n idx += 1\n \n plt.subplot(n_rows, n_cols, idx)\n plt.imshow(msk)\n plt.title('gt')\n plt.axis('off')\n idx += 1\n \n for mname, mnet in six.iteritems(models):\n segm = mnet(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)\n segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)\n segm = cmap[segm.argmax(axis=2).astype(np.uint8)]\n \n plt.subplot(n_rows, n_cols, idx)\n plt.imshow(segm)\n plt.title(mname)\n plt.axis('off')\n idx += 1", "/home/kerry/anaconda3/lib/python3.7/site-packages/torch/nn/modules/upsampling.py:129: UserWarning: nn.Upsample is deprecated. Use nn.functional.interpolate instead.\n warnings.warn(\"nn.{} is deprecated. Use nn.functional.interpolate instead.\".format(self.name))\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cbc3d9ed774285438a9d9237d43c48a6bf18a562
60,373
ipynb
Jupyter Notebook
Recognising_Trend_in_Time_Series_Data.ipynb
ishujaswani/ishujaswani.github.io
68137b0365902892498a573e5a125b009a55179d
[ "Unlicense" ]
null
null
null
Recognising_Trend_in_Time_Series_Data.ipynb
ishujaswani/ishujaswani.github.io
68137b0365902892498a573e5a125b009a55179d
[ "Unlicense" ]
null
null
null
Recognising_Trend_in_Time_Series_Data.ipynb
ishujaswani/ishujaswani.github.io
68137b0365902892498a573e5a125b009a55179d
[ "Unlicense" ]
null
null
null
83.273103
21,082
0.698574
[ [ [ "", "_____no_output_____" ] ], [ [ "## STATSMODEL", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport statsmodels.api as sm", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "df=sm.datasets.macrodata.load_pandas().data #in order to load data from statsmodels\ndf", "_____no_output_____" ], [ "#if you want information from statsmodels dataset then:\nprint(sm.datasets.macrodata.NOTE)", "::\n Number of Observations - 203\n\n Number of Variables - 14\n\n Variable name definitions::\n\n year - 1959q1 - 2009q3\n quarter - 1-4\n realgdp - Real gross domestic product (Bil. of chained 2005 US$,\n seasonally adjusted annual rate)\n realcons - Real personal consumption expenditures (Bil. of chained\n 2005 US$, seasonally adjusted annual rate)\n realinv - Real gross private domestic investment (Bil. of chained\n 2005 US$, seasonally adjusted annual rate)\n realgovt - Real federal consumption expenditures & gross investment\n (Bil. of chained 2005 US$, seasonally adjusted annual rate)\n realdpi - Real private disposable income (Bil. of chained 2005\n US$, seasonally adjusted annual rate)\n cpi - End of the quarter consumer price index for all urban\n consumers: all items (1982-84 = 100, seasonally adjusted).\n m1 - End of the quarter M1 nominal money stock (Seasonally\n adjusted)\n tbilrate - Quarterly monthly average of the monthly 3-month\n treasury bill: secondary market rate\n unemp - Seasonally adjusted unemployment rate (%)\n pop - End of the quarter total population: all ages incl. armed\n forces over seas\n infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)\n realint - Real interest rate (tbilrate - infl)\n\n" ], [ "", "_____no_output_____" ], [ "#we can convert years data to date by using pandas\n#BUT statsmodels also has the capability to do that:\ndf.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1','2009Q3'))", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df['realgdp'].plot()", "_____no_output_____" ], [ "#we are gonna use stats model to get the trend.\nresult=sm.tsa.filters.hpfilter(df['realgdp'])", "_____no_output_____" ], [ "type(result)", "_____no_output_____" ], [ "#this hpfilter gives a tuple. 1- is the cycle and 2- is the trend\n#in order to catch the trend line we can store that tuple into two diff vars\ngdp_cycle,gdp_trend=sm.tsa.filters.hpfilter(df['realgdp'])", "_____no_output_____" ], [ "#adding a trend coloum to the df\ndf['trend'] = gdp_trend\ndf['Cycle'] = gdp_cycle", "_____no_output_____" ], [ "#ploting the realgdp with the trend line in order to show the trend of the realgdp.\ndf[['realgdp','trend']]['2000-03-31':].plot()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc3dd2715d30f55f2481d137c9b3098df3a2b8f
335,956
ipynb
Jupyter Notebook
notebooks/ch-states/single-qubit-gates.ipynb
InfamousPlatypus/platypus
54a8eae3577513e9168a5700f0e4431fa8b46f83
[ "Apache-2.0" ]
2
2022-03-09T13:39:05.000Z
2022-03-24T16:35:55.000Z
notebooks/ch-states/single-qubit-gates.ipynb
InfamousPlatypus/platypus
54a8eae3577513e9168a5700f0e4431fa8b46f83
[ "Apache-2.0" ]
2
2022-03-07T16:25:36.000Z
2022-03-10T09:28:52.000Z
notebooks/ch-states/single-qubit-gates.ipynb
InfamousPlatypus/platypus
54a8eae3577513e9168a5700f0e4431fa8b46f83
[ "Apache-2.0" ]
1
2021-11-22T14:15:42.000Z
2021-11-22T14:15:42.000Z
80.545673
49,044
0.804364
[ [ [ "# Single Qubit Gates", "_____no_output_____" ], [ "In the previous section we looked at all the possible states a qubit could be in. We saw that qubits could be represented by 2D vectors, and that their states are limited to the form:\n\n$$ |q\\rangle = \\cos{(\\tfrac{\\theta}{2})}|0\\rangle + e^{i\\phi}\\sin{\\tfrac{\\theta}{2}}|1\\rangle $$\n\nWhere $\\theta$ and $\\phi$ are real numbers. In this section we will cover _gates,_ the operations that change a qubit between these states. Due to the number of gates and the similarities between them, this chapter is at risk of becoming a list. To counter this, we have included a few digressions to introduce important ideas at appropriate places throughout the chapter. \n\n\nIn _The Atoms of Computation_ we came across some gates and used them to perform a classical computation. An important feature of quantum circuits is that, between initialising the qubits and measuring them, the operations (gates) are *_always_* reversible! These reversible gates can be represented as matrices, and as rotations around the Bloch sphere. ", "_____no_output_____" ] ], [ [ "from qiskit import *\nfrom math import pi\nfrom qiskit.visualization import plot_bloch_multivector", "_____no_output_____" ] ], [ [ "## 1. The Pauli Gates <a id=\"pauli\"></a>\nYou should be familiar with the Pauli matrices from the linear algebra section. If any of the maths here is new to you, you should use the linear algebra section to bring yourself up to speed. We will see here that the Pauli matrices can represent some very commonly used quantum gates.\n\n### 1.1 The X-Gate <a id=\"xgate\"></a>\nThe X-gate is represented by the Pauli-X matrix:\n\n$$ X = \\begin{bmatrix} 0 & 1 \\\\ 1 & 0 \\end{bmatrix} = |0\\rangle\\langle1| + |1\\rangle\\langle0| $$\n\nTo see the effect a gate has on a qubit, we simply multiply the qubit’s statevector by the gate. We can see that the X-gate switches the amplitudes of the states $|0\\rangle$ and $|1\\rangle$:\n\n$$ X|0\\rangle = \\begin{bmatrix} 0 & 1 \\\\ 1 & 0 \\end{bmatrix}\\begin{bmatrix} 1 \\\\ 0 \\end{bmatrix} = \\begin{bmatrix} 0 \\\\ 1 \\end{bmatrix} = |1\\rangle$$\n\n\n\n", "_____no_output_____" ], [ "<!-- ::: q-block.reminder -->\n\n## Reminders\n\n<details>\n <summary>Multiplying Vectors by Matrices</summary>\nMatrix multiplication is a generalisation of the inner product we saw in the last chapter. 
In the specific case of multiplying a vector by a matrix (as seen above), we always get a vector back:\n\n$$ M|v\\rangle = \\begin{bmatrix}a & b \\\\ c & d \\end{bmatrix}\\begin{bmatrix}v_0 \\\\ v_1 \\end{bmatrix}\n = \\begin{bmatrix}a\\cdot v_0 + b \\cdot v_1 \\\\ c \\cdot v_0 + d \\cdot v_1 \\end{bmatrix} $$\n\nIn quantum computing, we can write our matrices in terms of basis vectors:\n\n$$X = |0\\rangle\\langle1| + |1\\rangle\\langle0|$$\n\nThis can sometimes be clearer than using a box matrix as we can see what different multiplications will result in:\n\n$$\n\\begin{aligned}\n X|1\\rangle & = (|0\\rangle\\langle1| + |1\\rangle\\langle0|)|1\\rangle \\\\\n & = |0\\rangle\\langle1|1\\rangle + |1\\rangle\\langle0|1\\rangle \\\\\n & = |0\\rangle \\times 1 + |1\\rangle \\times 0 \\\\\n & = |0\\rangle\n\\end{aligned}\n$$\n\nIn fact, when we see a ket and a bra multiplied like this:\n\n$$ |a\\rangle\\langle b| $$\n\nthis is called the _outer product_, which follows the rule:\n\n$$\n |a\\rangle\\langle b| = \n \\begin{bmatrix}\n a_0 b_0 & a_0 b_1 & \\dots & a_0 b_n\\\\\n a_1 b_0 & \\ddots & & \\vdots \\\\\n \\vdots & & \\ddots & \\vdots \\\\\n a_n b_0 & \\dots & \\dots & a_n b_n \\\\\n \\end{bmatrix}\n$$\n\nWe can see this does indeed result in the X-matrix as seen above:\n\n$$\n |0\\rangle\\langle1| + |1\\rangle\\langle0| = \n \\begin{bmatrix}0 & 1 \\\\ 0 & 0 \\end{bmatrix} +\n \\begin{bmatrix}0 & 0 \\\\ 1 & 0 \\end{bmatrix} = \n \\begin{bmatrix}0 & 1 \\\\ 1 & 0 \\end{bmatrix} = X\n$$\n</details>\n\n<!-- ::: -->\n", "_____no_output_____" ], [ "In Qiskit, we can create a short circuit to verify this:", "_____no_output_____" ] ], [ [ "# Let's do an X-gate on a |0> qubit\nqc = QuantumCircuit(1)\nqc.x(0)\nqc.draw()", "_____no_output_____" ] ], [ [ "Let's see the result of the above circuit. **Note:** Here we use <code>plot_bloch_multivector()</code> which takes a qubit's statevector instead of the Bloch vector.", "_____no_output_____" ] ], [ [ "# Let's see the result\nbackend = Aer.get_backend('statevector_simulator')\nout = execute(qc,backend).result().get_statevector()\nplot_bloch_multivector(out)", "_____no_output_____" ] ], [ [ "We can indeed see the state of the qubit is $|1\\rangle$ as expected. We can think of this as a rotation by $\\pi$ radians around the *x-axis* of the Bloch sphere. The X-gate is also often called a NOT-gate, referring to its classical analogue.\n\n### 1.2 The Y & Z-gates <a id=\"ynzgatez\"></a>\nSimilarly to the X-gate, the Y & Z Pauli matrices also act as the Y & Z-gates in our quantum circuits:\n\n\n$$ Y = \\begin{bmatrix} 0 & -i \\\\ i & 0 \\end{bmatrix} \\quad\\quad\\quad\\quad Z = \\begin{bmatrix} 1 & 0 \\\\ 0 & -1 \\end{bmatrix} $$\n\n$$ Y = -i|0\\rangle\\langle1| + i|1\\rangle\\langle0| \\quad\\quad Z = |0\\rangle\\langle0| - |1\\rangle\\langle1| $$\n\nAnd, unsurprisingly, they also respectively perform rotations by [[$\\pi$|$2\\pi$|$\\frac{\\pi}{2}$]] around the y and z-axis of the Bloch sphere.\n\nBelow is a widget that displays a qubit’s state on the Bloch sphere, pressing one of the buttons will perform the gate on the qubit:", "_____no_output_____" ] ], [ [ "# Run the code in this cell to see the widget\nfrom qiskit_textbook.widgets import gate_demo\ngate_demo(gates='pauli')", "_____no_output_____" ] ], [ [ "In Qiskit, we can apply the Y and Z-gates to our circuit using:", "_____no_output_____" ] ], [ [ "qc.y(0) # Do Y-gate on qubit 0\nqc.z(0) # Do Z-gate on qubit 0\nqc.draw()", "_____no_output_____" ] ], [ [ "## 2. 
Digression: The X, Y & Z-Bases <a id=\"xyzbases\"></a>", "_____no_output_____" ], [ "<!-- ::: q-block.reminder -->\n\n## Reminders\n\n<details>\n <summary>Eigenvectors of Matrices</summary>\nWe have seen that multiplying a vector by a matrix results in a vector:\n \n$$\n M|v\\rangle = |v'\\rangle \\leftarrow \\text{new vector}\n $$\nIf we chose the right vectors and matrices, we can find a case in which this matrix multiplication is the same as doing a multiplication by a scalar:\n \n$$\n M|v\\rangle = \\lambda|v\\rangle\n $$\n(Above, $M$ is a matrix, and $\\lambda$ is a scalar). For a matrix $M$, any vector that has this property is called an <i>eigenvector</i> of $M$. For example, the eigenvectors of the Z-matrix are the states $|0\\rangle$ and $|1\\rangle$:\n\n$$\n \\begin{aligned}\n Z|0\\rangle & = |0\\rangle \\\\\n Z|1\\rangle & = -|1\\rangle\n \\end{aligned}\n $$\nSince we use vectors to describe the state of our qubits, we often call these vectors <i>eigenstates</i> in this context. Eigenvectors are very important in quantum computing, and it is important you have a solid grasp of them.\n</details>\n\n<!-- ::: -->\n", "_____no_output_____" ], [ "You may also notice that the Z-gate appears to have no effect on our qubit when it is in either of these two states. This is because the states $|0\\rangle$ and $|1\\rangle$ are the two _eigenstates_ of the Z-gate. In fact, the _computational basis_ (the basis formed by the states $|0\\rangle$ and $|1\\rangle$) is often called the Z-basis. This is not the only basis we can use, a popular basis is the X-basis, formed by the eigenstates of the X-gate. We call these two vectors $|+\\rangle$ and $|-\\rangle$:\n\n$$ |+\\rangle = \\tfrac{1}{\\sqrt{2}}(|0\\rangle + |1\\rangle) = \\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 \\\\ 1 \\end{bmatrix}$$\n\n$$ |-\\rangle = \\tfrac{1}{\\sqrt{2}}(|0\\rangle - |1\\rangle) = \\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 \\\\ -1 \\end{bmatrix} $$\n\nAnother less commonly used basis is that formed by the eigenstates of the Y-gate. These are called:\n\n$$ |\\circlearrowleft\\rangle, \\quad |\\circlearrowright\\rangle$$\n\nWe leave it as an exercise to calculate these. There are in fact an infinite number of bases; to form one, we simply need two orthogonal vectors.\n\n### Quick Exercises\n1. Verify that $|+\\rangle$ and $|-\\rangle$ are in fact eigenstates of the X-gate.\n2. What eigenvalues do they have? \n3. Why would we not see these eigenvalues appear on the Bloch sphere?\n4. Find the eigenstates of the Y-gate, and their co-ordinates on the Bloch sphere.\n\nUsing only the Pauli-gates it is impossible to move our initialised qubit to any state other than $|0\\rangle$ or $|1\\rangle$, i.e. we cannot achieve superposition. This means we can see no behaviour different to that of a classical bit. To create more interesting states we will need more gates!\n\n## 3. The Hadamard Gate <a id=\"hgate\"></a>\n\nThe Hadamard gate (H-gate) is a fundamental quantum gate. It allows us to move away from the poles of the Bloch sphere and create a superposition of $|0\\rangle$ and $|1\\rangle$. 
It has the matrix:\n\n$$ H = \\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 & 1 \\\\ 1 & -1 \\end{bmatrix} $$\n\nWe can see that this performs the transformations below:\n\n$$ H|0\\rangle = |+\\rangle $$\n\n$$ H|1\\rangle = |-\\rangle $$\n\nThis can be thought of as a rotation around the Bloch vector `[1,0,1]` (the line between the x & z-axis), or as transforming the state of the qubit between the X and Z bases.\n\nYou can play around with these gates using the widget below:", "_____no_output_____" ] ], [ [ "# Run the code in this cell to see the widget\nfrom qiskit_textbook.widgets import gate_demo\ngate_demo(gates='pauli+h')", "_____no_output_____" ] ], [ [ "### Quick Exercise\n1. Write the H-gate as the outer products of vectors $|0\\rangle$, $|1\\rangle$, $|+\\rangle$ and $|-\\rangle$.\n2. Show that applying the sequence of gates: HZH, to any qubit state is equivalent to applying an X-gate.\n3. Find a combination of X, Z and H-gates that is equivalent to a Y-gate (ignoring global phase).\n\n## 4. Digression: Measuring in Different Bases <a id=\"measuring\"></a>\nWe have seen that the Z-axis is not intrinsically special, and that there are infinitely many other bases. Similarly with measurement, we don’t always have to measure in the computational basis (the Z-basis), we can measure our qubits in any basis.\n\nAs an example, let’s try measuring in the X-basis. We can calculate the probability of measuring either $|+\\rangle$ or $|-\\rangle$:\n\n$$ p(|+\\rangle) = |\\langle+|q\\rangle|^2, \\quad p(|-\\rangle) = |\\langle-|q\\rangle|^2 $$\n\nAnd after measurement, we are guaranteed to have a qubit in one of these two states. Since Qiskit only allows measuring in the Z-basis, we must create our own using Hadamard gates:", "_____no_output_____" ] ], [ [ "# Create the X-measurement function:\ndef x_measurement(qc,qubit,cbit):\n \"\"\"Measure 'qubit' in the X-basis, and store the result in 'cbit'\"\"\"\n qc.h(qubit)\n qc.measure(qubit, cbit)\n qc.h(qubit)\n return qc\n\ninitial_state = [0,1]\n# Initialise our qubit and measure it\nqc = QuantumCircuit(1,1)\nqc.initialize(initial_state, 0)\nx_measurement(qc, 0, 0) # measure qubit 0 to classical bit 0\nqc.draw()", "_____no_output_____" ] ], [ [ "In the quick exercises above, we saw you could create an X-gate by sandwiching our Z-gate between two H-gates:\n\n$$ X = HZH $$\n\nStarting in the Z-basis, the H-gate switches our qubit to the X-basis, the Z-gate performs a NOT in the X-basis, and the final H-gate returns our qubit to the Z-basis.\n\n<img src=\"images/bloch_HZH.svg\">\n\nWe can verify this always behaves like an X-gate by multiplying the matrices:\n\n$$\nHZH =\n\\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 & 1 \\\\ 1 & -1 \\end{bmatrix}\n\\begin{bmatrix} 1 & 0 \\\\ 0 & -1 \\end{bmatrix}\n\\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 & 1 \\\\ 1 & -1 \\end{bmatrix}\n=\n\\begin{bmatrix} 0 & 1 \\\\ 1 & 0 \\end{bmatrix}\n=X\n$$\n\nFollowing the same logic, we have created an X-measurement by sandwiching our Z-measurement between two H-gates.\n\n<img src=\"images/x-measurement.svg\">\n\nLet’s now see the results:", "_____no_output_____" ] ], [ [ "backend = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit\nout_state = execute(qc,backend).result().get_statevector() # Do the simulation, returning the state vector\nplot_bloch_multivector(out_state) # Display the output state vector", "_____no_output_____" ] ], [ [ "We initialised our qubit in the state $|1\\rangle$, but we can see that, after the measurement, we have collapsed our 
qubit to the states $|+\\rangle$ or $|-\\rangle$. If you run the cell again, you will see different results, but the final state of the qubit will always be $|+\\rangle$ or $|-\\rangle$.\n\n### Quick Exercises\n1.\tIf we initialise our qubit in the state $|+\\rangle$, what is the probability of measuring it in state $|-\\rangle$?\n2.\tUse Qiskit to display the probability of measuring a $|0\\rangle$ qubit in the states $|+\\rangle$ and $|-\\rangle$ (**Hint:** you might want to use <code>.get_counts()</code> and <code>plot_histogram()</code>).\n3.\tTry to create a function that measures in the Y-basis.\n\nMeasuring in different bases allows us to see Heisenberg’s famous uncertainty principle in action. Having certainty of measuring a state in the Z-basis removes all certainty of measuring a specific state in the X-basis, and vice versa. A common misconception is that the uncertainty is due to the limits in our equipment, but here we can see the uncertainty is actually part of the nature of the qubit. \n\nFor example, if we put our qubit in the state $|0\\rangle$, our measurement in the Z-basis is certain to be $|0\\rangle$, but our measurement in the X-basis is completely random! Similarly, if we put our qubit in the state $|-\\rangle$, our measurement in the X-basis is certain to be $|-\\rangle$, but now any measurement in the Z-basis will be completely random.\n\nMore generally: _Whatever state our quantum system is in, there is always a measurement that has a deterministic outcome._ \n\nThe introduction of the H-gate has allowed us to explore some interesting phenomena, but we are still very limited in our quantum operations. Let us now introduce a new type of gate:\n\n## The R<sub>&straightphi;</sub>-gate\n\nThe $R_\\phi$-gate is _parametrised,_ that is, it needs a number ($\\phi$) to tell it exactly what to do. The $R_\\phi$-gate performs a rotation of $\\phi$ around the Z-axis direction (and as such is sometimes also known as the $R_z$-gate). It has the matrix:\n\n$$\nR_\\phi = \\begin{bmatrix} 1 & 0 \\\\ 0 & e^{i\\phi} \\end{bmatrix}\n$$\n\nWhere $\\phi$ is a real number.\n\nYou can use the widget below to play around with the $R_\\phi$-gate, specify $\\phi$ using the slider:", "_____no_output_____" ] ], [ [ "# Run the code in this cell to see the widget\nfrom qiskit_textbook.widgets import gate_demo\ngate_demo(gates='pauli+h+rz')", "_____no_output_____" ] ], [ [ "In Qiskit, we specify an $R_\\phi$-gate using `rz(phi, qubit)`:", "_____no_output_____" ] ], [ [ "qc = QuantumCircuit(1)\nqc.rz(pi/4, 0)\nqc.draw()", "_____no_output_____" ] ], [ [ "You may notice that the Z-gate is a special case of the $R_\\phi$-gate, with $\\phi = \\pi$. In fact there are three more commonly referenced gates we will mention in this chapter, all of which are special cases of the $R_\\phi$-gate:\n\n## 6. The I, S and T-gates <a id=\"istgates\"></a>\n\n### 6.1 The I-gate <a id=\"igate\"></a>\n\nFirst comes the I-gate (aka ‘Id-gate’ or ‘Identity gate’). This is simply a gate that does nothing. Its matrix is the identity matrix:\n\n$$\nI = \\begin{bmatrix} 1 & 0 \\\\ 0 & 1\\end{bmatrix}\n$$\n\nApplying the identity gate anywhere in your circuit should have no effect on the qubit state, so it’s interesting this is even considered a gate. 
There are two main reasons behind this, one is that it is often used in calculations, for example: proving the X-gate is its own inverse:\n\n$$ I = XX $$\n\nThe second, is that it is often useful when considering real hardware to specify a ‘do-nothing’ or ‘none’ operation.\n\n#### Quick Exercise\n1. What are the eigenstates of the I-gate?\n\n### 6.2 The S-gates <a id=\"sgate\"></a>\n\nThe next gate to mention is the S-gate (sometimes known as the $\\sqrt{Z}$-gate), this is an $R_\\phi$-gate with $\\phi = \\pi/2$. It does a quarter-turn around the Bloch sphere. It is important to note that unlike every gate introduced in this chapter so far, the S-gate is **not** its own inverse! As a result, you will often see the $S^\\dagger$-gate, (also “S-dagger”, “Sdg” or $\\sqrt{Z}^\\dagger$-gate). The $S^\\dagger$-gate is clearly an $R_\\phi$-gate with $\\phi = -\\pi/2$:\n\n$$ S = \\begin{bmatrix} 1 & 0 \\\\ 0 & e^{\\frac{i\\pi}{2}} \\end{bmatrix}, \\quad S^\\dagger = \\begin{bmatrix} 1 & 0 \\\\ 0 & e^{-\\frac{i\\pi}{2}} \\end{bmatrix}$$\n\nThe name \"$\\sqrt{Z}$-gate\" is due to the fact that two successively applied S-gates has the same effect as one Z-gate:\n\n$$ SS|q\\rangle = Z|q\\rangle $$\n\nThis notation is common throughout quantum computing.\n\nTo add an S-gate in Qiskit:", "_____no_output_____" ] ], [ [ "qc = QuantumCircuit(1)\nqc.s(0) # Apply S-gate to qubit 0\nqc.sdg(0) # Apply Sdg-gate to qubit 0\nqc.draw()", "_____no_output_____" ] ], [ [ "### 6.3 The T-gate <a id=\"tgate\"></a>\nThe T-gate is a very commonly used gate, it is an $R_\\phi$-gate with $\\phi = \\pi/4$:\n\n$$ T = \\begin{bmatrix} 1 & 0 \\\\ 0 & e^{\\frac{i\\pi}{4}} \\end{bmatrix}, \\quad T^\\dagger = \\begin{bmatrix} 1 & 0 \\\\ 0 & e^{-\\frac{i\\pi}{4}} \\end{bmatrix}$$\n\nAs with the S-gate, the T-gate is sometimes also known as the $\\sqrt[4]{Z}$-gate.\n\nIn Qiskit:", "_____no_output_____" ] ], [ [ "qc = QuantumCircuit(1)\nqc.t(0) # Apply T-gate to qubit 0\nqc.tdg(0) # Apply Tdg-gate to qubit 0\nqc.draw()", "_____no_output_____" ] ], [ [ "You can use the widget below to play around with all the gates introduced in this chapter so far:", "_____no_output_____" ] ], [ [ "# Run the code in this cell to see the widget\nfrom qiskit_textbook.widgets import gate_demo\ngate_demo()", "_____no_output_____" ] ], [ [ "## 7. General U-gates <a id=\"generalU3\"></a>\n\nAs we saw earlier, the I, Z, S & T-gates were all special cases of the more general $R_\\phi$-gate. In the same way, the $U_3$-gate is the most general of all single-qubit quantum gates. It is a parametrised gate of the form:\n\n$$\nU_3(\\theta, \\phi, \\lambda) = \\begin{bmatrix} \\cos(\\theta/2) & -e^{i\\lambda}\\sin(\\theta/2) \\\\\n e^{i\\phi}\\sin(\\theta/2) & e^{i\\lambda+i\\phi}\\cos(\\theta/2)\n \\end{bmatrix}\n$$\n\nEvery gate in this chapter could be specified as $U_3(\\theta,\\phi,\\lambda)$, but it is unusual to see this in a circuit diagram, possibly due to the difficulty in reading this.\n\nQiskit provides $U_2$ and $U_1$-gates, which are specific cases of the $U_3$ gate in which $\\theta = \\tfrac{\\pi}{2}$, and $\\theta = \\phi = 0$ respectively. 
You will notice that the $U_1$-gate is equivalent to the $R_\\phi$-gate.\n\n$$\n\\begin{aligned}\nU_3(\\tfrac{\\pi}{2}, \\phi, \\lambda) = U_2 = \\tfrac{1}{\\sqrt{2}}\\begin{bmatrix} 1 & -e^{i\\lambda} \\\\\n e^{i\\phi} & e^{i\\lambda+i\\phi}\n \\end{bmatrix}\n& \\quad &\nU_3(0, 0, \\lambda) = U_1 = \\begin{bmatrix} 1 & 0 \\\\\n 0 & e^{i\\lambda}\\\\\n \\end{bmatrix}\n\\end{aligned}\n$$\n\nBefore running on real IBM quantum hardware, all single-qubit operations are compiled down to $U_1$ , $U_2$ and $U_3$ . For this reason they are sometimes called the _physical gates_.\n\nIt should be obvious from this that there are an infinite number of possible gates, and that this also includes $R_x$ and $R_y$-gates, although they are not mentioned here. It must also be noted that there is nothing special about the Z-basis, except that it has been selected as the standard computational basis. That is why we have names for the S and T-gates, but not their X and Y equivalents (e.g. $\\sqrt{X}$ and $\\sqrt[4]{Y}$).\n", "_____no_output_____" ] ], [ [ "import qiskit\nqiskit.__qiskit_version__", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
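The single-qubit gates notebook archived above leans on the identity HZH = X when it builds its X-basis measurement. As a quick standalone check (a minimal NumPy sketch, not part of the original notebook), the three matrices can be multiplied directly:

```python
import numpy as np

# Pauli-X, Pauli-Z and Hadamard written as plain NumPy arrays
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)

# The notebook's identity: sandwiching Z between two Hadamards gives X
print(H @ Z @ H)                  # [[0. 1.] [1. 0.]]
assert np.allclose(H @ Z @ H, X)
```

The same two-line check works for the chapter's other gate identities, e.g. that two S-gates compose to a Z-gate.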
cbc3e23bc737d0e8cfe5a7d624c346d26b1c7dfe
8,746
ipynb
Jupyter Notebook
notebooks/create_dtm.ipynb
david-siqi-liu/yelp_sentiment_analysis
bd0112a4f043feb082f92eb4fb2b5e5013016bf5
[ "MIT" ]
1
2021-12-14T23:47:42.000Z
2021-12-14T23:47:42.000Z
notebooks/create_dtm.ipynb
david-siqi-liu/yelp_sentiment_analysis
bd0112a4f043feb082f92eb4fb2b5e5013016bf5
[ "MIT" ]
null
null
null
notebooks/create_dtm.ipynb
david-siqi-liu/yelp_sentiment_analysis
bd0112a4f043feb082f92eb4fb2b5e5013016bf5
[ "MIT" ]
3
2021-07-25T10:11:55.000Z
2021-12-14T23:50:15.000Z
8,746
8,746
0.695518
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "GOOGLE_COLAB = True", "_____no_output_____" ], [ "%reload_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\nimport pickle", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "import sys\nif GOOGLE_COLAB:\n sys.path.append('drive/My Drive/yelp_sentiment_analysis')\nelse:\n sys.path.append('../')\n\nfrom yelpsent import data\nfrom yelpsent import features\nfrom yelpsent import metrics\nfrom yelpsent import visualization\nfrom yelpsent import models", "_____no_output_____" ], [ "import importlib\nimportlib.reload(features)", "_____no_output_____" ] ], [ [ "# Load Dataset", "_____no_output_____" ] ], [ [ "if GOOGLE_COLAB:\n data_train, data_test = data.load_dataset(\"drive/My Drive/yelp_sentiment_analysis/data/yelp_train.json\",\n \"drive/My Drive/yelp_sentiment_analysis/data/yelp_test.json\")\nelse:\n data_train, data_test = data.load_dataset(\"../data/yelp_train.json\",\n \"../data/yelp_test.json\")", "_____no_output_____" ], [ "X_train = data_train['review'].tolist()\ny_train = data_train['sentiment'].tolist()", "_____no_output_____" ], [ "X_test = data_test['review'].tolist()\ny_test = data_test['sentiment'].tolist()", "_____no_output_____" ] ], [ [ "# Vectorize", "_____no_output_____" ], [ "- CountVectorizer\n- Unigram + Bigram\n- Remove non-words/numbers\n- Remove stopwords\n- Lemmatization", "_____no_output_____" ] ], [ [ "import nltk", "_____no_output_____" ], [ "nltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.\n" ], [ "vect = features.YelpSentCountVectorizer(ngram_range=(1,2),\n remove_nonwords=True,\n remove_stopwords=True,\n stem=False,\n lemmatize=True)", "_____no_output_____" ], [ "%time cv = vect.fit(X_train)", "CPU times: user 28min 14s, sys: 7.3 s, total: 28min 22s\nWall time: 28min 24s\n" ], [ "with open('drive/My Drive/yelp_sentiment_analysis/pickles/cv.pickle', 'wb') as f:\n pickle.dump(cv, f)", "_____no_output_____" ], [ "X_train_dtm = vect.transform(X_train)\nX_test_dtm = vect.transform(X_test)", "_____no_output_____" ], [ "with open('drive/My Drive/yelp_sentiment_analysis/pickles/X_train_dtm.pickle', 'wb') as f:\n 
pickle.dump(X_train_dtm, f)\n\nwith open('drive/My Drive/yelp_sentiment_analysis/pickles/X_test_dtm.pickle', 'wb') as f:\n pickle.dump(X_test_dtm, f)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
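The Yelp notebook above builds its document-term matrices with a project-specific `YelpSentCountVectorizer` whose internals are not shown in this row. For the core idea it wraps — a unigram+bigram bag-of-words with stop words removed — a plain scikit-learn sketch looks like this (toy documents, illustrative only; the notebook's extra lemmatization step is omitted):

```python
from sklearn.feature_extraction.text import CountVectorizer

# Two toy reviews standing in for the Yelp training text
docs = ["the pizza was great", "service was slow but the pizza was great"]

# Unigrams + bigrams, English stop words dropped
vect = CountVectorizer(ngram_range=(1, 2), stop_words="english")
dtm = vect.fit_transform(docs)

print(dtm.shape)      # (n_documents, n_terms)
print(dtm.toarray())  # dense view of the document-term matrix
```

The fitted vectorizer would then be pickled and reused to transform the test split, exactly as the notebook does with its custom class.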
cbc3e29051ca905aa48618cf44e192ee277675a3
379,907
ipynb
Jupyter Notebook
nlp_demo_riva/riva/nb_demo_speech_api.ipynb
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
nlp_demo_riva/riva/nb_demo_speech_api.ipynb
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
nlp_demo_riva/riva/nb_demo_speech_api.ipynb
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
544.27937
177,644
0.854491
[ [ [ "# Copyright 2021 NVIDIA Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "_____no_output_____" ] ], [ [ "<img src=\"http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png\" style=\"width: 90px; float: right;\">\n\n# Python API Examples\n\nThis notebook walks through the basics of the Riva Speech and Language AI Services.\n\n## Overview\n\nNVIDIA Riva is a platform for building and deploying AI applications that fuse vision, speech and other sensors. It offers a complete workflow to build, train and deploy AI systems that can use visual cues such as gestures and gaze along with speech in context. With the Riva platform, you can:\n\n- Build speech and visual AI applications using pretrained NVIDIA Neural Modules ([NeMo](https://github.com/NVIDIA/NeMo)) available at NVIDIA GPU Cloud ([NGC](https://ngc.nvidia.com/catalog/models?orderBy=modifiedDESC&query=%20label%3A%22NeMo%2FPyTorch%22&quickFilter=models&filters=)).\n\n- Transfer learning: re-train your model on domain-specific data, with NVIDIA [NeMo](https://github.com/NVIDIA/NeMo). NeMo is a toolkit and platform that enables researchers to define and build new state-of-the-art speech and natural language processing models.\n\n- Optimize neural network performance and latency using NVIDIA TensorRT \n\n- Deploy AI applications with TensorRT Inference Server:\n - Support multiple network formats: ONNX, TensorRT plans, PyTorch TorchScript models.\n - Deployement on multiple platforms: from datacenter to edge servers, via Helm to K8s cluster, on NVIDIA Volta/Turing GPUs or Jetson Xavier platforms.\n\nSee the below video for a demo of Riva capabilities.", "_____no_output_____" ] ], [ [ "from IPython.display import IFrame\n\n# Riva Youtube demo video\nIFrame(\"https://www.youtube.com/embed/r264lBi1nMU?rel=0&amp;controls=0&amp;showinfo=0\", width=\"560\", height=\"315\", frameborder=\"0\", allowfullscreen=True)", "_____no_output_____" ] ], [ [ "For more detailed information on Riva, please refer to the [Riva developer documentation](https://developer.nvidia.com/).\n\n## Introduction the Riva Speech and Natural Languages services\n\nRiva offers a rich set of speech and natural language understanding services such as:\n\n- Automated speech recognition (ASR)\n- Text-to-Speech synthesis (TTS)\n- A collection of natural language understanding services such as named entity recognition (NER), punctuation, intent classification.", "_____no_output_____" ], [ "## Learning objectives\n\n- Understand how interact with Riva Speech and Natural Languages APIs, services and use cases\n\n## Requirements and setup\n\nTo execute this notebook, please follow the setup steps in [README](./README.md).\n\nWe first generate some required libraries.", "_____no_output_____" ] ], [ [ "import io\nimport librosa\nfrom time import time\nimport numpy as np\nimport IPython.display as ipd\nimport grpc\nimport requests\n\n# 
NLP proto\nimport riva_api.riva_nlp_pb2 as rnlp\nimport riva_api.riva_nlp_pb2_grpc as rnlp_srv\n\n# ASR proto\nimport riva_api.riva_asr_pb2 as rasr\nimport riva_api.riva_asr_pb2_grpc as rasr_srv\n\n# TTS proto\nimport riva_api.riva_tts_pb2 as rtts\nimport riva_api.riva_tts_pb2_grpc as rtts_srv\nimport riva_api.riva_audio_pb2 as ra", "_____no_output_____" ] ], [ [ "### Create Riva clients and connect to Riva Speech API server\n\nThe below URI assumes a local deployment of the Riva Speech API server on the default port. In case the server deployment is on a different host or via Helm chart on Kubernetes, the user should use an appropriate URI.", "_____no_output_____" ] ], [ [ "channel = grpc.insecure_channel('localhost:50051')\n\nriva_asr = rasr_srv.RivaSpeechRecognitionStub(channel)\nriva_nlp = rnlp_srv.RivaLanguageUnderstandingStub(channel)\nriva_tts = rtts_srv.RivaSpeechSynthesisStub(channel)", "_____no_output_____" ] ], [ [ "## Content\n1. [Offline ASR Example](#1)\n1. [Core NLP Service Examples](#2)\n1. [TTS Service Example](#3)\n1. [Riva NLP Service Examples](#4)\n", "_____no_output_____" ], [ "<a id=\"1\"></a>\n\n## 1. Offline ASR Example\n\nRiva Speech API supports `.wav` files in PCM format, `.alaw`, `.mulaw` and `.flac` formats with single channel in this release. ", "_____no_output_____" ] ], [ [ "# This example uses a .wav file with LINEAR_PCM encoding.\n# read in an audio file from local disk\npath = \"/work/wav/sample.wav\"\naudio, sr = librosa.core.load(path, sr=None)\nwith io.open(path, 'rb') as fh:\n content = fh.read()\nipd.Audio(path)", "_____no_output_____" ], [ "# Set up an offline/batch recognition request\nreq = rasr.RecognizeRequest()\nreq.audio = content # raw bytes\nreq.config.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings\nreq.config.sample_rate_hertz = sr # Audio will be resampled if necessary\nreq.config.language_code = \"en-US\" # Ignored, will route to correct model in future release\nreq.config.max_alternatives = 1 # How many top-N hypotheses to return\nreq.config.enable_automatic_punctuation = True # Add punctuation when end of VAD detected\nreq.config.audio_channel_count = 1 # Mono channel\n\nresponse = riva_asr.Recognize(req)\nasr_best_transcript = response.results[0].alternatives[0].transcript\nprint(\"ASR Transcript:\", asr_best_transcript)\n\nprint(\"\\n\\nFull Response Message:\")\nprint(response)", "ASR Transcript: What is natural language processing? \n\n\nFull Response Message:\nresults {\n alternatives {\n transcript: \"What is natural language processing? \"\n confidence: -8.908161163330078\n }\n channel_tag: 1\n audio_processed: 6.400000095367432\n}\n\n" ] ], [ [ "<a id=\"2\"></a>\n\n## 2. Core NLP Service Examples\n\nAll of the Core NLP Services support batched requests. The maximum batch size,\nif any, of the underlying models is hidden from the end user and automatically\nbatched by the Riva and TRTIS servers.\n\nThe Core NLP API provides three methods currently:\n\n 1. TransformText - map an input string to an output string\n \n 2. ClassifyText - return a single label for the input string\n \n 3. 
ClassifyTokens - return a label per input token", "_____no_output_____" ] ], [ [ "# Use the TextTransform API to run the punctuation model\nreq = rnlp.TextTransformRequest()\nreq.model.model_name = \"riva_punctuation\"\nreq.text.append(\"add punctuation to this sentence\")\nreq.text.append(\"do you have any red nvidia shirts\")\nreq.text.append(\"i need one cpu four gpus and lots of memory \"\n \"for my new computer it's going to be very cool\")\n\nnlp_resp = riva_nlp.TransformText(req)\nprint(\"TransformText Output:\")\nprint(\"\\n\".join([f\" {x}\" for x in nlp_resp.text]))", "TransformText Output:\n Add punctuation to this sentence.\n Do you have any red Nvidia shirts?\n I need one cpu, four gpus and lots of memory for my new computer. It's going to be very cool.\n" ], [ "# Use the TokenClassification API to run a Named Entity Recognition (NER) model\n# Note: the model configuration of the NER model indicates that the labels are\n# in IOB format. Riva, subsequently, knows to:\n# a) ignore 'O' labels\n# b) Remove B- and I- prefixes from labels\n# c) Collapse sequences of B- I- ... I- tokens into a single token\n\nreq = rnlp.TokenClassRequest()\nreq.model.model_name = \"riva_ner\" # If you have deployed a custom model with the domain_name \n # parameter in ServiceMaker's `riva-build` command then you should use \n # \"riva_ner_<your_input_domain_name>\" where <your_input_domain_name>\n # is the name you provided to the domain_name parameter.\n\nreq.text.append(\"Jensen Huang is the CEO of NVIDIA Corporation, \"\n \"located in Santa Clara, California\")\nresp = riva_nlp.ClassifyTokens(req)\n\nprint(\"Named Entities:\")\nfor result in resp.results[0].results:\n print(f\" {result.token} ({result.label[0].class_name})\")", "Named Entities:\n jensen huang (PER)\n nvidia corporation (ORG)\n santa clara (LOC)\n california (LOC)\n" ], [ "# Submit a TextClassRequest for text classification.\n# Riva NLP comes with a default text_classification domain called \"domain_misty\" which consists of \n# 4 classes: meteorology, personality, weather and nomatch\n\nrequest = rnlp.TextClassRequest()\nrequest.model.model_name = \"riva_text_classification_domain\" # If you have deployed a custom model \n # with the `--domain_name` parameter in ServiceMaker's `riva-build` command \n # then you should use \"riva_text_classification_<your_input_domain_name>\"\n # where <your_input_domain_name> is the name you provided to the \n # domain_name parameter. In this case the domain_name is \"domain\"\nrequest.text.append(\"Is it going to snow in Burlington, Vermont tomorrow night?\")\nrequest.text.append(\"What causes rain?\")\nrequest.text.append(\"What is your favorite season?\")\nct_response = riva_nlp.ClassifyText(request)\nprint(ct_response)", "results {\n labels {\n class_name: \"weather\"\n score: 0.9975590109825134\n }\n}\nresults {\n labels {\n class_name: \"meteorology\"\n score: 0.984375\n }\n}\nresults {\n labels {\n class_name: \"personality\"\n score: 0.984375\n }\n}\n\n" ] ], [ [ "<a id=\"3\"></a>\n\n## 3. TTS Service Example\n\nSubsequent releases will include added features, including model registration to support multiple languages/voices with the same API. 
Support for resampling to alternative sampling rates will also be added.", "_____no_output_____" ] ], [ [ "req = rtts.SynthesizeSpeechRequest()\nreq.text = \"Is it recognize speech or wreck a nice beach?\"\nreq.language_code = \"en-US\" # currently required to be \"en-US\"\nreq.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings\nreq.sample_rate_hz = 22050 # ignored, audio returned will be 22.05KHz\nreq.voice_name = \"ljspeech\" # ignored\n\nresp = riva_tts.Synthesize(req)\naudio_samples = np.frombuffer(resp.audio, dtype=np.float32)\nipd.Audio(audio_samples, rate=22050)", "_____no_output_____" ] ], [ [ "<a id=\"4\"></a>\n\n## 4. Riva NLP Service Examples\n\nThe NLP Service contains higher-level/more application-specific NLP APIs. This\nguide demonstrates how the AnalyzeIntent API can be used for queries across\nboth known and unknown domains.", "_____no_output_____" ] ], [ [ "# The AnalyzeIntent API can be used to query a Intent Slot classifier. The API can leverage a\n# text classification model to classify the domain of the input query and then route to the \n# appropriate intent slot model.\n\n# Lets first see an example where the domain is known. This skips execution of the domain classifier\n# and proceeds directly to the intent/slot model for the requested domain.\n\nreq = rnlp.AnalyzeIntentRequest()\nreq.query = \"How is the humidity in San Francisco?\"\nreq.options.domain = \"weather\" # The <domain_name> is appended to \"riva_intent_\" to look for a \n # model \"riva_intent_<domain_name>\". So in this e.g., the model \"riva_intent_weather\"\n # needs to be preloaded in riva server. If you would like to deploy your \n # custom Joint Intent and Slot model use the `--domain_name` parameter in \n # ServiceMaker's `riva-build intent_slot` command.\n\nresp = riva_nlp.AnalyzeIntent(req)\nprint(resp)", "intent {\n class_name: \"weather.humidity\"\n score: 0.983601987361908\n}\nslots {\n token: \"san francisco\"\n label {\n class_name: \"weatherplace\"\n score: 0.9822959899902344\n }\n}\nslots {\n token: \"?\"\n label {\n class_name: \"weatherplace\"\n score: 0.6474800109863281\n }\n}\ndomain_str: \"weather\"\ndomain {\n class_name: \"weather\"\n score: 1.0\n}\n\n" ], [ "# Below is an example where the input domain is not provided.\n\nreq = rnlp.AnalyzeIntentRequest()\nreq.query = \"Is it going to rain tomorrow?\"\n\n # The input query is first routed to the a text classification model called \"riva_text_classification_domain\"\n # The output class label of \"riva_text_classification_domain\" is appended to \"riva_intent_\"\n # to get the appropriate Intent Slot model to execute for the input query.\n # Note: The model \"riva_text_classification_domain\" needs to be loaded into Riva server and have the appropriate\n # class labels that would invoke the corresponding intent slot model.\n\nresp = riva_nlp.AnalyzeIntent(req)\nprint(resp)", "intent {\n class_name: \"weather.rainfall\"\n score: 0.9661880135536194\n}\nslots {\n token: \"tomorrow\"\n label {\n class_name: \"weatherforecastdaily\"\n score: 0.5325539708137512\n }\n}\nslots {\n token: \"?\"\n label {\n class_name: \"weatherplace\"\n score: 0.6895459890365601\n }\n}\ndomain_str: \"weather\"\ndomain {\n class_name: \"weather\"\n score: 0.9975590109825134\n}\n\n" ], [ "# Some weather Intent queries\nqueries = [\n \"Is it currently cloudy in Tokyo?\",\n \"What is the annual rainfall in Pune?\",\n \"What is the humidity going to be tomorrow?\"\n]\nfor q in queries:\n req = 
rnlp.AnalyzeIntentRequest()\n req.query = q\n start = time()\n resp = riva_nlp.AnalyzeIntent(req)\n\n print(f\"[{resp.intent.class_name}]\\t{req.query}\")", "[weather.cloudy]\tIs it currently cloudy in Tokyo?\n[weather.rainfall]\tWhat is the annual rainfall in Pune?\n[weather.humidity]\tWhat is the humidity going to be tomorrow?\n" ], [ "# Demonstrate latency by calling repeatedly.\n# NOTE: this is a synchronous API call, so request #N will not be sent until\n# response #N-1 is returned. This means latency and throughput will be negatively\n# impacted by long-distance & VPN connections\n\nreq = rnlp.TextTransformRequest()\nreq.text.append(\"i need one cpu four gpus and lots of memory for my new computer it's going to be very cool\")\n\niterations = 10\n# Demonstrate synchronous performance\nstart_time = time()\nfor _ in range(iterations):\n nlp_resp = riva_nlp.PunctuateText(req)\nend_time = time()\nprint(f\"Time to complete {iterations} synchronous requests: {end_time-start_time}\")\n\n# Demonstrate async performance\nstart_time = time()\nfutures = []\nfor _ in range(iterations):\n futures.append(riva_nlp.PunctuateText.future(req))\nfor f in futures:\n f.result()\nend_time = time()\nprint(f\"Time to complete {iterations} asynchronous requests: {end_time-start_time}\\n\")\n", "Time to complete 10 synchronous requests: 0.05957150459289551\nTime to complete 10 asynchronous requests: 0.020952463150024414\n\n" ] ], [ [ "<a id=\"5\"></a>\n\n## 5. Go deeper into Riva capabilities\n\nNow that you have a basic introduction to the Riva APIs, you may like to try out:\n\n### 1. Sample apps:\n\nRiva comes with various sample apps as a demonstration for how to use the APIs to build interesting applications such as a [chatbot](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/weather.html), a domain specific speech recognition or [keyword (entity) recognition system](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/callcenter.html), or simply how Riva allows scaling out for handling massive amount of requests at the same time. ([SpeechSquad)](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/speechsquad.html) \nHave a look at the Sample Application section in the [Riva developer documentation](https://developer.nvidia.com/) for all the sample apps.\n\n\n### 2. Finetune your own domain specific Speech or NLP model and deploy into Riva.\n\nTrain the latest state-of-the-art speech and natural language processing models on your own data using [NeMo](https://github.com/NVIDIA/NeMo) or [Transfer Learning ToolKit](https://developer.nvidia.com/transfer-learning-toolkit) and deploy them on Riva using the [Riva ServiceMaker tool](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/model-servicemaker.html).\n\n\n### 3. Further resources:\n\nExplore the details of each of the APIs and their functionalities in the [docs](https://docs.nvidia.com/deeplearning/jarvis/user-guide/docs/protobuf-api/protobuf-api-root.html).", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
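The offline-recognition cell in the Riva notebook above configures a `RecognizeRequest` field by field. When the same call is needed for many files, it can be folded into a small helper; the sketch below reuses only objects and settings that already appear in the notebook (the `riva_asr` stub, the `rasr`/`ra` proto modules, mono LINEAR_PCM audio) and assumes the same local server at `localhost:50051`:

```python
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_audio_pb2 as ra

def transcribe_wav(riva_asr, raw_bytes, sample_rate):
    """Offline ASR call, mirroring the notebook's RecognizeRequest setup."""
    req = rasr.RecognizeRequest()
    req.audio = raw_bytes                                  # raw file bytes
    req.config.encoding = ra.AudioEncoding.LINEAR_PCM
    req.config.sample_rate_hertz = sample_rate
    req.config.language_code = "en-US"
    req.config.max_alternatives = 1
    req.config.enable_automatic_punctuation = True
    req.config.audio_channel_count = 1                     # mono channel
    response = riva_asr.Recognize(req)
    return response.results[0].alternatives[0].transcript
```

Called with the notebook's `riva_asr` stub and its `content`/`sr` pair, this returns the same best-transcript string shown in the cell output.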
cbc3fa6505ed9878a9f981919ae0e57f843686b6
521,878
ipynb
Jupyter Notebook
Segmentacao de instancia - toras.ipynb
lucdem/identificacao-toras
d3a7bdb9476620c04be19833bf4c1d7bb533e893
[ "MIT" ]
null
null
null
Segmentacao de instancia - toras.ipynb
lucdem/identificacao-toras
d3a7bdb9476620c04be19833bf4c1d7bb533e893
[ "MIT" ]
null
null
null
Segmentacao de instancia - toras.ipynb
lucdem/identificacao-toras
d3a7bdb9476620c04be19833bf4c1d7bb533e893
[ "MIT" ]
null
null
null
521,878
521,878
0.961108
[ [ [ "!pip install pyyaml==5.1\n\nimport torch\nTORCH_VERSION = \".\".join(torch.__version__.split(\".\")[:2])\nCUDA_VERSION = torch.__version__.split(\"+\")[-1]\nprint(\"torch: \", TORCH_VERSION, \"; cuda: \", CUDA_VERSION)\n# Install detectron2 that matches the above pytorch version\n# See https://detectron2.readthedocs.io/tutorials/install.html for instructions\n!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/$CUDA_VERSION/torch$TORCH_VERSION/index.html\n# If there is not yet a detectron2 release that matches the given torch + CUDA version, you need to install a different pytorch.\n\n# exit(0) # After installation, you may need to \"restart runtime\" in Colab. This line can also restart runtime", "_____no_output_____" ], [ "!nvidia-smi\n!nvcc --version", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/gdrive')\nproject_path = '/content/gdrive/MyDrive/madeira'\nimages_path = f'{project_path}'", "_____no_output_____" ], [ "# Some basic setup:\n# Setup detectron2 logger\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\n# import some common libraries\nimport numpy as np\nimport os, json, cv2, random\nimport datetime\nfrom google.colab.patches import cv2_imshow\n\n# import some common detectron2 utilities\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom detectron2.engine import DefaultTrainer\nfrom detectron2.config import get_cfg\nfrom detectron2.checkpoint import DetectionCheckpointer", "_____no_output_____" ], [ "from detectron2.data.datasets import register_coco_instances\n\nregister_coco_instances(\"train_coco\", {}, f\"{images_path}/train_coco/annotations.json\", f'{images_path}/train_coco')\ntrain_dataset = DatasetCatalog.get(\"train_coco\")\ntrain_metadata = MetadataCatalog.get(\"train_coco\")\n\nregister_coco_instances(\"test_coco\", {}, f\"{images_path}/test_coco/annotations.json\", f'{images_path}/test_coco')\ntest_dataset = DatasetCatalog.get(\"test_coco\")\ntest_metadata = MetadataCatalog.get(\"test_coco\")", "_____no_output_____" ], [ "from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader, build_detection_train_loader\nfrom detectron2.data import detection_utils as utils\nimport detectron2.data.transforms as T\nimport copy\n\ndef custom_mapper(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n transform_list = [\n T.RandomBrightness(0.9, 1.1),\n T.RandomContrast(0.9, 1.1),\n T.RandomSaturation(0.9, 1.1),\n T.RandomFlip(prob=0.5, horizontal=False, vertical=True),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.RandomCrop(\"relative\", (0.4, 0.4))\n ]\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(annos, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n return dataset_dict\n\n\nclass AugTrainer(DefaultTrainer):\n \n @classmethod\n def build_train_loader(cls, cfg):\n return 
build_detection_train_loader(cfg, mapper=custom_mapper)", "_____no_output_____" ], [ "# If first training\n\ncfg = get_cfg()\n\ncfg.OUTPUT_DIR = f'{project_path}/model/best'\nos.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\ncfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\ncfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\n\ncfg.DATASETS.TRAIN = (\"train_coco\",)\ncfg.DATASETS.TEST = ()\ncfg.DATALOADER.NUM_WORKERS = 2\ncfg.SOLVER.IMS_PER_BATCH = 6\ncfg.SOLVER.BASE_LR = 0.002\ncfg.SOLVER.MAX_ITER = (300)\ncfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 2\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = 2\ncfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (128)\n\ntrainer = AugTrainer(cfg)\ncheckpointer = DetectionCheckpointer(trainer.model, save_dir=cfg.OUTPUT_DIR)", "_____no_output_____" ], [ "# Train\n\nimport os\n\ncfg.SOLVER.MAX_ITER = (300)\ntrainer.resume_or_load(resume=False)\ntrainer.resume_or_load()\ntrainer.train()", "_____no_output_____" ], [ "from detectron2.utils.visualizer import ColorMode\n\ncfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\npredictor = DefaultPredictor(cfg)\n#im = cv2.imread(f'{images_path}/de frente.jpeg')\nim = cv2.imread(f'{images_path}/test_coco/JPEGImages/1_3.jpg')\nprint(test_metadata.thing_classes)\noutputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format\nv = Visualizer(im[:, :, ::-1],\n metadata=train_metadata, \n scale=0.5, \n instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models\n)\nout = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\ncv2_imshow(out.get_image()[:, :, ::-1])", "['_background_', 'base']\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
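The Detectron2 notebook above trains a Mask R-CNN on the registered `train_coco` split and visualizes predictions, but stops short of scoring them. A hedged sketch of the usual follow-up — COCO-style evaluation on the registered `test_coco` split — is below; it assumes a recent Detectron2 release (where `COCOEvaluator` accepts `output_dir` as a keyword) and reuses the `cfg` and `predictor` objects from the notebook:

```python
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

# Evaluate the trained predictor on the held-out split registered earlier
evaluator = COCOEvaluator("test_coco", output_dir=cfg.OUTPUT_DIR)
test_loader = build_detection_test_loader(cfg, "test_coco")
print(inference_on_dataset(predictor.model, test_loader, evaluator))
```

This prints the standard bounding-box and mask AP numbers for the registered classes.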
cbc3fc6392646ec1774b891ff57a392f0e73628c
75,041
ipynb
Jupyter Notebook
Homework3.ipynb
merveersahin/ml_zoomcamp_homework
3cf7c51d76cf7ec36883869a4e32072fa3c8bb85
[ "MIT" ]
null
null
null
Homework3.ipynb
merveersahin/ml_zoomcamp_homework
3cf7c51d76cf7ec36883869a4e32072fa3c8bb85
[ "MIT" ]
null
null
null
Homework3.ipynb
merveersahin/ml_zoomcamp_homework
3cf7c51d76cf7ec36883869a4e32072fa3c8bb85
[ "MIT" ]
null
null
null
29.267161
249
0.388881
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "#Loading the dataset:\n\ndata = pd.read_csv(\"AB_NYC_2019.csv\")\ndata.head()", "_____no_output_____" ] ], [ [ "### Features\n\nFor the rest of the homework, you'll need to use the features from the previous homework with additional two `'neighbourhood_group'` and `'room_type'`. So the whole feature set will be set as follows:\n\n* `'neighbourhood_group'`,\n* `'room_type'`,\n* `'latitude'`,\n* `'longitude'`,\n* `'price'`,\n* `'minimum_nights'`,\n* `'number_of_reviews'`,\n* `'reviews_per_month'`,\n* `'calculated_host_listings_count'`,\n* `'availability_365'`\n\nSelect only them and fill in the missing values with 0.\n", "_____no_output_____" ] ], [ [ "new_data = data[['neighbourhood_group','room_type','latitude', 'longitude', 'price', 'minimum_nights','number_of_reviews', 'reviews_per_month', 'calculated_host_listings_count','availability_365']]\nnew_data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48895 entries, 0 to 48894\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 neighbourhood_group 48895 non-null object \n 1 room_type 48895 non-null object \n 2 latitude 48895 non-null float64\n 3 longitude 48895 non-null float64\n 4 price 48895 non-null int64 \n 5 minimum_nights 48895 non-null int64 \n 6 number_of_reviews 48895 non-null int64 \n 7 reviews_per_month 38843 non-null float64\n 8 calculated_host_listings_count 48895 non-null int64 \n 9 availability_365 48895 non-null int64 \ndtypes: float64(3), int64(5), object(2)\nmemory usage: 3.4+ MB\n" ], [ "new_data.isnull().sum()", "_____no_output_____" ], [ "new_data['reviews_per_month']= new_data['reviews_per_month'].fillna(0)\nnew_data.isnull().sum()", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "### Question 1\n\nWhat is the most frequent observation (mode) for the column `'neighbourhood_group'`?\n", "_____no_output_____" ] ], [ [ "print('The most frequent observation for the column neighbourhood_group is', new_data['neighbourhood_group'].mode())", "The most frequent observation for the column neighbourhood_group is 0 Manhattan\ndtype: object\n" ] ], [ [ "### Split the data\n\n* Split your data in train/val/test sets, with 60%/20%/20% distribution.\n* Use Scikit-Learn for that (the `train_test_split` function) and set the seed to 42.\n* Make sure that the target value ('price') is not in your dataframe.\n", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX = new_data.drop(['price'], axis=1)\ny = new_data[\"price\"]\n\n\nX_full_train, X_test, y_full_train, y_test=train_test_split(X, y, test_size = 0.2, random_state=42) #Data is divided only 2 parts with this code(80% for train, 20% for test)\nX_train, X_val, y_train, y_val=train_test_split(X_full_train, y_full_train, test_size = 0.2, random_state=42) #Now, train data is divided 2 parts to create validation set\n", "_____no_output_____" ], [ "X_train = X_train.reset_index(drop=True)\nX_val = X_val.reset_index(drop=True)\nX_test = X_val.reset_index(drop=True)", "_____no_output_____" ] ], [ [ "### Question 2\n\n* Create the 
[correlation matrix](https://www.google.com/search?q=correlation+matrix) for the numerical features of your train dataset.\n * In a correlation matrix, you compute the correlation coefficient between every pair of features in the dataset.\n* What are the two features that have the biggest correlation in this dataset?", "_____no_output_____" ] ], [ [ "X_train.corr()", "_____no_output_____" ] ], [ [ "The *number of reviews* and *reviews_per_month* has the highest correlation score as 0.59.", "_____no_output_____" ], [ "### Make price binary\n\n* We need to turn the price variable from numeric into binary.\n* Let's create a variable `above_average` which is `1` if the price is above (or equal to) `152`.", "_____no_output_____" ] ], [ [ "y_train =pd.DataFrame(y_train)\ny_train1 = y_train #not to lose original train set with price\ny_train1['above_average'] = y_train1['price'] >= 152\ny_train1", "_____no_output_____" ], [ "y_train1['above_average'] = y_train1.above_average.astype(int)\ny_train1", "_____no_output_____" ], [ "y_val =pd.DataFrame(y_val)\ny_val1 = y_val\ny_val1['above_average'] = y_val1['price'] >= 152\ny_val1['above_average'] = y_val1.above_average.astype(int)\ny_val1", "_____no_output_____" ], [ "y_test =pd.DataFrame(y_test)\ny_test1 = y_test\ny_test1['above_average'] = y_test1['price'] >= 152\ny_test1['above_average'] = y_test1.above_average.astype(int)\ny_test1", "_____no_output_____" ] ], [ [ "### Question 3\n\n* Calculate the mutual information score with the (binarized) price for the two categorical variables that we have. Use the training set only.\n* Which of these two variables has bigger score?\n* Round it to 2 decimal digits using `round(score, 2)`", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mutual_info_score", "_____no_output_____" ], [ "round(mutual_info_score(X_train.room_type, y_train1.above_average),2)", "_____no_output_____" ], [ "round(mutual_info_score(X_train.neighbourhood_group, y_train1.above_average),2)", "_____no_output_____" ] ], [ [ "Room type has the bigger mutual score with binarized price variable.", "_____no_output_____" ], [ "### Question 4\n\n* Now let's train a logistic regression\n* Remember that we have two categorical variables in the data. Include them using one-hot encoding.\n* Fit the model on the training dataset.\n * To make sure the results are reproducible across different versions of Scikit-Learn, fit the model with these parameters:\n * `model = LogisticRegression(solver='liblinear', C=1.0, random_state=42)`\n* Calculate the accuracy on the validation dataset and rount it to 2 decimal digits.", "_____no_output_____" ] ], [ [ "new_data.columns", "_____no_output_____" ], [ "categorical = ['neighbourhood_group', 'room_type']\nnumerical = [ 'latitude', 'longitude',\n 'minimum_nights', 'number_of_reviews', 'reviews_per_month',\n 'calculated_host_listings_count', 'availability_365']", "_____no_output_____" ], [ "#ONE HOT ENCODING\n\nfrom sklearn.feature_extraction import DictVectorizer\n\ntrain_dict = X_train[categorical + numerical].to_dict(orient='records')", "_____no_output_____" ], [ "train_dict[0]", "_____no_output_____" ], [ "dv = DictVectorizer(sparse=False)\ndv.fit(train_dict)", "_____no_output_____" ], [ "X_train = dv.transform(train_dict)\nprint(X_train.shape)\nprint(X_train)", "(31292, 15)\n[[ 0. 1. 40.71754 ... 1. 0. 0. ]\n [326. 1. 40.78784 ... 1. 0. 0. ]\n [324. 1. 40.7358 ... 1. 0. 0. ]\n ...\n [ 88. 1. 40.79994 ... 0. 1. 0. ]\n [ 0. 1. 40.69585 ... 0. 1. 0. ]\n [281. 2. 40.64438 ... 1. 0. 0. 
]]\n" ], [ "dv.get_feature_names()", "_____no_output_____" ], [ "y_train1 = y_train1[['above_average']]\ny_train1", "_____no_output_____" ], [ "#TRAINING LOGISTIC REGRESSION\n\nfrom sklearn.linear_model import LogisticRegression\n\nmodel = LogisticRegression(solver='liblinear', C=1.0, random_state=42)\nmodel.fit(X_train, y_train1)", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:72: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n return f(**kwargs)\n" ], [ "val_dict = X_val[categorical + numerical].to_dict(orient='records')\ndv = DictVectorizer(sparse=False)\ndv.fit(val_dict)\n\nX_val = dv.transform(val_dict)\nX_val.shape", "_____no_output_____" ], [ "print(y_val)\ny_val1 = y_val[['above_average']]", " price above_average\n27408 65 0\n7741 89 0\n4771 200 1\n1719 120 0\n19153 748 1\n... ... ...\n44145 75 0\n17550 100 0\n12622 48 0\n9644 386 1\n29925 50 0\n\n[7824 rows x 2 columns]\n" ], [ "from sklearn.metrics import accuracy_score\ny_pred = model.predict(X_val)\nround(accuracy_score(y_val1,y_pred),2)", "_____no_output_____" ] ], [ [ "### Question 5\n\n* We have 9 features: 7 numerical features and 2 categorical.\n* Let's find the least useful one using the *feature elimination* technique.\n* Train a model with all these features (using the same parameters as in Q4).\n* Now exclude each feature from this set and train a model without it. Record the accuracy for each model.\n* For each feature, calculate the difference between the original accuracy and the accuracy without the feature. \n* Which of following feature has the smallest difference? \n * `neighbourhood_group`\n * `room_type` \n * `number_of_reviews`\n * `reviews_per_month`\n\n> **note**: the difference doesn't have to be positive\n", "_____no_output_____" ] ], [ [ "#Model without neighbourhood_group\nmodel1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)\nmodel1.fit(np.delete(X_train, [5,6,7,8,9], 1), y_train1)", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:72: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n return f(**kwargs)\n" ], [ "y_val1 = y_val1[['above_average']]", "_____no_output_____" ], [ "y_pred1 = model1.predict(np.delete(X_val, [5,6,7,8,9], 1))\nround(accuracy_score(y_val1,y_pred1),2)", "_____no_output_____" ], [ "#Model without room_type\nmodel1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)\nmodel1.fit(np.delete(X_train, [12,13,14], 1), y_train1)\n\ny_pred1 = model1.predict(np.delete(X_val, [12,13,14], 1))\nround(accuracy_score(y_val1,y_pred1),2)", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:72: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n return f(**kwargs)\n" ], [ "#Model without number_of_reviews\nmodel1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)\nmodel1.fit(np.delete(X_train, 10, 1), y_train1)\n\ny_pred1 = model1.predict(np.delete(X_val, 10, 1))\nround(accuracy_score(y_val1,y_pred1),2)", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:72: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples, ), for example using ravel().\n return f(**kwargs)\n" ], [ "#Model without reviews_per_month\nmodel1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)\nmodel1.fit(np.delete(X_train, 11, 1), y_train1)\n\ny_pred1 = model1.predict(np.delete(X_val, 11, 1))\nround(accuracy_score(y_val1,y_pred1),2)", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:72: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n return f(**kwargs)\n" ] ], [ [ "number_of_reviews and reviews_per_month does not change the global accuracy.", "_____no_output_____" ], [ "### Question 6\n\n* For this question, we'll see how to use a linear regression model from Scikit-Learn\n* We'll need to use the original column `'price'`. Apply the logarithmic transformation to this column.\n* Fit the Ridge regression model on the training data.\n* This model has a parameter `alpha`. Let's try the following values: `[0, 0.01, 0.1, 1, 10]`\n* Which of these alphas leads to the best RMSE on the validation set? Round your RMSE scores to 3 decimal digits.\n\nIf there are multiple options, select the smallest `alpha`.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Ridge", "_____no_output_____" ], [ "def rmse(y, y_pred):\n error = y - y_pred\n se = error ** 2\n mse = se.mean()\n return np.sqrt(mse)", "_____no_output_____" ], [ "y_train = pd.DataFrame(y_train)\ny_train", "_____no_output_____" ], [ "#Log Transformation on Price\ny_train = np.log(y_train['price'])\ny_train", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\pandas\\core\\arraylike.py:364: RuntimeWarning: divide by zero encountered in log\n result = getattr(ufunc, method)(*inputs, **kwargs)\n" ], [ "y_train=pd.DataFrame(y_train)\ny_train ", "_____no_output_____" ], [ "y_val = np.log(y_val['price'])\ny_val=pd.DataFrame(y_val)\ny_val ", "_____no_output_____" ], [ "y_test = np.log(y_test['price'])\ny_test=pd.DataFrame(y_test)\ny_test", "C:\\Users\\merve\\Anaconda3\\lib\\site-packages\\pandas\\core\\arraylike.py:364: RuntimeWarning: divide by zero encountered in log\n result = getattr(ufunc, method)(*inputs, **kwargs)\n" ], [ "X_train = pd.DataFrame(X_train)\nX_train", "_____no_output_____" ], [ "#Ridge Regression\nfrom sklearn.linear_model import Ridge\n\nfor a in [0, 0.01, 0.1, 1, 10]:\n clf = Ridge(alpha=a)\n clf.fit(X_train, y_train)\n \n y_pred = clf.predict(X_val)\n rmse_score = rmse(y_val, y_pred)\n \n print('RMSE for',a,'is', rmse_score)", "RMSE for 0 is 210.888191748839\nRMSE for 0.01 is 210.88681941505604\nRMSE for 0.1 is 210.8868613978482\nRMSE for 1 is 210.88796928676265\nRMSE for 10 is 210.9358645418637\n" ] ], [ [ "All RMSEs are very close to each other, however the minimum one belongs to alpha=0.01", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
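Question 5 of the homework notebook above runs its feature-elimination study by hand, deleting hard-coded column indices from the `DictVectorizer` output before refitting. The same pattern can be written once as a helper; this is a sketch under the assumption that the inputs are the dense feature arrays and binarized targets built earlier, and that for a one-hot encoded categorical you pass the whole group of its column indices (as the notebook does with, e.g., the `room_type` columns `[12, 13, 14]`):

```python
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

def accuracy_without_columns(X_tr, y_tr, X_va, y_va, drop_cols):
    """Refit the Q4 logistic regression with the given feature columns removed."""
    keep = [i for i in range(X_tr.shape[1]) if i not in set(drop_cols)]
    model = LogisticRegression(solver="liblinear", C=1.0, random_state=42)
    model.fit(X_tr[:, keep], y_tr)
    return accuracy_score(y_va, model.predict(X_va[:, keep]))

# The gap vs. the full-feature accuracy shows how useful the dropped feature was,
# e.g. full_acc - accuracy_without_columns(X_train, y_above, X_val, y_val_above, [12, 13, 14])
```

Here `y_above` / `y_val_above` are placeholder names for the binarized `above_average` targets from the notebook.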
cbc409bbc77dc2b11ce0c1fca4e3aea4033f3ac2
101,962
ipynb
Jupyter Notebook
mnist-mlp/mnist_mlp.ipynb
anoff/AIND-CNN
0b4f5b2746ab45d3f574745b2e421686649ab872
[ "MIT" ]
null
null
null
mnist-mlp/mnist_mlp.ipynb
anoff/AIND-CNN
0b4f5b2746ab45d3f574745b2e421686649ab872
[ "MIT" ]
null
null
null
mnist-mlp/mnist_mlp.ipynb
anoff/AIND-CNN
0b4f5b2746ab45d3f574745b2e421686649ab872
[ "MIT" ]
null
null
null
199.534247
78,914
0.893725
[ [ [ "# Artificial Intelligence Nanodegree\n\n## Convolutional Neural Networks\n\n---\n\nIn this notebook, we train an MLP to classify images from the MNIST database.\n\n### 1. Load MNIST Database", "_____no_output_____" ] ], [ [ "from keras.datasets import mnist\n\n# use Keras to import pre-shuffled MNIST database\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nprint(\"The MNIST database has a training set of %d examples.\" % len(X_train))\nprint(\"The MNIST database has a test set of %d examples.\" % len(X_test))", "The MNIST database has a training set of 60000 examples.\nThe MNIST database has a test set of 10000 examples.\n" ] ], [ [ "### 2. Visualize the First Six Training Images", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nimport matplotlib.cm as cm\nimport numpy as np\n\n# plot first six training images\nfig = plt.figure(figsize=(20,20))\nfor i in range(6):\n ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])\n ax.imshow(X_train[i], cmap='gray')\n ax.set_title(str(y_train[i]))", "_____no_output_____" ] ], [ [ "### 3. View an Image in More Detail", "_____no_output_____" ] ], [ [ "def visualize_input(img, ax):\n ax.imshow(img, cmap='gray')\n width, height = img.shape\n thresh = img.max()/2.5\n for x in range(width):\n for y in range(height):\n ax.annotate(str(round(img[x][y],2)), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nvisualize_input(X_train[0], ax)", "_____no_output_____" ] ], [ [ "### 4. Rescale the Images by Dividing Every Pixel in Every Image by 255", "_____no_output_____" ] ], [ [ "# rescale [0,255] --> [0,1]\nX_train = X_train.astype('float32')/255\nX_test = X_test.astype('float32')/255 ", "_____no_output_____" ] ], [ [ "### 5. Encode Categorical Integer Labels Using a One-Hot Scheme", "_____no_output_____" ] ], [ [ "from keras.utils import np_utils\n\n# print first ten (integer-valued) training labels\nprint('Integer-valued labels:')\nprint(y_train[:10])\n\n# one-hot encode the labels\ny_train = np_utils.to_categorical(y_train, 10)\ny_test = np_utils.to_categorical(y_test, 10)\n\n# print first ten (one-hot) training labels\nprint('One-hot labels:')\nprint(y_train[:10])", "Integer-valued labels:\n[5 0 4 1 9 2 1 3 1 4]\nOne-hot labels:\n[[ 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n [ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n [ 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]\n [ 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]\n [ 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]]\n" ] ], [ [ "### 6. 
Define the Model Architecture", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\n\n# define the model\nmodel = Sequential()\nmodel.add(Flatten(input_shape=X_train.shape[1:]))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10, activation='softmax'))\n\n# summarize the model\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_13 (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense_35 (Dense) (None, 512) 401920 \n_________________________________________________________________\ndropout_21 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_36 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_22 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_37 (Dense) (None, 10) 5130 \n=================================================================\nTotal params: 669,706.0\nTrainable params: 669,706.0\nNon-trainable params: 0.0\n_________________________________________________________________\n" ] ], [ [ "### 7. Compile the Model", "_____no_output_____" ] ], [ [ "# compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='sgd', \n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### 8. Calculate the Classification Accuracy on the Test Set (Before Training)", "_____no_output_____" ] ], [ [ "# evaluate test accuracy\nscore = model.evaluate(X_test, y_test, verbose=0)\naccuracy = 100*score[1]\n\n# print test accuracy\nprint('Test accuracy: %.4f%%' % accuracy)", "Test accuracy: 97.6700%\n" ] ], [ [ "### 9. 
Train the Model", "_____no_output_____" ] ], [ [ "from keras.callbacks import ModelCheckpoint \nimport time\n\nstart = time.time()\n\n# train the model\ncheckpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', \n verbose=1, save_best_only=True)\nhist = model.fit(X_train, y_train, batch_size=512, epochs=10,\n validation_split=0.2, callbacks=[checkpointer],\n verbose=2, shuffle=True)\n\ntraining_duration = time.time() - start\nprint(training_duration)", "Train on 48000 samples, validate on 12000 samples\nEpoch 1/10\nEpoch 00000: val_loss improved from inf to 0.06965, saving model to mnist.model.best.hdf5\n7s - loss: 0.0087 - acc: 0.9974 - val_loss: 0.0697 - val_acc: 0.9824\nEpoch 2/10\nEpoch 00001: val_loss did not improve\n7s - loss: 0.0084 - acc: 0.9974 - val_loss: 0.0697 - val_acc: 0.9825\nEpoch 3/10\nEpoch 00002: val_loss did not improve\n9s - loss: 0.0082 - acc: 0.9975 - val_loss: 0.0697 - val_acc: 0.9824\nEpoch 4/10\nEpoch 00003: val_loss did not improve\n9s - loss: 0.0081 - acc: 0.9975 - val_loss: 0.0697 - val_acc: 0.9824\nEpoch 5/10\nEpoch 00004: val_loss did not improve\n8s - loss: 0.0082 - acc: 0.9973 - val_loss: 0.0697 - val_acc: 0.9824\nEpoch 6/10\nEpoch 00005: val_loss did not improve\n9s - loss: 0.0083 - acc: 0.9972 - val_loss: 0.0697 - val_acc: 0.9825\nEpoch 7/10\nEpoch 00006: val_loss did not improve\n10s - loss: 0.0082 - acc: 0.9974 - val_loss: 0.0698 - val_acc: 0.9824\nEpoch 8/10\nEpoch 00007: val_loss did not improve\n7s - loss: 0.0081 - acc: 0.9978 - val_loss: 0.0698 - val_acc: 0.9824\nEpoch 9/10\nEpoch 00008: val_loss did not improve\n11s - loss: 0.0071 - acc: 0.9978 - val_loss: 0.0698 - val_acc: 0.9825\nEpoch 10/10\nEpoch 00009: val_loss did not improve\n7s - loss: 0.0084 - acc: 0.9974 - val_loss: 0.0698 - val_acc: 0.9824\n88.10980010032654\n" ] ], [ [ "### 10. Load the Model with the Best Classification Accuracy on the Validation Set", "_____no_output_____" ] ], [ [ "# load the weights that yielded the best validation accuracy\nmodel.load_weights('mnist.model.best.hdf5')", "_____no_output_____" ] ], [ [ "### 11. Calculate the Classification Accuracy on the Test Set", "_____no_output_____" ] ], [ [ "# evaluate test accuracy\nscore = model.evaluate(X_test, y_test, verbose=0)\naccuracy = 100*score[1]\n\n# print test accuracy\nprint('Test accuracy: %.4f%%' % accuracy)", "Test accuracy: 98.4500%\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc40fe8a37483aaec308d4b31402646ad23ef00
171,333
ipynb
Jupyter Notebook
notebooks/imdb_binary_sentiment_classification.ipynb
victor7246/consNLP
b243bb3ed6edc4b68a4cdbaf5503107a1864ad7e
[ "MIT" ]
2
2020-08-11T12:46:48.000Z
2021-01-08T16:18:26.000Z
notebooks/imdb_binary_sentiment_classification.ipynb
victor7246/consNLP
b243bb3ed6edc4b68a4cdbaf5503107a1864ad7e
[ "MIT" ]
null
null
null
notebooks/imdb_binary_sentiment_classification.ipynb
victor7246/consNLP
b243bb3ed6edc4b68a4cdbaf5503107a1864ad7e
[ "MIT" ]
null
null
null
59.822975
372
0.518155
[ [ [ "from __future__ import absolute_import\n\nimport sys\nimport os\n\ntry:\n from dotenv import find_dotenv, load_dotenv\nexcept:\n pass\n\nimport argparse\n\ntry:\n sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))\nexcept:\n sys.path.append(os.path.join(os.getcwd(), '../src'))\n \nimport pandas as pd\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torchcontrib.optim import SWA\nfrom torch.optim import Adam, SGD \nfrom torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau, CyclicLR, \\\n CosineAnnealingWarmRestarts\n\nfrom consNLP.data import load_data, data_utils, fetch_dataset\nfrom consNLP.models import transformer_models, activations, layers, losses, scorers\nfrom consNLP.visualization import visualize\nfrom consNLP.trainer.trainer import BasicTrainer, PLTrainer, test_pl_trainer\nfrom consNLP.trainer.trainer_utils import set_seed, _has_apex, _torch_lightning_available, _has_wandb, _torch_gpu_available, _num_gpus, _torch_tpu_available\nfrom consNLP.preprocessing.custom_tokenizer import BERTweetTokenizer\n\nif _has_apex:\n #from torch.cuda import amp\n from apex import amp\n\nif _torch_tpu_available:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\n\nif _has_wandb:\n import wandb\n try:\n load_dotenv(find_dotenv())\n wandb.login(key=os.environ['WANDB_API_KEY'])\n except:\n _has_wandb = False\n\nif _torch_lightning_available:\n import pytorch_lightning as pl\n from pytorch_lightning import Trainer, seed_everything\n from pytorch_lightning.loggers import WandbLogger\n from pytorch_lightning.metrics.metric import NumpyMetric\n from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback\n\nimport tokenizers\nfrom transformers import AutoModel, AutoTokenizer, AdamW, get_linear_schedule_with_warmup, AutoConfig", "I0806 14:58:45.963731 4539198912 file_utils.py:41] PyTorch version 1.5.0 available.\nI0806 14:58:53.984734 4539198912 file_utils.py:57] TensorFlow version 2.2.0-rc3 available.\nI0806 14:58:56.724100 4539198912 modeling.py:230] Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\nwandb: WARNING If you're specifying your api key in code, ensure this code is not shared publically.\nwandb: WARNING Consider setting the WANDB_API_KEY environment variable, or running `wandb login` from the command line.\nwandb: WARNING Calling wandb.login() without arguments from jupyter should prompt you for an api key.\nwandb: Appending key for api.wandb.ai to your netrc file: /Users/victor/.netrc\n/Users/victor/anaconda3/lib/python3.7/site-packages/scipy/sparse/sparsetools.py:21: DeprecationWarning: `scipy.sparse.sparsetools` is deprecated!\nscipy.sparse.sparsetools is a private module for scipy.sparse, and should not be used.\n _deprecated()\nI0806 14:58:59.111385 4539198912 textcleaner.py:37] 'pattern' package not found; tag filters are not available for English\nW0806 14:58:59.377578 4539198912 deprecation.py:323] From /Users/victor/anaconda3/lib/python3.7/site-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.\nInstructions for updating:\nnon-resource variables are not supported in the long term\nwandb: WARNING If you're specifying your api key in code, ensure this code is not shared publically.\nwandb: WARNING Consider setting the WANDB_API_KEY environment variable, or running `wandb login` from 
the command line.\nwandb: WARNING Calling wandb.login() without arguments from jupyter should prompt you for an api key.\nwandb: Appending key for api.wandb.ai to your netrc file: /Users/victor/.netrc\n" ], [ "load_dotenv(find_dotenv())", "_____no_output_____" ], [ "fetch_dataset(project_dir='../',download_from_kaggle=True,\\\n kaggle_dataset='lakshmi25npathi/imdb-dataset-of-50k-movie-reviews')", "I0806 14:59:06.972804 4539198912 fetch_dataset.py:16] making final data set from raw data\nI0806 14:59:06.973808 4539198912 fetch_dataset.py:21] project directory ../\nI0806 14:59:06.974637 4539198912 fetch_dataset.py:30] output path ../data/raw\nI0806 14:59:13.335819 4539198912 fetch_dataset.py:95] download complete\n" ], [ "parser = argparse.ArgumentParser(prog='Torch trainer function',conflict_handler='resolve')\n\nparser.add_argument('--train_data', type=str, default='../data/raw/IMDB Dataset.csv', required=False,\n help='train data')\nparser.add_argument('--val_data', type=str, default='', required=False,\n help='validation data')\nparser.add_argument('--test_data', type=str, default=None, required=False,\n help='test data')\n\nparser.add_argument('--task_type', type=str, default='binary_sequence_classification', required=False,\n help='type of task')\n\nparser.add_argument('--transformer_model_pretrained_path', type=str, default='roberta-base', required=False,\n help='transformer model pretrained path or huggingface model name')\nparser.add_argument('--transformer_config_path', type=str, default='roberta-base', required=False,\n help='transformer config file path or huggingface model name')\nparser.add_argument('--transformer_tokenizer_path', type=str, default='roberta-base', required=False,\n help='transformer tokenizer file path or huggingface model name')\nparser.add_argument('--bpe_vocab_path', type=str, default='', required=False,\n help='bytepairencoding vocab file path')\nparser.add_argument('--bpe_merges_path', type=str, default='', required=False,\n help='bytepairencoding merges file path')\nparser.add_argument('--berttweettokenizer_path', type=str, default='', required=False,\n help='BERTweet tokenizer path')\n\nparser.add_argument('--max_text_len', type=int, default=100, required=False,\n help='maximum length of text')\nparser.add_argument('--epochs', type=int, default=5, required=False,\n help='number of epochs')\nparser.add_argument('--lr', type=float, default=.00003, required=False,\n help='learning rate')\nparser.add_argument('--loss_function', type=str, default='bcelogit', required=False,\n help='loss function')\nparser.add_argument('--metric', type=str, default='f1', required=False,\n help='scorer metric')\n\nparser.add_argument('--use_lightning_trainer', type=bool, default=False, required=False,\n help='if lightning trainer needs to be used')\nparser.add_argument('--use_torch_trainer', type=bool, default=True, required=False,\n help='if custom torch trainer needs to be used')\nparser.add_argument('--use_apex', type=bool, default=False, required=False,\n help='if apex needs to be used')\nparser.add_argument('--use_gpu', type=bool, default=False, required=False,\n help='GPU mode')\nparser.add_argument('--use_TPU', type=bool, default=False, required=False,\n help='TPU mode')\nparser.add_argument('--num_gpus', type=int, default=0, required=False,\n help='Number of GPUs')\nparser.add_argument('--num_tpus', type=int, default=0, required=False,\n help='Number of TPUs')\n\nparser.add_argument('--train_batch_size', type=int, default=16, required=False,\n help='train batch 
size')\nparser.add_argument('--eval_batch_size', type=int, default=16, required=False,\n help='eval batch size')\n\nparser.add_argument('--model_save_path', type=str, default='../models/sentiment_classification/', required=False,\n help='seed')\n\nparser.add_argument('--wandb_logging', type=bool, default=False, required=False,\n help='wandb logging needed')\n\nparser.add_argument('--seed', type=int, default=42, required=False,\n help='seed')\n\nargs, _ = parser.parse_known_args()\n\nprint (\"Wandb Logging: {}, GPU: {}, Pytorch Lightning: {}, TPU: {}, Apex: {}\".format(\\\n _has_wandb and args.wandb_logging, _torch_gpu_available,\\\n _torch_lightning_available and args.use_lightning_trainer, _torch_tpu_available, _has_apex))", "Wandb Logging: False, GPU: False, Pytorch Lightning: False, TPU: False, Apex: False\n" ], [ "reshape = False\nfinal_activation = None\nconvert_output = None\n\nif args.task_type == 'binary_sequence_classification':\n if args.metric != 'roc_auc_score': \n convert_output = 'round'\n if args.loss_function == 'bcelogit':\n final_activation = 'sigmoid'\n \nelif args.task_type == 'multiclass_sequence_classification':\n convert_output = 'max'\n \nelif args.task_type == 'binary_token_classification':\n reshape = True\n if args.metric != 'roc_auc_score': \n convert_output = 'round'\n if args.loss_function == 'bcelogit':\n final_activation = 'sigmoid'\n \nelif args.task_type == 'multiclass_token_classification':\n reshape = True\n convert_output = 'max'", "_____no_output_____" ], [ "df = load_data.load_pandas_df(args.train_data,sep=',')\ndf = df.iloc[:1000]", "_____no_output_____" ], [ "df.head(5)", "_____no_output_____" ], [ "model_save_dir = args.model_save_path\ntry:\n os.makedirs(model_save_dir)\nexcept OSError:\n pass", "_____no_output_____" ], [ "df.sentiment, label2idx = data_utils.convert_categorical_label_to_int(df.sentiment, \\\n save_path=os.path.join(model_save_dir,'label2idx.pkl'))", "_____no_output_____" ], [ "df.head(5)", "_____no_output_____" ], [ "from sklearn.model_selection import KFold\n\nkf = KFold(5)\n\nfor train_index, val_index in kf.split(df.review, df.sentiment):\n break\n \ntrain_df = df.iloc[train_index].reset_index(drop=True)\nval_df = df.iloc[val_index].reset_index(drop=True)", "_____no_output_____" ], [ "train_df.shape, val_df.shape", "_____no_output_____" ], [ "if args.berttweettokenizer_path:\n tokenizer = BERTweetTokenizer(args.berttweettokenizer_path)\nelse:\n tokenizer = AutoTokenizer.from_pretrained(args.transformer_model_pretrained_path)\n\nif not args.berttweettokenizer_path:\n try:\n bpetokenizer = tokenizers.ByteLevelBPETokenizer(args.bpe_vocab_path, \\\n args.bpe_merges_path)\n except:\n bpetokenizer = None \nelse:\n bpetokenizer = None", "I0806 14:59:18.932564 4539198912 configuration_utils.py:283] loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json from cache at /Users/victor/.cache/torch/transformers/e1a2a406b5a05063c31f4dfdee7608986ba7c6393f7f79db5e69dcd197208534.117c81977c5979de8c088352e74ec6e70f5c66096c28b61d3c50101609b39690\nI0806 14:59:18.933578 4539198912 configuration_utils.py:319] Model config RobertaConfig {\n \"_num_labels\": 2,\n \"architectures\": [\n \"RobertaForMaskedLM\"\n ],\n \"attention_probs_dropout_prob\": 0.1,\n \"bad_words_ids\": null,\n \"bos_token_id\": 0,\n \"decoder_start_token_id\": null,\n \"do_sample\": false,\n \"early_stopping\": false,\n \"eos_token_id\": 2,\n \"finetuning_task\": null,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n 
\"hidden_size\": 768,\n \"id2label\": {\n \"0\": \"LABEL_0\",\n \"1\": \"LABEL_1\"\n },\n \"initializer_range\": 0.02,\n \"intermediate_size\": 3072,\n \"is_decoder\": false,\n \"is_encoder_decoder\": false,\n \"label2id\": {\n \"LABEL_0\": 0,\n \"LABEL_1\": 1\n },\n \"layer_norm_eps\": 1e-05,\n \"length_penalty\": 1.0,\n \"max_length\": 20,\n \"max_position_embeddings\": 514,\n \"min_length\": 0,\n \"model_type\": \"roberta\",\n \"no_repeat_ngram_size\": 0,\n \"num_attention_heads\": 12,\n \"num_beams\": 1,\n \"num_hidden_layers\": 12,\n \"num_return_sequences\": 1,\n \"output_attentions\": false,\n \"output_hidden_states\": false,\n \"output_past\": true,\n \"pad_token_id\": 1,\n \"prefix\": null,\n \"pruned_heads\": {},\n \"repetition_penalty\": 1.0,\n \"task_specific_params\": null,\n \"temperature\": 1.0,\n \"top_k\": 50,\n \"top_p\": 1.0,\n \"torchscript\": false,\n \"type_vocab_size\": 1,\n \"use_bfloat16\": false,\n \"vocab_size\": 50265\n}\n\nI0806 14:59:21.161639 4539198912 tokenization_utils.py:504] loading file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json from cache at /Users/victor/.cache/torch/transformers/d0c5776499adc1ded22493fae699da0971c1ee4c2587111707a4d177d20257a2.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b\nI0806 14:59:21.162390 4539198912 tokenization_utils.py:504] loading file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt from cache at /Users/victor/.cache/torch/transformers/b35e7cd126cd4229a746b5d5c29a749e8e84438b14bcdb575950584fe33207e8.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda\n" ], [ "train_dataset = data_utils.TransformerDataset(train_df.review, bpetokenizer=bpetokenizer, tokenizer=tokenizer, MAX_LEN=args.max_text_len, \\\n target_label=train_df.sentiment, sequence_target=False, target_text=None, conditional_label=None, conditional_all_labels=None)\n\nval_dataset = data_utils.TransformerDataset(val_df.review, bpetokenizer=bpetokenizer, tokenizer=tokenizer, MAX_LEN=args.max_text_len, \\\n target_label=val_df.sentiment, sequence_target=False, target_text=None, conditional_label=None, conditional_all_labels=None)", "_____no_output_____" ], [ "config = AutoConfig.from_pretrained(args.transformer_config_path, output_hidden_states=True, output_attentions=True)\nbasemodel = AutoModel.from_pretrained(args.transformer_model_pretrained_path,config=config)\nmodel = transformer_models.TransformerWithCLS(basemodel)", "I0806 14:59:30.627707 4539198912 configuration_utils.py:283] loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json from cache at /Users/victor/.cache/torch/transformers/e1a2a406b5a05063c31f4dfdee7608986ba7c6393f7f79db5e69dcd197208534.117c81977c5979de8c088352e74ec6e70f5c66096c28b61d3c50101609b39690\nI0806 14:59:30.628615 4539198912 configuration_utils.py:319] Model config RobertaConfig {\n \"_num_labels\": 2,\n \"architectures\": [\n \"RobertaForMaskedLM\"\n ],\n \"attention_probs_dropout_prob\": 0.1,\n \"bad_words_ids\": null,\n \"bos_token_id\": 0,\n \"decoder_start_token_id\": null,\n \"do_sample\": false,\n \"early_stopping\": false,\n \"eos_token_id\": 2,\n \"finetuning_task\": null,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 768,\n \"id2label\": {\n \"0\": \"LABEL_0\",\n \"1\": \"LABEL_1\"\n },\n \"initializer_range\": 0.02,\n \"intermediate_size\": 3072,\n \"is_decoder\": false,\n \"is_encoder_decoder\": false,\n \"label2id\": {\n \"LABEL_0\": 0,\n \"LABEL_1\": 1\n 
},\n \"layer_norm_eps\": 1e-05,\n \"length_penalty\": 1.0,\n \"max_length\": 20,\n \"max_position_embeddings\": 514,\n \"min_length\": 0,\n \"model_type\": \"roberta\",\n \"no_repeat_ngram_size\": 0,\n \"num_attention_heads\": 12,\n \"num_beams\": 1,\n \"num_hidden_layers\": 12,\n \"num_return_sequences\": 1,\n \"output_attentions\": true,\n \"output_hidden_states\": true,\n \"output_past\": true,\n \"pad_token_id\": 1,\n \"prefix\": null,\n \"pruned_heads\": {},\n \"repetition_penalty\": 1.0,\n \"task_specific_params\": null,\n \"temperature\": 1.0,\n \"top_k\": 50,\n \"top_p\": 1.0,\n \"torchscript\": false,\n \"type_vocab_size\": 1,\n \"use_bfloat16\": false,\n \"vocab_size\": 50265\n}\n\nI0806 14:59:31.515705 4539198912 modeling_utils.py:507] loading weights file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin from cache at /Users/victor/.cache/torch/transformers/228756ed15b6d200d7cb45aaef08c087e2706f54cb912863d2efe07c89584eb7.49b88ba7ec2c26a7558dda98ca3884c3b80fa31cf43a1b1f23aef3ff81ba344e\n" ], [ "if _torch_tpu_available and args.use_TPU:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=True\n )\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(\n val_dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=False\n )\n\nif _torch_tpu_available and args.use_TPU:\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.train_batch_size, sampler=train_sampler,\n drop_last=True,num_workers=2)\n\n val_data_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=args.eval_batch_size, sampler=val_sampler,\n drop_last=False,num_workers=1)\nelse:\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.train_batch_size)\n\n val_data_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=args.eval_batch_size)", "_____no_output_____" ] ], [ [ "### Run with Pytorch Trainer", "_____no_output_____" ] ], [ [ "if args.use_torch_trainer:\n device = torch.device(\"cuda\" if _torch_gpu_available and args.use_gpu else \"cpu\")\n\n if _torch_tpu_available and args.use_TPU:\n device=xm.xla_device()\n\n print (\"Device: {}\".format(device))\n \n if args.use_TPU and _torch_tpu_available and args.num_tpus > 1:\n train_data_loader = torch_xla.distributed.parallel_loader.ParallelLoader(train_data_loader, [device])\n train_data_loader = train_data_loader.per_device_loader(device)\n\n\n trainer = BasicTrainer(model, train_data_loader, val_data_loader, device, args.transformer_model_pretrained_path, \\\n final_activation=final_activation, \\\n test_data_loader=val_data_loader)\n\n param_optimizer = list(trainer.model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_parameters = [\n {\n \"params\": [\n p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.001,\n },\n {\n \"params\": [\n p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n num_train_steps = int(len(train_data_loader) * args.epochs)\n\n if _torch_tpu_available and args.use_TPU:\n optimizer = AdamW(optimizer_parameters, lr=args.lr*xm.xrt_world_size())\n else:\n optimizer = AdamW(optimizer_parameters, lr=args.lr)\n\n if args.use_apex and _has_apex:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n\n\n scheduler = 
get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)\n \n loss = losses.get_loss(args.loss_function)\n scorer = scorers.SKMetric(args.metric, convert=convert_output, reshape=reshape) \n \n def _mp_fn(rank, flags, trainer, epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \\\n max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed):\n torch.set_default_tensor_type('torch.FloatTensor')\n a = trainer.train(epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \\\n max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed)\n\n FLAGS = {}\n if _torch_tpu_available and args.use_TPU:\n xmp.spawn(_mp_fn, args=(FLAGS, trainer, args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus, \\\n 1, 3, False, args.use_apex, False, args.seed), nprocs=8, start_method='fork')\n else:\n use_wandb = _has_wandb and args.wandb_logging\n trainer.train(args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus, \\\n max_grad_norm=1, early_stopping_rounds=3, snapshot_ensemble=False, is_amp=args.use_apex, use_wandb=use_wandb, seed=args.seed)\n\nelif args.use_lightning_trainer and _torch_lightning_available:\n from pytorch_lightning import Trainer, seed_everything\n seed_everything(args.seed)\n \n loss = losses.get_loss(args.loss_function)\n scorer = scorers.PLMetric(args.metric, convert=convert_output, reshape=reshape)\n \n log_args = {'description': args.transformer_model_pretrained_path, 'loss': loss.__class__.__name__, 'epochs': args.epochs, 'learning_rate': args.lr}\n\n if _has_wandb and not _torch_tpu_available and args.wandb_logging:\n wandb.init(project=\"Project\",config=log_args)\n wandb_logger = WandbLogger()\n\n checkpoint_callback = ModelCheckpoint(\n filepath=args.model_save_path,\n save_top_k=1,\n verbose=True,\n monitor='val_loss',\n mode='min'\n )\n earlystop = EarlyStopping(\n monitor='val_loss',\n patience=3,\n verbose=False,\n mode='min'\n )\n\n if args.use_gpu and _torch_gpu_available:\n print (\"using GPU\")\n if args.wandb_logging:\n if _has_apex:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n if _has_apex:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n elif args.use_TPU and _torch_tpu_available:\n print (\"using TPU\")\n if _has_apex:\n trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n else:\n print (\"using CPU\")\n if args.wandb_logging:\n if _has_apex:\n trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = 
Trainer(max_epochs=args.epochs, logger=wandb_logger, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n if _has_apex:\n trainer = Trainer(max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(max_epochs=args.epochs, checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n num_train_steps = int(len(train_data_loader) * args.epochs)\n\n pltrainer = PLTrainer(num_train_steps, model, scorer, loss, args.lr, \\\n final_activation=final_activation, seed=42)\n\n #try:\n # print (\"Loaded model from previous checkpoint\")\n # pltrainer = PLTrainer.load_from_checkpoint(args.model_save_path)\n #except:\n # pass\n\n trainer.fit(pltrainer, train_data_loader, val_data_loader) ", "\n 0%| | 0/50 [00:00<?, ?it/s]\u001b[A" ], [ "test_output1 = trainer.test_output", "_____no_output_____" ] ], [ [ "### Run with Pytorch Lightning Trainer", "_____no_output_____" ] ], [ [ "parser = argparse.ArgumentParser(prog='Torch trainer function',conflict_handler='resolve')\n\nparser.add_argument('--train_data', type=str, default='../data/raw/IMDB Dataset.csv', required=False,\n help='train data')\nparser.add_argument('--val_data', type=str, default='', required=False,\n help='validation data')\nparser.add_argument('--test_data', type=str, default=None, required=False,\n help='test data')\n\nparser.add_argument('--transformer_model_pretrained_path', type=str, default='roberta-base', required=False,\n help='transformer model pretrained path or huggingface model name')\nparser.add_argument('--transformer_config_path', type=str, default='roberta-base', required=False,\n help='transformer config file path or huggingface model name')\nparser.add_argument('--transformer_tokenizer_path', type=str, default='roberta-base', required=False,\n help='transformer tokenizer file path or huggingface model name')\nparser.add_argument('--bpe_vocab_path', type=str, default='', required=False,\n help='bytepairencoding vocab file path')\nparser.add_argument('--bpe_merges_path', type=str, default='', required=False,\n help='bytepairencoding merges file path')\nparser.add_argument('--berttweettokenizer_path', type=str, default='', required=False,\n help='BERTweet tokenizer path')\n\nparser.add_argument('--max_text_len', type=int, default=100, required=False,\n help='maximum length of text')\nparser.add_argument('--epochs', type=int, default=5, required=False,\n help='number of epochs')\nparser.add_argument('--lr', type=float, default=.00003, required=False,\n help='learning rate')\nparser.add_argument('--loss_function', type=str, default='bcelogit', required=False,\n help='loss function')\nparser.add_argument('--metric', type=str, default='f1', required=False,\n help='scorer metric')\n\nparser.add_argument('--use_lightning_trainer', type=bool, default=True, required=False,\n help='if lightning trainer needs to be used')\nparser.add_argument('--use_torch_trainer', type=bool, default=False, required=False,\n help='if custom torch trainer needs to be used')\nparser.add_argument('--use_apex', type=bool, default=False, required=False,\n help='if apex needs to be used')\nparser.add_argument('--use_gpu', type=bool, default=False, required=False,\n help='GPU mode')\nparser.add_argument('--use_TPU', type=bool, default=False, required=False,\n help='TPU mode')\nparser.add_argument('--num_gpus', type=int, default=0, required=False,\n help='Number of GPUs')\nparser.add_argument('--num_tpus', type=int, default=0, required=False,\n 
help='Number of TPUs')\n\nparser.add_argument('--train_batch_size', type=int, default=16, required=False,\n help='train batch size')\nparser.add_argument('--eval_batch_size', type=int, default=16, required=False,\n help='eval batch size')\n\nparser.add_argument('--model_save_path', type=str, default='../models/sentiment_classification/', required=False,\n help='seed')\n\nparser.add_argument('--wandb_logging', type=bool, default=False, required=False,\n help='wandb logging needed')\n\nparser.add_argument('--seed', type=int, default=42, required=False,\n help='seed')\n\nargs, _ = parser.parse_known_args()\n\nprint (\"Wandb Logging: {}, GPU: {}, Pytorch Lightning: {}, TPU: {}, Apex: {}\".format(\\\n _has_wandb and args.wandb_logging, _torch_gpu_available,\\\n _torch_lightning_available and args.use_lightning_trainer, _torch_tpu_available, _has_apex))", "Wandb Logging: False, GPU: False, Pytorch Lightning: True, TPU: False, Apex: False\n" ], [ "if args.use_torch_trainer:\n device = torch.device(\"cuda\" if _torch_gpu_available and args.use_gpu else \"cpu\")\n\n if _torch_tpu_available and args.use_TPU:\n device=xm.xla_device()\n\n print (\"Device: {}\".format(device))\n \n if args.use_TPU and _torch_tpu_available and args.num_tpus > 1:\n train_data_loader = torch_xla.distributed.parallel_loader.ParallelLoader(train_data_loader, [device])\n train_data_loader = train_data_loader.per_device_loader(device)\n\n\n trainer = BasicTrainer(model, train_data_loader, val_data_loader, device, args.transformer_model_pretrained_path, \\\n final_activation=final_activation, \\\n test_data_loader=val_data_loader)\n\n param_optimizer = list(trainer.model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_parameters = [\n {\n \"params\": [\n p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.001,\n },\n {\n \"params\": [\n p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n num_train_steps = int(len(train_data_loader) * args.epochs)\n\n if _torch_tpu_available and args.use_TPU:\n optimizer = AdamW(optimizer_parameters, lr=args.lr*xm.xrt_world_size())\n else:\n optimizer = AdamW(optimizer_parameters, lr=args.lr)\n\n if args.use_apex and _has_apex:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n\n\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)\n \n loss = losses.get_loss(args.loss_function)\n scorer = scorers.SKMetric(args.metric, convert=convert_output, reshape=reshape) \n \n def _mp_fn(rank, flags, trainer, epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \\\n max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed):\n torch.set_default_tensor_type('torch.FloatTensor')\n a = trainer.train(epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \\\n max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed)\n\n FLAGS = {}\n if _torch_tpu_available and args.use_TPU:\n xmp.spawn(_mp_fn, args=(FLAGS, trainer, args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus, \\\n 1, 3, False, args.use_apex, False, args.seed), nprocs=8, start_method='fork')\n else:\n use_wandb = _has_wandb and args.wandb_logging\n trainer.train(args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, 
args.num_gpus, args.num_tpus, \\\n max_grad_norm=1, early_stopping_rounds=3, snapshot_ensemble=False, is_amp=args.use_apex, use_wandb=use_wandb, seed=args.seed)\n\nelif args.use_lightning_trainer and _torch_lightning_available:\n from pytorch_lightning import Trainer, seed_everything\n seed_everything(args.seed)\n \n loss = losses.get_loss(args.loss_function)\n scorer = scorers.PLMetric(args.metric, convert=convert_output, reshape=reshape)\n \n log_args = {'description': args.transformer_model_pretrained_path, 'loss': loss.__class__.__name__, 'epochs': args.epochs, 'learning_rate': args.lr}\n\n if _has_wandb and not _torch_tpu_available and args.wandb_logging:\n wandb.init(project=\"Project\",config=log_args)\n wandb_logger = WandbLogger()\n\n checkpoint_callback = ModelCheckpoint(\n filepath=args.model_save_path,\n save_top_k=1,\n verbose=True,\n monitor='val_loss',\n mode='min'\n )\n earlystop = EarlyStopping(\n monitor='val_loss',\n patience=3,\n verbose=False,\n mode='min'\n )\n\n if args.use_gpu and _torch_gpu_available:\n print (\"using GPU\")\n if args.wandb_logging:\n if _has_apex:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n if _has_apex:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n elif args.use_TPU and _torch_tpu_available:\n print (\"using TPU\")\n if _has_apex:\n trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n else:\n print (\"using CPU\")\n if args.wandb_logging:\n if _has_apex:\n trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n if _has_apex:\n trainer = Trainer(max_epochs=args.epochs, precision=16, \\\n checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n else:\n trainer = Trainer(max_epochs=args.epochs, checkpoint_callback=checkpoint_callback, callbacks=[earlystop])\n\n num_train_steps = int(len(train_data_loader) * args.epochs)\n\n pltrainer = PLTrainer(num_train_steps, model, scorer, loss, args.lr, \\\n final_activation=final_activation, seed=42)\n\n #try:\n # print (\"Loaded model from previous checkpoint\")\n # pltrainer = PLTrainer.load_from_checkpoint(args.model_save_path)\n #except:\n # pass\n\n trainer.fit(pltrainer, train_data_loader, val_data_loader) ", "GPU available: False, used: False\nI0806 15:52:24.844974 4539198912 distributed.py:29] GPU available: False, used: False\nTPU available: False, using: 0 TPU cores\nI0806 15:52:24.846642 4539198912 distributed.py:29] TPU available: False, using: 0 TPU cores\n" ], [ "from tqdm import tqdm\n\ntest_output2 = []\n\nfor val_batch in tqdm(val_data_loader):\n out = 
torch.sigmoid(pltrainer(val_batch)).detach().cpu().numpy()\n test_output2.extend(out[:,0].tolist())\n \n#test_output2 = np.concatenate(test_output2)", "\n 0%| | 0/13 [00:00<?, ?it/s]\u001b[A\n 8%|▊ | 1/13 [00:02<00:27, 2.33s/it]\u001b[A\n 15%|█▌ | 2/13 [00:04<00:23, 2.17s/it]\u001b[A\n 23%|██▎ | 3/13 [00:05<00:20, 2.08s/it]\u001b[A\n 31%|███ | 4/13 [00:08<00:18, 2.07s/it]\u001b[A\n 38%|███▊ | 5/13 [00:10<00:16, 2.06s/it]\u001b[A\n 46%|████▌ | 6/13 [00:12<00:14, 2.03s/it]\u001b[A\n 54%|█████▍ | 7/13 [00:13<00:11, 1.97s/it]\u001b[A\n 62%|██████▏ | 8/13 [00:15<00:09, 1.92s/it]\u001b[A\n 69%|██████▉ | 9/13 [00:17<00:07, 1.91s/it]\u001b[A\n 77%|███████▋ | 10/13 [00:19<00:05, 1.88s/it]\u001b[A\n 85%|████████▍ | 11/13 [00:21<00:03, 1.86s/it]\u001b[A\n 92%|█████████▏| 12/13 [00:23<00:01, 1.85s/it]\u001b[A\n100%|██████████| 13/13 [00:23<00:00, 1.84s/it]\u001b[A\n" ], [ "test_output1 = np.array(test_output1)[:,0]\ntest_output2 = np.array(test_output2)\nnp.corrcoef(test_output1,test_output2)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cbc4135195a113337aa6d009da436bdf1de56cc3
352,379
ipynb
Jupyter Notebook
examples/visualization.ipynb
ahcantao/pyswarms
1422eb7ad3b8641de83b39dc36ce7b09858e2440
[ "MIT" ]
null
null
null
examples/visualization.ipynb
ahcantao/pyswarms
1422eb7ad3b8641de83b39dc36ce7b09858e2440
[ "MIT" ]
null
null
null
examples/visualization.ipynb
ahcantao/pyswarms
1422eb7ad3b8641de83b39dc36ce7b09858e2440
[ "MIT" ]
1
2018-12-26T12:15:09.000Z
2018-12-26T12:15:09.000Z
90.516054
20,776
0.840618
[ [ [ "# Visualization\nPySwarms implements tools for visualizing the behavior of your swarm. These are built on top of `matplotlib`, thus rendering charts that are easy to use and highly-customizable. However, it must be noted that in order to use the animation capability in PySwarms (and in `matplotlib` for that matter), at least one writer tool must be installed. Some available tools include:\n* ffmpeg\n* ImageMagick\n* MovieWriter (base)\n\nIn the following demonstration, the `ffmpeg` tool is used. For Linux and Windows users, it can be installed via:\n```shell\n$ conda install -c conda-forge ffmpeg\n```", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('../')", "_____no_output_____" ] ], [ [ "First, we need to import the `pyswarms.utils.environments.PlotEnvironment` class. This enables us to use various methods to create animations or plot costs.", "_____no_output_____" ] ], [ [ "# Import modules\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import animation, rc\nfrom IPython.display import HTML\n\n# Import PySwarms\nimport pyswarms as ps\nfrom pyswarms.utils.functions import single_obj as fx\nfrom pyswarms.utils.environments import PlotEnvironment\n\n# Some more magic so that the notebook will reload external python modules;\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "The first step is to create an optimizer. Here, we're going to use Global-best PSO to find the minima of a sphere function. As usual, we simply create an instance of its class `pyswarms.single.GlobalBestPSO` by passing the required parameters that we will use.", "_____no_output_____" ] ], [ [ "options = {'c1':0.5, 'c2':0.3, 'w':0.9}\noptimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=3, options=options)", "_____no_output_____" ] ], [ [ "## Initializing the `PlotEnvironment`\n\nThink of the `PlotEnvironment` as a container in which various plotting methods can be called. In order to create an instance of this class, we need to pass the optimizer object, the objective function, and the number of iterations needed. The `PlotEnvironment` will then simulate these parameters so as to build the plots.", "_____no_output_____" ] ], [ [ "plt_env = PlotEnvironment(optimizer, fx.sphere_func, 1000)", "_____no_output_____" ] ], [ [ "## Plotting the cost\n\nTo plot the cost, we simply need to call the `plot_cost()` function. There are pre-set defaults in this method already, but we can customize by passing various arguments into it such as figure size, title, x- and y-labels and etc. Furthermore, this method also accepts a keyword argument `**kwargs` similar to `matplotlib`. This enables us to further customize various artists and elements in the plot. \n\nFor now, let's stick with the default one. We'll just call the `plot_cost()` and `show()` it.", "_____no_output_____" ] ], [ [ "plt_env.plot_cost(figsize=(8,6));\nplt.show()", "_____no_output_____" ] ], [ [ "## Animating swarms\nThe `PlotEnvironment()` offers two methods to perform animation, `plot_particles2D()` and `plot_particles3D()`. As its name suggests, these methods plot the particles in a 2-D or 3-D space. You can choose which dimensions will be plotted using the `index` argument, but the default takes the first 2 (or first three in 3D) indices of your swarm dimension. 
\n\nEach animation method returns a `matplotlib.animation.Animation` class that still needs to be animated by a `Writer` class (thus necessitating the installation of a writer module). For the following examples, we will convert the animations into an HTML5 video. In such a case, we need to invoke some extra methods to do just that.", "_____no_output_____" ] ], [ [ "# equivalent to rcParams['animation.html'] = 'html5'\n# See http://louistiao.me/posts/notebooks/save-matplotlib-animations-as-gifs/\nrc('animation', html='html5')", "_____no_output_____" ] ], [ [ "### Plotting in 2-D space\n", "_____no_output_____" ] ], [ [ "HTML(plt_env.plot_particles2D(limits=((-1.2,1.2),(-1.2,1.2))).to_html5_video())", "_____no_output_____" ] ], [ [ "### Plotting in 3-D space", "_____no_output_____" ] ], [ [ "HTML(plt_env.plot_particles3D(limits=((-1.2,1.2),(-1.2,1.2),(-1.2,1.2))).to_html5_video())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc4136abd9d12344fcb51d87c86c45c0d1a65b1
125,799
ipynb
Jupyter Notebook
PlotlyCandlestick.ipynb
dansarmiento/ColaboratoryRunningAnalysis
9badf15e40df34ab4d7cd65fb9b99a1a59bfd6eb
[ "MIT" ]
null
null
null
PlotlyCandlestick.ipynb
dansarmiento/ColaboratoryRunningAnalysis
9badf15e40df34ab4d7cd65fb9b99a1a59bfd6eb
[ "MIT" ]
null
null
null
PlotlyCandlestick.ipynb
dansarmiento/ColaboratoryRunningAnalysis
9badf15e40df34ab4d7cd65fb9b99a1a59bfd6eb
[ "MIT" ]
null
null
null
140.557542
86,491
0.664401
[ [ [ "<a href=\"https://colab.research.google.com/github/dansarmiento/ColaboratoryRunningAnalysis/blob/main/PlotlyCandlestick.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip install yfinance", "Collecting yfinance\n Downloading yfinance-0.1.70-py2.py3-none-any.whl (26 kB)\nRequirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.3.5)\nCollecting requests>=2.26\n Downloading requests-2.27.1-py2.py3-none-any.whl (63 kB)\n\u001b[K |████████████████████████████████| 63 kB 1.4 MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.21.5)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance) (0.0.10)\nCollecting lxml>=4.5.1\n Downloading lxml-4.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.4 MB)\n\u001b[K |████████████████████████████████| 6.4 MB 16.6 MB/s \n\u001b[?25hRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->yfinance) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->yfinance) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.24.0->yfinance) (1.15.0)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.26->yfinance) (2021.10.8)\nRequirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.7/dist-packages (from requests>=2.26->yfinance) (2.0.12)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.26->yfinance) (1.24.3)\nRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.26->yfinance) (2.10)\nInstalling collected packages: requests, lxml, yfinance\n Attempting uninstall: requests\n Found existing installation: requests 2.23.0\n Uninstalling requests-2.23.0:\n Successfully uninstalled requests-2.23.0\n Attempting uninstall: lxml\n Found existing installation: lxml 4.2.6\n Uninstalling lxml-4.2.6:\n Successfully uninstalled lxml-4.2.6\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\ngoogle-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.27.1 which is incompatible.\ndatascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\u001b[0m\nSuccessfully installed lxml-4.8.0 requests-2.27.1 yfinance-0.1.70\n" ], [ "import yfinance as yf", "_____no_output_____" ], [ "mdrx = yf.Ticker('MDRX')\n\nhist = mdrx.history(periods='max', auto_adjust=True)\nhist.describe()", "_____no_output_____" ], [ "df = yf.download(\"MDRX\", start=\"2019-1-1\",end=\"2022-3-30\")", "\r[*********************100%***********************] 1 of 1 completed\n" ], [ "import plotly.graph_objects as go \nimport pandas as pd \ndf.head()", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "# Create an interactive candlestick chart\nfigure = go.Figure(\n data = [go.Candlestick(\n x = df.index,\n low = df.Low, high = df.High, close = df.Close, open = df.Open,\n increasing_line_color = 'green', decreasing_line_color = 'red'\n )\n ] \n )\nfigure.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc413cf6c855618e724b726e7d115c7522ab2e6
35,386
ipynb
Jupyter Notebook
advanced_functionality/distributed_tensorflow_mask_rcnn/mask-rcnn-experiment-trials.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
5
2019-01-19T23:53:35.000Z
2022-01-29T14:04:31.000Z
advanced_functionality/distributed_tensorflow_mask_rcnn/mask-rcnn-experiment-trials.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
2
2021-08-25T16:15:24.000Z
2022-02-10T02:49:50.000Z
advanced_functionality/distributed_tensorflow_mask_rcnn/mask-rcnn-experiment-trials.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
7
2020-03-04T22:23:51.000Z
2021-07-13T14:05:46.000Z
39.230599
526
0.526395
[ [ [ "# Amazon SageMaker Experiment Trials for Distirbuted Training of Mask-RCNN\n\nThis notebook is a step-by-step tutorial on Amazon SageMaker Experiment Trials for distributed tranining of [Mask R-CNN](https://arxiv.org/abs/1703.06870) implemented in [TensorFlow](https://www.tensorflow.org/) framework. \n\nConcretely, we will describe the steps for SagerMaker Experiment Trials for training [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) and [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) in [Amazon SageMaker](https://aws.amazon.com/sagemaker/) using [Amazon S3](https://aws.amazon.com/s3/) as data source.\n\nThe outline of steps is as follows:\n\n1. Stage COCO 2017 dataset in [Amazon S3](https://aws.amazon.com/s3/)\n2. Build SageMaker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/)\n3. Configure data input channels\n4. Configure hyper-prarameters\n5. Define training metrics\n6. Define training job \n7. Define SageMaker Experiment Trials to start the training jobs\n\nBefore we get started, let us initialize two python variables ```aws_region``` and ```s3_bucket``` that we will use throughout the notebook:", "_____no_output_____" ] ], [ [ "aws_region = # aws-region-code e.g. us-east-1\ns3_bucket = # your-s3-bucket-name", "_____no_output_____" ] ], [ [ "## Stage COCO 2017 dataset in Amazon S3\n\nWe use [COCO 2017 dataset](http://cocodataset.org/#home) for training. We download COCO 2017 training and validation dataset to this notebook instance, extract the files from the dataset archives, and upload the extracted files to your Amazon [S3 bucket](https://docs.aws.amazon.com/en_pv/AmazonS3/latest/gsg/CreatingABucket.html) with the prefix ```mask-rcnn/sagemaker/input/train```. The ```prepare-s3-bucket.sh``` script executes this step.\n", "_____no_output_____" ] ], [ [ "!cat ./prepare-s3-bucket.sh", "_____no_output_____" ] ], [ [ " Using your *Amazon S3 bucket* as argument, run the cell below. If you have already uploaded COCO 2017 dataset to your Amazon S3 bucket *in this AWS region*, you may skip this step. The expected time to execute this step is 20 minutes.", "_____no_output_____" ] ], [ [ "%%time\n!./prepare-s3-bucket.sh {s3_bucket}", "_____no_output_____" ] ], [ [ "## Build and push SageMaker training images\n\nFor this step, the [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) attached to this notebook instance needs full access to Amazon ECR service. If you created this notebook instance using the ```./stack-sm.sh``` script in this repository, the IAM Role attached to this notebook instance is already setup with full access to ECR service. \n\nBelow, we have a choice of two different implementations:\n\n1. [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) implementation supports a maximum per-GPU batch size of 1, and does not support mixed precision. It can be used with mainstream TensorFlow releases.\n\n2. [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) is an optimized implementation that supports a maximum batch size of 4 and supports mixed precision. This implementation uses custom TensorFlow ops. 
The required custom TensorFlow ops are available in [AWS Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) images in ```tensorflow-training``` repository with image tag ```1.15.2-gpu-py36-cu100-ubuntu18.04```, or later. \n\nIt is recommended that you build and push both SageMaker training images and use either image for training later.\n", "_____no_output_____", "### TensorPack Faster-RCNN/Mask-RCNN\n\nUse the ```./container/build_tools/build_and_push.sh``` script to build and push the TensorPack Faster-RCNN/Mask-RCNN training image to Amazon ECR. ", "_____no_output_____" ] ], [ [ "!cat ./container/build_tools/build_and_push.sh", "_____no_output_____" ] ], [ [ "Using your *AWS region* as argument, run the cell below.", "_____no_output_____" ] ], [ [ "%%time\n! ./container/build_tools/build_and_push.sh {aws_region}", "_____no_output_____" ] ], [ [ "Set ```tensorpack_image``` below to the Amazon ECR URI of the image you pushed above.", "_____no_output_____" ] ], [ [ "tensorpack_image = # mask-rcnn-tensorpack-sagemaker ECR URI", "_____no_output_____" ] ], [ [ "### AWS Samples Mask R-CNN\nUse the ```./container-optimized/build_tools/build_and_push.sh``` script to build and push the AWS Samples Mask R-CNN training image to Amazon ECR.", "_____no_output_____" ] ], [ [ "!cat ./container-optimized/build_tools/build_and_push.sh", "_____no_output_____" ] ], [ [ "Using your *AWS region* as argument, run the cell below.", "_____no_output_____" ] ], [ [ "%%time\n! ./container-optimized/build_tools/build_and_push.sh {aws_region}", "_____no_output_____" ] ], [ [ " Set ```aws_samples_image``` below to the Amazon ECR URI of the image you pushed above.", "_____no_output_____" ] ], [ [ "aws_samples_image = # mask-rcnn-tensorflow-sagemaker ECR URI", "_____no_output_____" ] ], [ [ "## SageMaker Initialization \nFirst we upgrade SageMaker to 2.3.0 API. If your notebook is already using the latest SageMaker 2.x API, you may skip the next cell.\n", "_____no_output_____" ] ], [ [ "! pip install --upgrade pip\n! pip install sagemaker==2.3.0", "_____no_output_____" ] ], [ [ "We have staged the data and we have built and pushed the training docker image to Amazon ECR. Now we are ready to start using Amazon SageMaker.", "_____no_output_____" ] ], [ [ "%%time\nimport os\nimport time\nimport boto3\nimport sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker.estimator import Estimator\n\nrole = get_execution_role() # provide a pre-existing role ARN as an alternative to creating a new role\nprint(f'SageMaker Execution Role:{role}')\n\nclient = boto3.client('sts')\naccount = client.get_caller_identity()['Account']\nprint(f'AWS account:{account}')\n\nsession = boto3.session.Session()\nregion = session.region_name\nprint(f'AWS region:{region}')", "_____no_output_____" ] ], [ [ "Next, we set ```training_image``` to the Amazon ECR image URI you saved in a previous step. ", "_____no_output_____" ] ], [ [ "training_image = # set to tensorpack_image or aws_samples_image \nprint(f'Training image: {training_image}')", "_____no_output_____" ] ], [ [ "## Define SageMaker Data Channels\n\nNext, we define the *train* data channel using EFS file-system. To do so, we need to specify the EFS file-system id, which is shown in the output of the command below.", "_____no_output_____" ] ], [ [ "!df -kh | grep 'fs-' | sed 's/\\(fs-[0-9a-z]*\\).*/\\1/'", "_____no_output_____" ] ], [ [ "Set the EFS ```file_system_id``` below to the output of the command shown above. 
In the cell below, we define the `train` data input channel.", "_____no_output_____" ] ], [ [ "from sagemaker.inputs import FileSystemInput\n\n# Specify EFS file system id.\nfile_system_id = # 'fs-xxxxxxxx'\nprint(f\"EFS file-system-id: {file_system_id}\")\n\n# Specify directory path for input data on the file system. \n# You need to provide normalized and absolute path below.\nfile_system_directory_path = '/mask-rcnn/sagemaker/input/train'\nprint(f'EFS file-system data input path: {file_system_directory_path}')\n\n# Specify the access mode of the mount of the directory associated with the file system. \n# Directory must be mounted 'ro'(read-only).\nfile_system_access_mode = 'ro'\n\n# Specify your file system type\nfile_system_type = 'EFS'\n\ntrain = FileSystemInput(file_system_id=file_system_id,\n                        file_system_type=file_system_type,\n                        directory_path=file_system_directory_path,\n                        file_system_access_mode=file_system_access_mode)", "_____no_output_____" ] ], [ [ "Next, we define the model output location in the S3 bucket.", "_____no_output_____" ] ], [ [ "prefix = \"mask-rcnn/sagemaker\" #prefix in your bucket\ns3_output_location = f's3://{s3_bucket}/{prefix}/output'\nprint(f'S3 model output location: {s3_output_location}')", "_____no_output_____" ] ], [ [ "## Configure Hyper-parameters\nNext, we define the hyper-parameters. \n\nNote, some hyper-parameters are different between the two implementations. The batch size per GPU in TensorPack Faster-RCNN/Mask-RCNN is fixed at 1, but is configurable in AWS Samples Mask-RCNN. The learning rate schedule is specified in units of steps in TensorPack Faster-RCNN/Mask-RCNN, but in epochs in AWS Samples Mask-RCNN.\n\nThe default learning rate schedule values shown below correspond to training for a total of 24 epochs, at 120,000 images per epoch.\n\n<table align='left'>\n    <caption>TensorPack Faster-RCNN/Mask-RCNN Hyper-parameters</caption>\n    <tr>\n    <th style=\"text-align:center\">Hyper-parameter</th>\n    <th style=\"text-align:center\">Description</th>\n    <th style=\"text-align:center\">Default</th>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">mode_fpn</td>\n        <td style=\"text-align:left\">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>\n        <td style=\"text-align:center\">\"True\"</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">mode_mask</td>\n        <td style=\"text-align:left\">A value of \"False\" means Faster-RCNN model, \"True\" means Mask R-CNN model</td>\n        <td style=\"text-align:center\">\"True\"</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">eval_period</td>\n        <td style=\"text-align:left\">Number of epochs period for evaluation during training</td>\n        <td style=\"text-align:center\">1</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">lr_schedule</td>\n        <td style=\"text-align:left\">Learning rate schedule in training steps</td>\n        <td style=\"text-align:center\">'[240000, 320000, 360000]'</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">batch_norm</td>\n        <td style=\"text-align:left\">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>\n        <td style=\"text-align:center\">'FreezeBN'</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">images_per_epoch</td>\n        <td style=\"text-align:left\">Images per epoch </td>\n        <td style=\"text-align:center\">120000</td>\n    </tr>\n    <tr>\n        <td style=\"text-align:center\">data_train</td>\n        <td style=\"text-align:left\">Training data under data directory</td>\n        <td style=\"text-align:center\">'coco_train2017'</td>\n    </tr>\n    <tr>\n        <td 
style=\"text-align:center\">data_val</td>\n <td style=\"text-align:left\">Validation data under data directory</td>\n <td style=\"text-align:center\">'coco_val2017'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">resnet_arch</td>\n <td style=\"text-align:left\">Must be 'resnet50' or 'resnet101'</td>\n <td style=\"text-align:center\">'resnet50'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">backbone_weights</td>\n <td style=\"text-align:left\">ResNet backbone weights</td>\n <td style=\"text-align:center\">'ImageNet-R50-AlignPadding.npz'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">load_model</td>\n <td style=\"text-align:left\">Pre-trained model to load</td>\n <td style=\"text-align:center\"></td>\n </tr>\n <tr>\n <td style=\"text-align:center\">config:</td>\n <td style=\"text-align:left\">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>\n <td style=\"text-align:center\"></td>\n </tr>\n</table>\n\n \n<table align='left'>\n <caption>AWS Samples Mask-RCNN Hyper-parameters</caption>\n <tr>\n <th style=\"text-align:center\">Hyper-parameter</th>\n <th style=\"text-align:center\">Description</th>\n <th style=\"text-align:center\">Default</th>\n </tr>\n <tr>\n <td style=\"text-align:center\">mode_fpn</td>\n <td style=\"text-align:left\">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>\n <td style=\"text-align:center\">\"True\"</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">mode_mask</td>\n <td style=\"text-align:left\">A value of \"False\" means Faster-RCNN model, \"True\" means Mask R-CNN moodel</td>\n <td style=\"text-align:center\">\"True\"</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">eval_period</td>\n <td style=\"text-align:left\">Number of epochs period for evaluation during training</td>\n <td style=\"text-align:center\">1</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">lr_epoch_schedule</td>\n <td style=\"text-align:left\">Learning rate schedule in epochs</td>\n <td style=\"text-align:center\">'[(16, 0.1), (20, 0.01), (24, None)]'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">batch_size_per_gpu</td>\n <td style=\"text-align:left\">Batch size per gpu ( Minimum 1, Maximum 4)</td>\n <td style=\"text-align:center\">4</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">batch_norm</td>\n <td style=\"text-align:left\">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>\n <td style=\"text-align:center\">'FreezeBN'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">images_per_epoch</td>\n <td style=\"text-align:left\">Images per epoch </td>\n <td style=\"text-align:center\">120000</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">data_train</td>\n <td style=\"text-align:left\">Training data under data directory</td>\n <td style=\"text-align:center\">'train2017'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">data_val</td>\n <td style=\"text-align:left\">Validation data under data directory</td>\n <td style=\"text-align:center\">'val2017'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">resnet_arch</td>\n <td style=\"text-align:left\">Must be 'resnet50' or 'resnet101'</td>\n <td style=\"text-align:center\">'resnet50'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">backbone_weights</td>\n <td style=\"text-align:left\">ResNet backbone weights</td>\n <td style=\"text-align:center\">'ImageNet-R50-AlignPadding.npz'</td>\n </tr>\n <tr>\n <td style=\"text-align:center\">load_model</td>\n <td style=\"text-align:left\">Pre-trained 
model to load</td>\n <td style=\"text-align:center\"></td>\n </tr>\n <tr>\n <td style=\"text-align:center\">config:</td>\n <td style=\"text-align:left\">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>\n <td style=\"text-align:center\"></td>\n </tr>\n</table>", "_____no_output_____" ] ], [ [ "hyperparameters = {\n \"mode_fpn\": \"True\",\n \"mode_mask\": \"True\",\n \"eval_period\": 1,\n \"batch_norm\": \"FreezeBN\"\n }", "_____no_output_____" ] ], [ [ "## Define Training Metrics\nNext, we define the regular expressions that SageMaker uses to extract algorithm metrics from training logs and send them to [AWS CloudWatch metrics](https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/monitoring/working_with_metrics.html). These algorithm metrics are visualized in SageMaker console.", "_____no_output_____" ] ], [ [ "metric_definitions=[\n {\n \"Name\": \"fastrcnn_losses/box_loss\",\n \"Regex\": \".*fastrcnn_losses/box_loss:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"fastrcnn_losses/label_loss\",\n \"Regex\": \".*fastrcnn_losses/label_loss:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"fastrcnn_losses/label_metrics/accuracy\",\n \"Regex\": \".*fastrcnn_losses/label_metrics/accuracy:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"fastrcnn_losses/label_metrics/false_negative\",\n \"Regex\": \".*fastrcnn_losses/label_metrics/false_negative:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"fastrcnn_losses/label_metrics/fg_accuracy\",\n \"Regex\": \".*fastrcnn_losses/label_metrics/fg_accuracy:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"fastrcnn_losses/num_fg_label\",\n \"Regex\": \".*fastrcnn_losses/num_fg_label:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"maskrcnn_loss/accuracy\",\n \"Regex\": \".*maskrcnn_loss/accuracy:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"maskrcnn_loss/fg_pixel_ratio\",\n \"Regex\": \".*maskrcnn_loss/fg_pixel_ratio:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"maskrcnn_loss/maskrcnn_loss\",\n \"Regex\": \".*maskrcnn_loss/maskrcnn_loss:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"maskrcnn_loss/pos_accuracy\",\n \"Regex\": \".*maskrcnn_loss/pos_accuracy:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/IoU=0.5\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/IoU=0\\\\.5:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/IoU=0.5:0.95\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/IoU=0\\\\.5:0\\\\.95:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/IoU=0.75\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/IoU=0\\\\.75:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/large\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/large:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/medium\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/medium:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(bbox)/small\",\n \"Regex\": \".*mAP\\\\(bbox\\\\)/small:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/IoU=0.5\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/IoU=0\\\\.5:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/IoU=0.5:0.95\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/IoU=0\\\\.5:0\\\\.95:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/IoU=0.75\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/IoU=0\\\\.75:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/large\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/large:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/medium\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/medium:\\\\s*(\\\\S+).*\"\n },\n {\n \"Name\": \"mAP(segm)/small\",\n \"Regex\": \".*mAP\\\\(segm\\\\)/small:\\\\s*(\\\\S+).*\"\n } \n \n ]", "_____no_output_____" ] ], [ [ "## Define SageMaker Experiment\n\nTo define SageMaker Experiment, we 
first install the `sagemaker-experiments` package.", "_____no_output_____" ] ], [ [ "! pip install sagemaker-experiments==0.1.20", "_____no_output_____" ] ], [ [ "Next, we import the SageMaker Experiment modules.", "_____no_output_____" ] ], [ [ "from smexperiments.experiment import Experiment\nfrom smexperiments.trial import Trial\nfrom smexperiments.trial_component import TrialComponent\nfrom smexperiments.tracker import Tracker\nimport time", "_____no_output_____" ] ], [ [ "Next, we define a `Tracker` for tracking input data used in the SageMaker Trials in this Experiment. Specify the S3 URL of your dataset in the `value` below and change the name of the dataset if you are using a different dataset.", "_____no_output_____" ] ], [ [ "sm = session.client('sagemaker')\nwith Tracker.create(display_name=\"Preprocessing\", sagemaker_boto_client=sm) as tracker:\n    # we can log the s3 uri to the dataset used for training\n    tracker.log_input(name=\"coco-2017-dataset\", \n                      media_type=\"s3/uri\", \n                      value= f's3://{s3_bucket}/{prefix}/input/train' # specify S3 URL to your dataset\n                     )", "_____no_output_____" ] ], [ [ "Next, we create a SageMaker Experiment.", "_____no_output_____" ] ], [ [ "mrcnn_experiment = Experiment.create(\n    experiment_name=f\"mask-rcnn-experiment-{int(time.time())}\", \n    description=\"Mask R-CNN experiment\", \n    sagemaker_boto_client=sm)\nprint(mrcnn_experiment)", "_____no_output_____" ] ], [ [ "We run the training job in your private VPC, so we need to set the ```subnets``` and ```security_group_ids``` prior to running the cell below. You may specify multiple subnet ids in the ```subnets``` list; the subnets must be part of the output of the ```./stack-sm.sh``` CloudFormation stack script used to create this notebook instance. Specify only one security group id in the ```security_group_ids``` list. The security group id must also be part of the output of the ```./stack-sm.sh``` script.", "_____no_output_____" ] ], [ [ "security_group_ids = # ['sg-xxxxxxxx']\nsubnets = # ['subnet-xxxxxxx', 'subnet-xxxxxxx', 'subnet-xxxxxxx']\nsagemaker_session = sagemaker.session.Session(boto_session=session)", "_____no_output_____" ] ], [ [ "Next, we use the SageMaker [Estimator](https://sagemaker.readthedocs.io/en/stable/estimators.html) API to define a SageMaker Training Job for each SageMaker Trial we need to run within the SageMaker Experiment.\n\nWe recommend using ```ml.p3.16xlarge``` instances, because each ```ml.p3.16xlarge``` instance has 8 Tesla V100 GPUs; in the cell below we set ```instance_count=4``` and ```instance_type='ml.p3.16xlarge'```. We recommend using a 100 GB [Amazon EBS](https://aws.amazon.com/ebs/) storage volume with each training instance, so we set ```volume_size = 100```. 
We want to replicate training data to each training instance, so we set ```input_mode= 'File'```.\n\nNext, we will iterate through the Trial parameters and start two trials, one for ResNet architecture `resnet50`, and a second Trial for `resnet101`.", "_____no_output_____" ] ], [ [ "trial_params = [ ('resnet50', 'ImageNet-R50-AlignPadding.npz'), \n ('resnet101', 'ImageNet-R101-AlignPadding.npz')]\n\nfor resnet_arch, backbone_weights in trial_params:\n \n hyperparameters['resnet_arch'] = resnet_arch\n hyperparameters['backbone_weights'] = backbone_weights\n \n trial_name = f\"mask-rcnn-{resnet_arch}-{int(time.time())}\"\n mrcnn_trial = Trial.create(\n trial_name=trial_name, \n experiment_name=mrcnn_experiment.experiment_name,\n sagemaker_boto_client=sm,\n )\n \n # associate the proprocessing trial component with the current trial\n mrcnn_trial.add_trial_component(tracker.trial_component)\n print(mrcnn_trial)\n\n mask_rcnn_estimator = Estimator(image_uri=training_image,\n role=role, \n instance_count=4, \n instance_type='ml.p3.16xlarge',\n volume_size = 100,\n max_run = 400000,\n input_mode= 'File',\n output_path=s3_output_location,\n sagemaker_session=sagemaker_session, \n hyperparameters = hyperparameters,\n metric_definitions = metric_definitions,\n subnets=subnets,\n security_group_ids=security_group_ids)\n \n # Specify directory path for log output on the EFS file system.\n # You need to provide normalized and absolute path below.\n # For example, '/mask-rcnn/sagemaker/output/log'\n # Log output directory must not exist\n file_system_directory_path = f'/mask-rcnn/sagemaker/output/{mrcnn_trial.trial_name}'\n print(f\"EFS log directory:{file_system_directory_path}\")\n\n # Create the log output directory. \n # EFS file-system is mounted on '$HOME/efs' mount point for this notebook.\n home_dir=os.environ['HOME']\n local_efs_path = os.path.join(home_dir,'efs', file_system_directory_path[1:])\n print(f\"Creating log directory on EFS: {local_efs_path}\")\n\n assert not os.path.isdir(local_efs_path)\n ! sudo mkdir -p -m a=rw {local_efs_path}\n assert os.path.isdir(local_efs_path)\n\n # Specify the access mode of the mount of the directory associated with the file system. 
\n # Directory must be mounted 'rw'(read-write).\n file_system_access_mode = 'rw'\n\n\n log = FileSystemInput(file_system_id=file_system_id,\n file_system_type=file_system_type,\n directory_path=file_system_directory_path,\n file_system_access_mode=file_system_access_mode)\n\n data_channels = {'train': train, 'log': log}\n\n mask_rcnn_estimator.fit(inputs=data_channels, \n job_name=mrcnn_trial.trial_name,\n logs=True, \n experiment_config={\"TrialName\": mrcnn_trial.trial_name, \n \"TrialComponentDisplayName\": \"Training\"},\n wait=False)\n\n # sleep in between starting two trials\n time.sleep(2)", "_____no_output_____" ], [ "search_expression = {\n \"Filters\":[\n {\n \"Name\": \"DisplayName\",\n \"Operator\": \"Equals\",\n \"Value\": \"Training\",\n },\n {\n \"Name\": \"metrics.maskrcnn_loss/accuracy.max\",\n \"Operator\": \"LessThan\",\n \"Value\": \"1\",\n }\n ],\n}", "_____no_output_____" ], [ "from sagemaker.analytics import ExperimentAnalytics\n\ntrial_component_analytics = ExperimentAnalytics(\n sagemaker_session=sagemaker_session,\n experiment_name=mrcnn_experiment.experiment_name,\n search_expression=search_expression,\n sort_by=\"metrics.maskrcnn_loss/accuracy.max\",\n sort_order=\"Descending\",\n parameter_names=['resnet_arch']\n)", "_____no_output_____" ], [ "analytic_table = trial_component_analytics.dataframe()\nfor col in analytic_table.columns: \n print(col) \n", "_____no_output_____" ], [ "bbox_map=analytic_table[['resnet_arch',\n 'mAP(bbox)/small - Max', \n 'mAP(bbox)/medium - Max', \n 'mAP(bbox)/large - Max']]\nbbox_map", "_____no_output_____" ], [ "segm_map=analytic_table[['resnet_arch',\n 'mAP(segm)/small - Max', \n 'mAP(segm)/medium - Max', \n 'mAP(segm)/large - Max']]\nsegm_map\n", "_____no_output_____" ] ] ]
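The two training jobs above are launched with `wait=False`, so the analytics cells only reflect final metrics once both jobs have finished. A small polling helper along these lines can be used to wait for the trials before building the analytics tables (a sketch; it assumes the training job names equal the trial names, as in the `fit` calls above):

```python
import time
import boto3

def wait_for_training_jobs(job_names, poll_secs=120):
    sm_client = boto3.client('sagemaker')
    pending = set(job_names)
    while pending:
        for name in sorted(pending):  # iterate over a copy so we can discard safely
            status = sm_client.describe_training_job(
                TrainingJobName=name)['TrainingJobStatus']
            print(f'{name}: {status}')
            if status in ('Completed', 'Failed', 'Stopped'):
                pending.discard(name)
        if pending:
            time.sleep(poll_secs)
```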
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cbc41d443d96481abe4246e4aa9e3934927776ef
4,350
ipynb
Jupyter Notebook
AnalisiTesto/Lezione1/Esercizi/simple_regex.ipynb
nick87ds/MaterialeSerate
51627e47ff1d3c3ecfc9ce6741c04b91b3295359
[ "MIT" ]
12
2021-12-12T22:19:52.000Z
2022-03-18T11:45:17.000Z
AnalisiTesto/Lezione1/Esercizi/simple_regex.ipynb
PythonGroupBiella/MaterialeLezioni
58b45ecda7b9a8a298b9ca966d2806618a277372
[ "MIT" ]
1
2021-02-02T09:21:23.000Z
2021-02-02T09:21:23.000Z
AnalisiTesto/Lezione1/Esercizi/simple_regex.ipynb
PythonGroupBiella/MaterialeLezioni
58b45ecda7b9a8a298b9ca966d2806618a277372
[ "MIT" ]
7
2021-02-01T22:09:14.000Z
2021-06-22T08:30:16.000Z
23.015873
177
0.508046
[ [ [ "## Esercizi Introduttivi\n\n<sup><sub>Adattato da: [learnbyexample/py_regular_expressions](https://github.com/learnbyexample/py_regular_expressions/blob/master/exercises/Exercises.md)</sup></sub>.\nContiene molti altri esempi (in inglese), ben catalogati e con soluzioni.", "_____no_output_____" ] ], [ [ "import re", "_____no_output_____" ], [ "'''\nSostituire tutte le occorrenze di `5` con `five`\n'''\nip = 'They ate 5 apples and 5 oranges'\n\npattern = r'\\b5\\b'\nres = re.sub(pattern, 'five', ip)\nassert res == 'They ate five apples and five oranges'", "_____no_output_____" ], [ "'''\nScrivere una regex che controlla se la stinga inizia con `be`\n'''\nline1 = 'be nice'\nline2 = 'oh no\\nbear spotted'\n\npattern = r'^be'\npat = re.compile(pattern)\nassert bool(pat.search(line1))\nassert not(bool(pat.search(line2)))\n\n", "_____no_output_____" ], [ "'''\nDato un testo estrarre tutte le parole contenute tra le parentesi (assumi che non ci sono coppie di parentesi non chiuse)\n'''\n\ntext = 'another (way) to reuse (portion) matched (by) capture groups'\n\npattern = r'\\((.*?)\\)'\nres = re.findall(pattern, text)\nassert res == ['way', 'portion', 'by']", "_____no_output_____" ], [ "'''\nData la sequenza in input, estrarre tutte le parole in cui è presente almeno una sequenza ripetuta.\nEsempio: `232323` and `897897`\n'''\ntext = '1234 2323 453545354535 9339 11 60260260'\n\npattern = r'\\b(\\d+)\\1+\\b'\npat = re.compile(pattern)\nres = [m[0] for m in pat.finditer(text)]\nassert res == ['2323', '453545354535', '11']", "_____no_output_____" ], [ "'''\nConvertire le seguenti stringe in dizionari. Il nome delle chiavi per i campi sono: `name`, `maths`, `phy`.\n'''\n\nrow1 = 'rohan first,75,89'\nrow2 = 'rose, 88, 92'\n\npattern = r'(?P<name>[^,]+),\\s*(?P<maths>[^,]+),\\s*(?P<phy>[^,]+)'\npat = re.compile(pattern)\n\nres1 = pat.search(row1).groupdict()\nassert res1 == {'name': 'rohan first', 'maths': '75', 'phy': '89'}, res1\n\nres2 = pat.search(row2).groupdict()\nassert res2 == {'name': 'rose', 'maths': '88', 'phy': '92'}, res2", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cbc42a3f004023982cb3e1bca53d76d3c4cac9e0
48,507
ipynb
Jupyter Notebook
notebooks/historical.ipynb
ctogle/coind
170c6074c9556da6094514c5cf0c45686645ad80
[ "MIT" ]
null
null
null
notebooks/historical.ipynb
ctogle/coind
170c6074c9556da6094514c5cf0c45686645ad80
[ "MIT" ]
null
null
null
notebooks/historical.ipynb
ctogle/coind
170c6074c9556da6094514c5cf0c45686645ad80
[ "MIT" ]
null
null
null
54.934315
462
0.54654
[ [ [ "import json\n\nwith open('../tmp/history.2018.log', 'r') as f:\n data = json.loads(f.read())", "_____no_output_____" ], [ "data.keys()", "_____no_output_____" ], [ "data['BAT-USDC'][:10]", "_____no_output_____" ], [ "import collections\nimport json\nfrom coind.tickers import ticker_date_format\n\n\nhistorical_sample = collections.namedtuple('HistoricalSample',\n ('time', 'product_id', 'price', 'low', 'high', 'volume'))\n\ndef stream_historical(path):\n with open(path, 'r') as f:\n data = json.loads(f.read())\n stream = []\n for product in data:\n for msg in data[product]:\n if msg == 'message':\n continue\n else:\n time, low, high, _, close, volume = msg\n time = datetime.fromtimestamp(time)\n #time = time.strftime(ticker_date_format)\n sample = historical_sample(time, product, close, low, high, volume)\n stream.append(sample)\n stream.sort(key=lambda s: s.time)\n for sample in stream:\n #ticker = {\n # \"best_ask\": None, \"best_bid\": None, \"high_24h\": None, \"last_size\": None, \"low_24h\": None, \n # \"open_24h\": None, \"price\": sample.price, \"product_id\": sample.product_id, \"sequence\": None,\n # \"side\": None, \"time\": sample.time.strftime(ticker_date_format), \"trade_id\": None, \"type\": \"ticker\",\n # \"volume_24h\": None, \"volume_30d\": None,\n #}\n ticker = {\n \"best_ask\": None, \"best_bid\": None, \"high_24h\": sample.high, \"last_size\": None, \"low_24h\": sample.low, \n \"open_24h\": None, \"price\": sample.price, \"product_id\": sample.product_id, \"sequence\": None,\n \"side\": None, \"time\": sample.time.strftime(ticker_date_format), \"trade_id\": None, \"type\": \"ticker\",\n \"volume_24h\": sample.volume, \"volume_30d\": None,\n }\n yield json.dumps(ticker, sort_keys=True)\n\n \npath = '../tmp/history.2015.log'\nwith open('../tmp/history.2015.stream.log', 'w') as f:\n for tick in stream_historical(path):\n f.write(f'{tick}\\n')", "_____no_output_____" ], [ "!head ../tmp/history.2019.stream.log", "{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 0.12689, \"product_id\": \"BAT-USDC\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 4.99, \"product_id\": \"ETC-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 132.32, \"product_id\": \"ETH-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 29.78, \"product_id\": \"LTC-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 55.34, \"product_id\": \"ZEC-USDC\", \"sequence\": null, \"side\": null, \"time\": 
\"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 0.295672, \"product_id\": \"ZRX-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:00:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 0.12689, \"product_id\": \"BAT-USDC\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:05:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 4.99, \"product_id\": \"ETC-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:05:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 132.1, \"product_id\": \"ETH-USD\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:05:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n{\"best_ask\": null, \"best_bid\": null, \"high_24h\": null, \"last_size\": null, \"low_24h\": null, \"open_24h\": null, \"price\": 0.065588, \"product_id\": \"GNT-USDC\", \"sequence\": null, \"side\": null, \"time\": \"2019-01-01T00:05:00.000000Z\", \"trade_id\": null, \"type\": \"ticker\", \"volume_24h\": null, \"volume_30d\": null}\r\n" ], [ "samples[:10]", "_____no_output_____" ], [ "len(samples)", "_____no_output_____" ], [ "!tail ../data/spec/stream.0.log", "{\"best_ask\": \"5173.06\", \"best_bid\": \"5173.05\", \"high_24h\": \"5191.19000000\", \"last_size\": \"0.02065387\", \"low_24h\": \"5013.81000000\", \"open_24h\": \"5075.74000000\", \"price\": \"5173.05000000\", \"product_id\": \"BTC-USD\", \"sequence\": 9248250881, \"side\": \"sell\", \"time\": \"2019-04-15T02:04:19.825000Z\", \"trade_id\": 62019564, \"type\": \"ticker\", \"volume_24h\": \"4706.98373027\", \"volume_30d\": \"304699.15859522\"}\r\n{\"best_ask\": \"169.06\", \"best_bid\": \"169.04\", \"high_24h\": \"169.76000000\", \"last_size\": \"0.27000000\", \"low_24h\": \"161.83000000\", \"open_24h\": \"163.51000000\", \"price\": \"169.05000000\", \"product_id\": \"ETH-USD\", \"sequence\": 6529944198, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:20.473000Z\", \"trade_id\": 46357934, \"type\": \"ticker\", \"volume_24h\": \"48230.64536837\", \"volume_30d\": \"3493449.88297001\"}\r\n{\"best_ask\": \"169.06\", \"best_bid\": \"169.04\", \"high_24h\": \"169.76000000\", \"last_size\": \"0.91066661\", \"low_24h\": \"161.83000000\", \"open_24h\": \"163.51000000\", \"price\": \"169.06000000\", \"product_id\": \"ETH-USD\", \"sequence\": 6529944200, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:20.473000Z\", \"trade_id\": 46357935, \"type\": \"ticker\", \"volume_24h\": \"48231.55603498\", \"volume_30d\": \"3493450.79363662\"}\r\n{\"best_ask\": \"5173.06\", \"best_bid\": \"5173.05\", \"high_24h\": \"5191.19000000\", \"last_size\": \"0.02771596\", \"low_24h\": \"5013.81000000\", \"open_24h\": \"5075.74000000\", \"price\": \"5173.05000000\", \"product_id\": \"BTC-USD\", \"sequence\": 
9248250911, \"side\": \"sell\", \"time\": \"2019-04-15T02:04:20.812000Z\", \"trade_id\": 62019565, \"type\": \"ticker\", \"volume_24h\": \"4707.01144623\", \"volume_30d\": \"304699.18631118\"}\r\n{\"best_ask\": \"83.7\", \"best_bid\": \"83.6\", \"high_24h\": \"84.73000000\", \"last_size\": \"0.23736280\", \"low_24h\": \"76.50000000\", \"open_24h\": \"78.16000000\", \"price\": \"83.70000000\", \"product_id\": \"LTC-USD\", \"sequence\": 3301715282, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:20.805000Z\", \"trade_id\": 37875530, \"type\": \"ticker\", \"volume_24h\": \"162563.19131454\", \"volume_30d\": \"6918730.94499712\"}\r\n{\"best_ask\": \"0.119478\", \"best_bid\": \"0.119149\", \"high_24h\": \"0.11949000\", \"last_size\": \"75.00000000\", \"low_24h\": \"0.11371000\", \"open_24h\": \"0.11585300\", \"price\": \"0.11947800\", \"product_id\": \"XLM-USD\", \"sequence\": 33234188, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:21.300000Z\", \"trade_id\": 306906, \"type\": \"ticker\", \"volume_24h\": \"6989350\", \"volume_30d\": \"428430600\"}\r\n{\"best_ask\": \"6.56\", \"best_bid\": \"6.551\", \"high_24h\": \"6.60000000\", \"last_size\": \"1.36248847\", \"low_24h\": \"6.16200000\", \"open_24h\": \"6.31000000\", \"price\": \"6.56000000\", \"product_id\": \"ETC-USD\", \"sequence\": 217193603, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:23.663000Z\", \"trade_id\": 1777237, \"type\": \"ticker\", \"volume_24h\": \"181141.91167347\", \"volume_30d\": \"8906396.43756951\"}\r\n{\"best_ask\": \"83.7\", \"best_bid\": \"83.58\", \"high_24h\": \"84.73000000\", \"last_size\": \"1.00389729\", \"low_24h\": \"76.50000000\", \"open_24h\": \"78.16000000\", \"price\": \"83.70000000\", \"product_id\": \"LTC-USD\", \"sequence\": 3301715355, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:24.148000Z\", \"trade_id\": 37875531, \"type\": \"ticker\", \"volume_24h\": \"162564.19521183\", \"volume_30d\": \"6918731.94889441\"}\r\n{\"best_ask\": \"169.05\", \"best_bid\": \"169.04\", \"high_24h\": \"169.76000000\", \"last_size\": \"0.16477966\", \"low_24h\": \"161.83000000\", \"open_24h\": \"163.51000000\", \"price\": \"169.04000000\", \"product_id\": \"ETH-USD\", \"sequence\": 6529944263, \"side\": \"sell\", \"time\": \"2019-04-15T02:04:25.424000Z\", \"trade_id\": 46357936, \"type\": \"ticker\", \"volume_24h\": \"48231.72081464\", \"volume_30d\": \"3493450.95841628\"}\r\n{\"best_ask\": \"5173.06\", \"best_bid\": \"5173.05\", \"high_24h\": \"5191.19000000\", \"last_size\": \"0.26506365\", \"low_24h\": \"5013.81000000\", \"open_24h\": \"5075.74000000\", \"price\": \"5173.06000000\", \"product_id\": \"BTC-USD\", \"sequence\": 9248251064, \"side\": \"buy\", \"time\": \"2019-04-15T02:04:26.330000Z\", \"trade_id\": 62019566, \"type\": \"ticker\", \"volume_24h\": \"4707.27650988\", \"volume_30d\": \"304699.45137483\"}\r\n" ], [ "import cbpro\npublic_client = cbpro.PublicClient()", "_____no_output_____" ], [ "\nrates = public_client.get_product_historic_rates('ETH-USD',\n '2019-01-01 00:00:00-05:00', '2019-01-02 00:00:00-05:00', granularity=300)", "_____no_output_____" ], [ "rates", "_____no_output_____" ], [ "from datetime import datetime\n\nprint(len(rates))\nfor r in rates:\n print(datetime.fromtimestamp(r[0]))", "288\n2019-01-01 23:55:00\n2019-01-01 23:50:00\n2019-01-01 23:45:00\n2019-01-01 23:40:00\n2019-01-01 23:35:00\n2019-01-01 23:30:00\n2019-01-01 23:25:00\n2019-01-01 23:20:00\n2019-01-01 23:15:00\n2019-01-01 23:10:00\n2019-01-01 23:05:00\n2019-01-01 23:00:00\n2019-01-01 22:55:00\n2019-01-01 
22:50:00\n2019-01-01 22:45:00\n2019-01-01 22:40:00\n2019-01-01 22:35:00\n2019-01-01 22:30:00\n2019-01-01 22:25:00\n2019-01-01 22:20:00\n2019-01-01 22:15:00\n2019-01-01 22:10:00\n2019-01-01 22:05:00\n2019-01-01 22:00:00\n2019-01-01 21:55:00\n2019-01-01 21:50:00\n2019-01-01 21:45:00\n2019-01-01 21:40:00\n2019-01-01 21:35:00\n2019-01-01 21:30:00\n2019-01-01 21:25:00\n2019-01-01 21:20:00\n2019-01-01 21:15:00\n2019-01-01 21:10:00\n2019-01-01 21:05:00\n2019-01-01 21:00:00\n2019-01-01 20:55:00\n2019-01-01 20:50:00\n2019-01-01 20:45:00\n2019-01-01 20:40:00\n2019-01-01 20:35:00\n2019-01-01 20:30:00\n2019-01-01 20:25:00\n2019-01-01 20:20:00\n2019-01-01 20:15:00\n2019-01-01 20:10:00\n2019-01-01 20:05:00\n2019-01-01 20:00:00\n2019-01-01 19:55:00\n2019-01-01 19:50:00\n2019-01-01 19:45:00\n2019-01-01 19:40:00\n2019-01-01 19:35:00\n2019-01-01 19:30:00\n2019-01-01 19:25:00\n2019-01-01 19:20:00\n2019-01-01 19:15:00\n2019-01-01 19:10:00\n2019-01-01 19:05:00\n2019-01-01 19:00:00\n2019-01-01 18:55:00\n2019-01-01 18:50:00\n2019-01-01 18:45:00\n2019-01-01 18:40:00\n2019-01-01 18:35:00\n2019-01-01 18:30:00\n2019-01-01 18:25:00\n2019-01-01 18:20:00\n2019-01-01 18:15:00\n2019-01-01 18:10:00\n2019-01-01 18:05:00\n2019-01-01 18:00:00\n2019-01-01 17:55:00\n2019-01-01 17:50:00\n2019-01-01 17:45:00\n2019-01-01 17:40:00\n2019-01-01 17:35:00\n2019-01-01 17:30:00\n2019-01-01 17:25:00\n2019-01-01 17:20:00\n2019-01-01 17:15:00\n2019-01-01 17:10:00\n2019-01-01 17:05:00\n2019-01-01 17:00:00\n2019-01-01 16:55:00\n2019-01-01 16:50:00\n2019-01-01 16:45:00\n2019-01-01 16:40:00\n2019-01-01 16:35:00\n2019-01-01 16:30:00\n2019-01-01 16:25:00\n2019-01-01 16:20:00\n2019-01-01 16:15:00\n2019-01-01 16:10:00\n2019-01-01 16:05:00\n2019-01-01 16:00:00\n2019-01-01 15:55:00\n2019-01-01 15:50:00\n2019-01-01 15:45:00\n2019-01-01 15:40:00\n2019-01-01 15:35:00\n2019-01-01 15:30:00\n2019-01-01 15:25:00\n2019-01-01 15:20:00\n2019-01-01 15:15:00\n2019-01-01 15:10:00\n2019-01-01 15:05:00\n2019-01-01 15:00:00\n2019-01-01 14:55:00\n2019-01-01 14:50:00\n2019-01-01 14:45:00\n2019-01-01 14:40:00\n2019-01-01 14:35:00\n2019-01-01 14:30:00\n2019-01-01 14:25:00\n2019-01-01 14:20:00\n2019-01-01 14:15:00\n2019-01-01 14:10:00\n2019-01-01 14:05:00\n2019-01-01 14:00:00\n2019-01-01 13:55:00\n2019-01-01 13:50:00\n2019-01-01 13:45:00\n2019-01-01 13:40:00\n2019-01-01 13:35:00\n2019-01-01 13:30:00\n2019-01-01 13:25:00\n2019-01-01 13:20:00\n2019-01-01 13:15:00\n2019-01-01 13:10:00\n2019-01-01 13:05:00\n2019-01-01 13:00:00\n2019-01-01 12:55:00\n2019-01-01 12:50:00\n2019-01-01 12:45:00\n2019-01-01 12:40:00\n2019-01-01 12:35:00\n2019-01-01 12:30:00\n2019-01-01 12:25:00\n2019-01-01 12:20:00\n2019-01-01 12:15:00\n2019-01-01 12:10:00\n2019-01-01 12:05:00\n2019-01-01 12:00:00\n2019-01-01 11:55:00\n2019-01-01 11:50:00\n2019-01-01 11:45:00\n2019-01-01 11:40:00\n2019-01-01 11:35:00\n2019-01-01 11:30:00\n2019-01-01 11:25:00\n2019-01-01 11:20:00\n2019-01-01 11:15:00\n2019-01-01 11:10:00\n2019-01-01 11:05:00\n2019-01-01 11:00:00\n2019-01-01 10:55:00\n2019-01-01 10:50:00\n2019-01-01 10:45:00\n2019-01-01 10:40:00\n2019-01-01 10:35:00\n2019-01-01 10:30:00\n2019-01-01 10:25:00\n2019-01-01 10:20:00\n2019-01-01 10:15:00\n2019-01-01 10:10:00\n2019-01-01 10:05:00\n2019-01-01 10:00:00\n2019-01-01 09:55:00\n2019-01-01 09:50:00\n2019-01-01 09:45:00\n2019-01-01 09:40:00\n2019-01-01 09:35:00\n2019-01-01 09:30:00\n2019-01-01 09:25:00\n2019-01-01 09:20:00\n2019-01-01 09:15:00\n2019-01-01 09:10:00\n2019-01-01 09:05:00\n2019-01-01 09:00:00\n2019-01-01 08:55:00\n2019-01-01 08:50:00\n2019-01-01 
08:45:00\n2019-01-01 08:40:00\n2019-01-01 08:35:00\n2019-01-01 08:30:00\n2019-01-01 08:25:00\n2019-01-01 08:20:00\n2019-01-01 08:15:00\n2019-01-01 08:10:00\n2019-01-01 08:05:00\n2019-01-01 08:00:00\n2019-01-01 07:55:00\n2019-01-01 07:50:00\n2019-01-01 07:45:00\n2019-01-01 07:40:00\n2019-01-01 07:35:00\n2019-01-01 07:30:00\n2019-01-01 07:25:00\n2019-01-01 07:20:00\n2019-01-01 07:15:00\n2019-01-01 07:10:00\n2019-01-01 07:05:00\n2019-01-01 07:00:00\n2019-01-01 06:55:00\n2019-01-01 06:50:00\n2019-01-01 06:45:00\n2019-01-01 06:40:00\n2019-01-01 06:35:00\n2019-01-01 06:30:00\n2019-01-01 06:25:00\n2019-01-01 06:20:00\n2019-01-01 06:15:00\n2019-01-01 06:10:00\n2019-01-01 06:05:00\n2019-01-01 06:00:00\n2019-01-01 05:55:00\n2019-01-01 05:50:00\n2019-01-01 05:45:00\n2019-01-01 05:40:00\n2019-01-01 05:35:00\n2019-01-01 05:30:00\n2019-01-01 05:25:00\n2019-01-01 05:20:00\n2019-01-01 05:15:00\n2019-01-01 05:10:00\n2019-01-01 05:05:00\n2019-01-01 05:00:00\n2019-01-01 04:55:00\n2019-01-01 04:50:00\n2019-01-01 04:45:00\n2019-01-01 04:40:00\n2019-01-01 04:35:00\n2019-01-01 04:30:00\n2019-01-01 04:25:00\n2019-01-01 04:20:00\n2019-01-01 04:15:00\n2019-01-01 04:10:00\n2019-01-01 04:05:00\n2019-01-01 04:00:00\n2019-01-01 03:55:00\n2019-01-01 03:50:00\n2019-01-01 03:45:00\n2019-01-01 03:40:00\n2019-01-01 03:35:00\n2019-01-01 03:30:00\n2019-01-01 03:25:00\n2019-01-01 03:20:00\n2019-01-01 03:15:00\n2019-01-01 03:10:00\n2019-01-01 03:05:00\n2019-01-01 03:00:00\n2019-01-01 02:55:00\n2019-01-01 02:50:00\n2019-01-01 02:45:00\n2019-01-01 02:40:00\n2019-01-01 02:35:00\n2019-01-01 02:30:00\n2019-01-01 02:25:00\n2019-01-01 02:20:00\n2019-01-01 02:15:00\n2019-01-01 02:10:00\n2019-01-01 02:05:00\n2019-01-01 02:00:00\n2019-01-01 01:55:00\n2019-01-01 01:50:00\n2019-01-01 01:45:00\n2019-01-01 01:40:00\n2019-01-01 01:35:00\n2019-01-01 01:30:00\n2019-01-01 01:25:00\n2019-01-01 01:20:00\n2019-01-01 01:15:00\n2019-01-01 01:10:00\n2019-01-01 01:05:00\n2019-01-01 01:00:00\n2019-01-01 00:55:00\n2019-01-01 00:50:00\n2019-01-01 00:45:00\n2019-01-01 00:40:00\n2019-01-01 00:35:00\n2019-01-01 00:30:00\n2019-01-01 00:25:00\n2019-01-01 00:20:00\n2019-01-01 00:15:00\n2019-01-01 00:10:00\n2019-01-01 00:05:00\n2019-01-01 00:00:00\n" ] ] ]
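The last two cells pull a single day of 5-minute candles (288 rows), which fits in one request. The public endpoint caps each call at roughly 300 candles, so longer date ranges have to be fetched in chunks. A sketch of that pagination, reusing the same `get_product_historic_rates` call (the chunk size and sleep interval are assumptions, not values taken from this notebook):

```python
from datetime import datetime, timedelta
import time
import cbpro

def fetch_candles(product_id, start, end, granularity=300):
    """Fetch candles for [start, end) in chunks small enough for the per-request cap."""
    client = cbpro.PublicClient()
    chunk = timedelta(seconds=granularity * 288)  # keep each request under ~300 candles
    candles = {}
    t = start
    while t < end:
        t_next = min(t + chunk, end)
        for c in client.get_product_historic_rates(
                product_id, t.isoformat(), t_next.isoformat(), granularity=granularity):
            candles[c[0]] = c  # key by timestamp to drop duplicates at chunk edges
        time.sleep(0.4)  # stay well under the public rate limit
        t = t_next
    return [candles[ts] for ts in sorted(candles)]

rates = fetch_candles('ETH-USD', datetime(2019, 1, 1), datetime(2019, 1, 3))
print(len(rates))
```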
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc430576f093b2487f189dd056c16c7749aa077
3,010
ipynb
Jupyter Notebook
dalbem.ipynb
GabrielMendesdc/Selenium
50fa3229c8fffe752b52f62533d6167e4f9d327c
[ "MIT" ]
null
null
null
dalbem.ipynb
GabrielMendesdc/Selenium
50fa3229c8fffe752b52f62533d6167e4f9d327c
[ "MIT" ]
null
null
null
dalbem.ipynb
GabrielMendesdc/Selenium
50fa3229c8fffe752b52f62533d6167e4f9d327c
[ "MIT" ]
null
null
null
30.40404
111
0.521595
[ [ [ "from selenium.webdriver import Chrome, ChromeOptions\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom pdf2image import convert_from_path\nfrom time import sleep\nimport requests\nimport shutil\nimport os\n\ndef purgedir(parent):\n for root, dirs, files in os.walk(parent): \n for item in files:\n filespec = os.path.join(root, item)\n os.unlink(filespec)\n \n \ndef inita():\n s=Service(r'C:/Users/pdv/chromedriver.exe')\n browser = Chrome(service=s)\n browser.get('https://www.supermercadosdalben.com.br/ofertas.html')\n sleep(3)\n body = browser.find_element(By.TAG_NAME, 'body')\n img = body.find_elements(By.CLASS_NAME, 'img-responsive ')\n img[1].click()\n sleep(5)\n window_after = browser.window_handles[1]\n browser.switch_to.window(window_after)\n url = browser.current_url\n browser.quit()\n response = requests.get(url, stream=True)\n with open('C:/Users/pdv/Desktop/selenium-imgs/dalbem.pdf', 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response\n purgedir(f'C:/Users/pdv/Desktop/selenium-imgs/rename')\n images = convert_from_path('C:/Users/pdv/Desktop/selenium-imgs/dalbem.pdf',\n poppler_path=r'C:\\Program Files (x86)\\poppler-21.11.0\\Library\\bin',\n output_folder='C:/Users/pdv/Desktop/selenium-imgs/rename',\n fmt='PNG')\n \n for i in range(len(images)): \n images[i].save('pagina'+ str(i) +'.png', 'PNG')\n print('page')\n return 1\n\nif __name__ == \"__main__\":\n inita()", "page\npage\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
cbc43cebeb811af5fbd9fbfeddbc0e5c2aa843b6
8,117
ipynb
Jupyter Notebook
Amrita/Boolean_Assisgment.ipynb
Sureshkrishh/Practice-codes
3a96584866d0ca4f5975dccc4809eb615a8aa2e0
[ "MIT" ]
null
null
null
Amrita/Boolean_Assisgment.ipynb
Sureshkrishh/Practice-codes
3a96584866d0ca4f5975dccc4809eb615a8aa2e0
[ "MIT" ]
null
null
null
Amrita/Boolean_Assisgment.ipynb
Sureshkrishh/Practice-codes
3a96584866d0ca4f5975dccc4809eb615a8aa2e0
[ "MIT" ]
5
2020-08-04T15:58:22.000Z
2021-08-04T19:16:18.000Z
27.608844
122
0.479364
[ [ [ "# Boolean Assigmnet", "_____no_output_____" ] ], [ [ "a = True #Declare a boolean value and store it in a variable. \nprint(type(a)) #Check the type and print the id of the same.\nprint(id(a))", "<class 'bool'>\n8791376636240\n" ], [ "x , y = bool(6), bool(6) #Take one boolean value between 0 - 256.#Assign it to two different variables. \nprint(id(x)) #Check the id of both the variables.\nprint(id(y))\n \n#Object Reusability Concept #It should come same. Check why?", "8791376636240\n8791376636240\n" ], [ "#Arithmatic Operations on boolean data\n\nr , s = True , False #Take two different boolean values.#Store them in two different variables.\nsum = r + s #Find sum of both values\ndiff = r - s #Find differce between them\npro = r * s #Find the product of both.\nt = s / r #Fnd value after dividing first value with second value\nw = s % r #Find the remainder after dividing first value with second value\n#Cant do for boolean #Find the quotient after dividing first value with second value\nf = r ** s #Find the result of first value to the power of second value.\nprint(bool(sum)) #True\nprint(bool(diff)) #True\nprint(bool(pro)) #False\nprint(bool(t)) #False\nprint(bool(w)) #False\nprint(bool(f)) #True\n\n#print(type(sum),type(r))\n#print(Addition is bool(sum)) --Why this is giving False\n#diff = x.difference(y)-- This will not work 'bool' object has no attribute 'difference'\n#Division = You cannot divide by modulo by-- Zero ZeroDivisionError: division by zer ", "True\nTrue\nFalse\nFalse\nFalse\nTrue\n" ] ], [ [ "#Comparison Operators on boolean values\n\nA , B = True , False #Take two different boolean values.#Store them in two different variables.\nOP1 = A > B #Compare these two values with below operator:-\nOP2 = A < B #less than, '<'\nOP3 = A >= B #Greater than or equal to, '>='\nOP4 = A <= B #Less than or equal to, '<=' \n\nprint(type(OP1))\nprint(type(OP2))\nprint(type(OP3))\nprint(type(OP4))\n#Observe their output(return type should be boolean)\n", "_____no_output_____" ] ], [ [ "#Equality Operator\nC , D = True , False #Take two different boolean values.#Store them in two different variables.\n\nprint ( C == D) #Equuate them using equality operator (==, !=)\nprint ( C != D) #Observe the output(return type should be boolean)", "False\nTrue\n" ], [ "#Logical operators\n#Observe the output of below code #Cross check the output manually\n\nprint(True and True) #----------->Output is True\nprint(False and True) #----------->Output is False\nprint(True and False) #----------->Output is False\nprint(False and False) #----------->Output is False\n\nprint(True or True) #----------->Output is True\nprint(False or True) #----------->Output is True\nprint(True or False) #----------->Output is True\nprint(False or False) #----------->Output is False\n\nprint(not True) #----------->Output is False\nprint(not False) #----------->Output is True", "True\nFalse\nFalse\nFalse\nTrue\nTrue\nTrue\nFalse\nFalse\nTrue\n" ], [ "#Bitwise Operators #Do below operations on the values provided below:-\n\n#Bitwise and(&)\nprint(True & False) \nprint(True & True) \nprint(False & False)\nprint(False & False)\n\n#Bitwise or(|) -----> True, False -------> Output is True\nprint(True | False) \nprint(True | True) \nprint(False | False)\nprint(False | False)\n\n#Bitwise(^) -----> True, False -------> Output is True\nprint(True ^ False) \nprint(True ^ True) \nprint(False ^ False)\nprint(False ^ False)\n \n#Bitwise negation(~) ------> True -------> Output is -2\nprint(~False) \nprint(~True) \n\n#Bitwise left shift 
-----> True,2 -------> Output is 4\nprint(True << 2)\n\n#Bitwise right shift ----------> True,2 -------> Output is 0\nprint(True >> 2)\n\n#Cross check the output manually\n", "False\nTrue\nFalse\nFalse\nTrue\nTrue\nFalse\nFalse\nTrue\nFalse\nFalse\nFalse\n-1\n-2\n4\n0\n" ], [ "#What is the output of the expression inside each print statement? Cross check before running the program.\na = True\nb = True\nprint(a is b) #True or False? #\nprint(a is not b) #True or False?\n\na = False\nb = False\nprint(a is b) #True or False?\nprint(a is not b) #True or False?", "_____no_output_____" ], [ "#Membership operation\n#in, not in are two membership operators and they return a boolean value\n\nprint(True in [10,10.20,10+20j,'Python', True])\nprint(False in (10,10.20,10+20j,'Python', False))\nprint(True in {1,2,3, True})\nprint(True in {True:100, False:200, True:300})\nprint(False in {True:100, False:200, True:300})", "_____no_output_____" ] ] ]
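Several of the results in this assignment follow from the fact that `bool` is a subclass of `int`: `True` behaves as `1` and `False` as `0`, and both are singletons (which is also why the two ids printed near the top are identical). A short check, kept separate from the exercises above:

```python
# bool is an int subclass, so True/False compare and hash equal to 1/0
print(issubclass(bool, int), isinstance(True, int))   # True True
print(True == 1, False == 0, True + True)             # True True 2
print(hash(True) == hash(1))                          # True

# Hence True collides with the key 1 in sets/dicts, and a duplicate True key is overwritten
print(True in {1, 2, 3})                              # True
print({True: 100, False: 200, True: 300})             # {True: 300, False: 200}
```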
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cbc443f095904e56077bb7e8456fca3eb64bb779
1,980
ipynb
Jupyter Notebook
index.ipynb
nitinverma78/deck_of_cards
5bc65f8b53c2bc2459cf0593a434233055d00251
[ "Apache-2.0" ]
null
null
null
index.ipynb
nitinverma78/deck_of_cards
5bc65f8b53c2bc2459cf0593a434233055d00251
[ "Apache-2.0" ]
3
2021-05-20T23:22:27.000Z
2022-02-26T10:29:28.000Z
index.ipynb
nitinverma78/deck_of_cards
5bc65f8b53c2bc2459cf0593a434233055d00251
[ "Apache-2.0" ]
null
null
null
19.223301
175
0.528283
[ [ [ "# Deck of Cards\n\n> A minimal example of using nbdev to create a python library.", "_____no_output_____" ], [ "https://nitinverma78.github.io/deck_of_cards/", "_____no_output_____" ], [ "This repo uses code from Allen Downey's ThinkPython2. This file was automatically generated from a Jupyter Notebook using nbdev. To change it you must edit `index.ipynb`", "_____no_output_____" ], [ "## Install", "_____no_output_____" ], [ "`pip install -e`\n> There is already a project called deck_of_cards on pypi. This project has no relation to that. This project is an example of how to create python packages with nbdev.", "_____no_output_____" ], [ "## How to use", "_____no_output_____" ], [ "Playing cards in python!", "_____no_output_____" ] ], [ [ "from deck_of_cards.deck import Deck\nd = Deck()\nprint(f'Number of playing cards in the deck: {len(d.cards)}')", "Number of playing cards in the deck: 52\n" ], [ "card = d.pop_card()\nprint(card)", "King of Spades\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
cbc45b63b7d56b6f1bda85be248b6ed845928851
3,570
ipynb
Jupyter Notebook
ipython/simulator_adapter_examples/rms_simulator.ipynb
ReactionMechanismGenerator/T3
13e50482282c1ae4b82a9057eeaba5f3b8de2e8c
[ "MIT" ]
5
2020-04-06T15:24:06.000Z
2022-03-10T19:46:14.000Z
ipython/simulator_adapter_examples/rms_simulator.ipynb
ReactionMechanismGenerator/T3
13e50482282c1ae4b82a9057eeaba5f3b8de2e8c
[ "MIT" ]
63
2020-03-15T14:18:04.000Z
2022-03-19T08:40:38.000Z
ipython/simulator_adapter_examples/rms_simulator.ipynb
ReactionMechanismGenerator/T3
13e50482282c1ae4b82a9057eeaba5f3b8de2e8c
[ "MIT" ]
3
2020-06-12T23:42:03.000Z
2021-04-13T03:18:18.000Z
27.461538
93
0.526611
[ [ [ "This notebook demonstrates how to use the rms simulator adapter", "_____no_output_____" ] ], [ [ "import os\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom tests.common import run_minimal\nfrom t3.common import IPYTHON_SIMULATOR_EXAMPLES_PATH\nfrom t3.main import T3\nfrom t3.simulate.rms_constantTP import RMSConstantTP\n\nfrom arc.common import read_yaml_file", "_____no_output_____" ], [ "# define path that contains the input file and T3 iteration folders\nEXAMPLE_DIR = os.path.join(IPYTHON_SIMULATOR_EXAMPLES_PATH, 'rms_simulator_data')", "_____no_output_____" ], [ "# read in the input dictionary to use T3 via its API\nminimal_input = os.path.join(EXAMPLE_DIR, 'input.yml')\ninput_dict = read_yaml_file(path=minimal_input)\ninput_dict['verbose'] = 10\ninput_dict['project_directory'] = EXAMPLE_DIR", "_____no_output_____" ], [ "# create an instance of T3, which stores information used by the rms adapter\nt3 = T3(**input_dict)\nt3.set_paths()", "_____no_output_____" ], [ "# simulate ideal gas with constant V and perform sensitivity analysis\nrms_simulator_adapter = RMSConstantTP(t3=t3.t3,\n rmg=t3.rmg,\n paths=t3.paths,\n logger=t3.logger,\n atol=t3.rmg['model']['atol'],\n rtol=t3.rmg['model']['rtol'],\n observable_list=observable_list,\n sa_atol=t3.t3['sensitivity']['atol'],\n sa_rtol=t3.t3['sensitivity']['rtol'],\n )\nrms_simulator_adapter.simulate()", "_____no_output_____" ], [ "# get the sensitivity analysis coefficients returned in a standard dictionary format\nsa_dict = rms_simulator_adapter.get_sa_coefficients()", "_____no_output_____" ], [ "# plot the results\nspecies = 'H(3)'\nk = 5\nplt.plot(sa_dict['time'], sa_dict['kinetics'][species][k])\nplt.xlabel('time (s)')\nplt.ylabel(f'dln({species})/dln(k{k})')\nplt.title('Sensitivity over time')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cbc462822bc6fd31be5cd2669d52a3ad3cfcc931
80,337
ipynb
Jupyter Notebook
python/d2l-en/pytorch/chapter_computational-performance/multiple-gpus.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
1
2022-01-13T23:36:05.000Z
2022-01-13T23:36:05.000Z
python/d2l-en/pytorch/chapter_computational-performance/multiple-gpus.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
9
2022-01-13T19:34:34.000Z
2022-01-14T19:41:18.000Z
python/d2l-en/pytorch/chapter_computational-performance/multiple-gpus.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
null
null
null
42.596501
524
0.521926
[ [ [ "# Training on Multiple GPUs\n:label:`sec_multi_gpu`\n\nSo far we discussed how to train models efficiently on CPUs and GPUs. We even showed how deep learning frameworks allow one to parallelize computation and communication automatically between them in :numref:`sec_auto_para`. We also showed in :numref:`sec_use_gpu` how to list all the available GPUs on a computer using the `nvidia-smi` command.\nWhat we did *not* discuss is how to actually parallelize deep learning training. \nInstead, we implied in passing that one would somehow split the data across multiple devices and make it work. The present section fills in the details and shows how to train a network in parallel when starting from scratch. Details on how to take advantage of functionality in high-level APIs is relegated to :numref:`sec_multi_gpu_concise`.\nWe assume that you are familiar with minibatch stochastic gradient descent algorithms such as the ones described in :numref:`sec_minibatch_sgd`.\n\n\n## Splitting the Problem\n\nLet us start with a simple computer vision problem and a slightly archaic network, e.g., with multiple layers of convolutions, pooling, and possibly a few fully-connected layers in the end. \nThat is, let us start with a network that looks quite similar to LeNet :cite:`LeCun.Bottou.Bengio.ea.1998` or AlexNet :cite:`Krizhevsky.Sutskever.Hinton.2012`. \nGiven multiple GPUs (2 if it is a desktop server, 4 on an AWS g4dn.12xlarge instance, 8 on a p3.16xlarge, or 16 on a p2.16xlarge), we want to partition training in a manner as to achieve good speedup while simultaneously benefitting from simple and reproducible design choices. Multiple GPUs, after all, increase both *memory* and *computation* ability. In a nutshell, we have the following choices, given a minibatch of training data that we want to classify.\n\nFirst, we could partition the network across multiple GPUs. That is, each GPU takes as input the data flowing into a particular layer, processes data across a number of subsequent layers and then sends the data to the next GPU.\nThis allows us to process data with larger networks when compared with what a single GPU could handle.\nBesides,\nmemory footprint per GPU can be well controlled (it is a fraction of the total network footprint).\n\nHowever, the interface between layers (and thus GPUs) requires tight synchronization. This can be tricky, in particular if the computational workloads are not properly matched between layers. The problem is exacerbated for large numbers of GPUs.\nThe interface between layers also\nrequires large amounts of data transfer,\nsuch as activations and gradients.\nThis may overwhelm the bandwidth of the GPU buses.\nMoreover, compute-intensive, yet sequential operations are nontrivial to partition. See e.g., :cite:`Mirhoseini.Pham.Le.ea.2017` for a best effort in this regard. It remains a difficult problem and it is unclear whether it is possible to achieve good (linear) scaling on nontrivial problems. We do not recommend it unless there is excellent framework or operating system support for chaining together multiple GPUs.\n\n\nSecond, we could split the work layerwise. 
For instance, rather than computing 64 channels on a single GPU we could split up the problem across 4 GPUs, each of which generates data for 16 channels.\nLikewise, for a fully-connected layer we could split the number of output units.\n:numref:`fig_alexnet_original` (taken from :cite:`Krizhevsky.Sutskever.Hinton.2012`)\nillustrates this design, where this strategy was used to deal with GPUs that had a very small memory footprint (2 GB at the time).\nThis allows for good scaling in terms of computation, provided that the number of channels (or units) is not too small.\nBesides,\nmultiple GPUs can process increasingly larger networks since the available memory scales linearly.\n\n![Model parallelism in the original AlexNet design due to limited GPU memory.](../img/alexnet-original.svg)\n:label:`fig_alexnet_original`\n\nHowever,\nwe need a *very large* number of synchronization or barrier operations since each layer depends on the results from all the other layers.\nMoreover, the amount of data that needs to be transferred is potentially even larger than when distributing layers across GPUs. Thus, we do not recommend this approach due to its bandwidth cost and complexity.\n \nLast, we could partition data across multiple GPUs. This way all GPUs perform the same type of work, albeit on different observations. Gradients are aggregated across GPUs after each minibatch of training data.\nThis is the simplest approach and it can be applied in any situation.\nWe only need to synchronize after each minibatch. That said, it is highly desirable to start exchanging gradients parameters already while others are still being computed.\nMoreover, larger numbers of GPUs lead to larger minibatch sizes, thus increasing training efficiency.\nHowever, adding more GPUs does not allow us to train larger models.\n\n\n![Parallelization on multiple GPUs. From left to right: original problem, network partitioning, layerwise partitioning, data parallelism.](../img/splitting.svg)\n:label:`fig_splitting`\n\n\nA comparison of different ways of parallelization on multiple GPUs is depicted in :numref:`fig_splitting`.\nBy and large, data parallelism is the most convenient way to proceed, provided that we have access to GPUs with sufficiently large memory. See also :cite:`Li.Andersen.Park.ea.2014` for a detailed description of partitioning for distributed training. GPU memory used to be a problem in the early days of deep learning. By now this issue has been resolved for all but the most unusual cases. We focus on data parallelism in what follows.\n\n## Data Parallelism\n\nAssume that there are $k$ GPUs on a machine. Given the model to be trained, each GPU will maintain a complete set of model parameters independently though parameter values across the GPUs are identical and synchronized. 
\nAs an example,\n:numref:`fig_data_parallel` illustrates \ntraining with\ndata parallelism when $k=2$.\n\n\n![Calculation of minibatch stochastic gradient descent using data parallelism on two GPUs.](../img/data-parallel.svg)\n:label:`fig_data_parallel`\n\nIn general, the training proceeds as follows:\n\n* In any iteration of training, given a random minibatch, we split the examples in the batch into $k$ portions and distribute them evenly across the GPUs.\n* Each GPU calculates loss and gradient of the model parameters based on the minibatch subset it was assigned.\n* The local gradients of each of the $k$ GPUs are aggregated to obtain the current minibatch stochastic gradient.\n* The aggregate gradient is re-distributed to each GPU.\n* Each GPU uses this minibatch stochastic gradient to update the complete set of model parameters that it maintains.\n\n\n\n\nNote that in practice we *increase* the minibatch size $k$-fold when training on $k$ GPUs such that each GPU has the same amount of work to do as if we were training on a single GPU only. On a 16-GPU server this can increase the minibatch size considerably and we may have to increase the learning rate accordingly.\nAlso note that batch normalization in :numref:`sec_batch_norm` needs to be adjusted, e.g., by keeping a separate batch normalization coefficient per GPU.\nIn what follows we will use a toy network to illustrate multi-GPU training.\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l", "_____no_output_____" ] ], [ [ "## [**A Toy Network**]\n\nWe use LeNet as introduced in :numref:`sec_lenet` (with slight modifications). We define it from scratch to illustrate parameter exchange and synchronization in detail.\n", "_____no_output_____" ] ], [ [ "# Initialize model parameters\nscale = 0.01\nW1 = torch.randn(size=(20, 1, 3, 3)) * scale\nb1 = torch.zeros(20)\nW2 = torch.randn(size=(50, 20, 5, 5)) * scale\nb2 = torch.zeros(50)\nW3 = torch.randn(size=(800, 128)) * scale\nb3 = torch.zeros(128)\nW4 = torch.randn(size=(128, 10)) * scale\nb4 = torch.zeros(10)\nparams = [W1, b1, W2, b2, W3, b3, W4, b4]\n\n# Define the model\ndef lenet(X, params):\n h1_conv = F.conv2d(input=X, weight=params[0], bias=params[1])\n h1_activation = F.relu(h1_conv)\n h1 = F.avg_pool2d(input=h1_activation, kernel_size=(2, 2), stride=(2, 2))\n h2_conv = F.conv2d(input=h1, weight=params[2], bias=params[3])\n h2_activation = F.relu(h2_conv)\n h2 = F.avg_pool2d(input=h2_activation, kernel_size=(2, 2), stride=(2, 2))\n h2 = h2.reshape(h2.shape[0], -1)\n h3_linear = torch.mm(h2, params[4]) + params[5]\n h3 = F.relu(h3_linear)\n y_hat = torch.mm(h3, params[6]) + params[7]\n return y_hat\n\n# Cross-entropy loss function\nloss = nn.CrossEntropyLoss(reduction='none')", "_____no_output_____" ] ], [ [ "## Data Synchronization\n\nFor efficient multi-GPU training we need two basic operations. \nFirst we need to have the ability to [**distribute a list of parameters to multiple devices**] and to attach gradients (`get_params`). 
Without parameters it is impossible to evaluate the network on a GPU.\nSecond, we need the ability to sum parameters across multiple devices, i.e., we need an `allreduce` function.\n", "_____no_output_____" ] ], [ [ "def get_params(params, device):\n new_params = [p.to(device) for p in params]\n for p in new_params:\n p.requires_grad_()\n return new_params", "_____no_output_____" ] ], [ [ "Let us try it out by copying the model parameters to one GPU.\n", "_____no_output_____" ] ], [ [ "new_params = get_params(params, d2l.try_gpu(0))\nprint('b1 weight:', new_params[1])\nprint('b1 grad:', new_params[1].grad)", "b1 weight: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n device='cuda:0', requires_grad=True)\nb1 grad: None\n" ] ], [ [ "Since we did not perform any computation yet, the gradient with regard to the bias parameter is still zero.\nNow let us assume that we have a vector distributed across multiple GPUs. The following [**`allreduce` function adds up all vectors and broadcasts the result back to all GPUs**]. Note that for this to work we need to copy the data to the device accumulating the results.\n", "_____no_output_____" ] ], [ [ "def allreduce(data):\n for i in range(1, len(data)):\n data[0][:] += data[i].to(data[0].device)\n for i in range(1, len(data)):\n data[i][:] = data[0].to(data[i].device)", "_____no_output_____" ] ], [ [ "Let us test this by creating vectors with different values on different devices and aggregate them.\n", "_____no_output_____" ] ], [ [ "data = [torch.ones((1, 2), device=d2l.try_gpu(i)) * (i + 1) for i in range(2)]\nprint('before allreduce:\\n', data[0], '\\n', data[1])\nallreduce(data)\nprint('after allreduce:\\n', data[0], '\\n', data[1])", "before allreduce:\n tensor([[1., 1.]], device='cuda:0') \n tensor([[2., 2.]], device='cuda:1')\nafter allreduce:\n tensor([[3., 3.]], device='cuda:0') \n tensor([[3., 3.]], device='cuda:1')\n" ] ], [ [ "## Distributing Data\n\nWe need a simple utility function to [**distribute a minibatch evenly across multiple GPUs**]. For instance, on two GPUs we would like to have half of the data to be copied to either of the GPUs.\nSince it is more convenient and more concise, we use the built-in function from the deep learning framework to try it out on a $4 \\times 5$ matrix.\n", "_____no_output_____" ] ], [ [ "data = torch.arange(20).reshape(4, 5)\ndevices = [torch.device('cuda:0'), torch.device('cuda:1')]\nsplit = nn.parallel.scatter(data, devices)\nprint('input :', data)\nprint('load into', devices)\nprint('output:', split)", "input : tensor([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19]])\nload into [device(type='cuda', index=0), device(type='cuda', index=1)]\noutput: (tensor([[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9]], device='cuda:0'), tensor([[10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19]], device='cuda:1'))\n" ] ], [ [ "For later reuse we define a `split_batch` function that splits both data and labels.\n", "_____no_output_____" ] ], [ [ "#@save\ndef split_batch(X, y, devices):\n \"\"\"Split `X` and `y` into multiple devices.\"\"\"\n assert X.shape[0] == y.shape[0]\n return (nn.parallel.scatter(X, devices),\n nn.parallel.scatter(y, devices))", "_____no_output_____" ] ], [ [ "## Training\n\nNow we can implement [**multi-GPU training on a single minibatch**]. Its implementation is primarily based on the data parallelism approach described in this section. 
We will use the auxiliary functions we just discussed, `allreduce` and `split_and_load`, to synchronize the data among multiple GPUs. Note that we do not need to write any specific code to achieve parallelism. Since the computational graph does not have any dependencies across devices within a minibatch, it is executed in parallel *automatically*.\n", "_____no_output_____" ] ], [ [ "def train_batch(X, y, device_params, devices, lr):\n X_shards, y_shards = split_batch(X, y, devices)\n # Loss is calculated separately on each GPU\n ls = [loss(lenet(X_shard, device_W), y_shard).sum()\n for X_shard, y_shard, device_W in zip(\n X_shards, y_shards, device_params)]\n for l in ls: # Backpropagation is performed separately on each GPU\n l.backward()\n # Sum all gradients from each GPU and broadcast them to all GPUs\n with torch.no_grad():\n for i in range(len(device_params[0])):\n allreduce([device_params[c][i].grad for c in range(len(devices))])\n # The model parameters are updated separately on each GPU\n for param in device_params:\n d2l.sgd(param, lr, X.shape[0]) # Here, we use a full-size batch", "_____no_output_____" ] ], [ [ "Now, we can define [**the training function**]. It is slightly different from the ones used in the previous chapters: we need to allocate the GPUs and copy all the model parameters to all the devices.\nObviously each batch is processed using the `train_batch` function to deal with multiple GPUs. For convenience (and conciseness of code) we compute the accuracy on a single GPU, though this is *inefficient* since the other GPUs are idle.\n", "_____no_output_____" ] ], [ [ "def train(num_gpus, batch_size, lr):\n train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n devices = [d2l.try_gpu(i) for i in range(num_gpus)]\n # Copy model parameters to `num_gpus` GPUs\n device_params = [get_params(params, d) for d in devices]\n num_epochs = 10\n animator = d2l.Animator('epoch', 'test acc', xlim=[1, num_epochs])\n timer = d2l.Timer()\n for epoch in range(num_epochs):\n timer.start()\n for X, y in train_iter:\n # Perform multi-GPU training for a single minibatch\n train_batch(X, y, device_params, devices, lr)\n torch.cuda.synchronize()\n timer.stop()\n # Evaluate the model on GPU 0\n animator.add(epoch + 1, (d2l.evaluate_accuracy_gpu(\n lambda x: lenet(x, device_params[0]), test_iter, devices[0]),))\n print(f'test acc: {animator.Y[0][-1]:.2f}, {timer.avg():.1f} sec/epoch '\n f'on {str(devices)}')", "_____no_output_____" ] ], [ [ "Let us see how well this works [**on a single GPU**].\nWe first use a batch size of 256 and a learning rate of 0.2.\n", "_____no_output_____" ] ], [ [ "train(num_gpus=1, batch_size=256, lr=0.2)", "test acc: 0.81, 2.4 sec/epoch on [device(type='cuda', index=0)]\n" ] ], [ [ "By keeping the batch size and learning rate unchanged and [**increasing the number of GPUs to 2**], we can see that the test accuracy roughly stays the same compared with\nthe previous experiment.\nIn terms of the optimization algorithms, they are identical. Unfortunately there is no meaningful speedup to be gained here: the model is simply too small; moreover we only have a small dataset, where our slightly unsophisticated approach to implementing multi-GPU training suffered from significant Python overhead. 
We will encounter more complex models and more sophisticated ways of parallelization going forward.\nLet us see what happens nonetheless for Fashion-MNIST.\n", "_____no_output_____" ] ], [ [ "train(num_gpus=2, batch_size=256, lr=0.2)", "test acc: 0.83, 2.5 sec/epoch on [device(type='cuda', index=0), device(type='cuda', index=1)]\n" ] ], [ [ "## Summary\n\n* There are multiple ways to split deep network training over multiple GPUs. We could split them between layers, across layers, or across data. The former two require tightly choreographed data transfers. Data parallelism is the simplest strategy.\n* Data parallel training is straightforward. However, it increases the effective minibatch size to be efficient.\n* In data parallelism, data are split across multiple GPUs, where each GPU executes its own forward and backward operation and subsequently gradients are aggregated and results are broadcast back to the GPUs.\n* We may use slightly increased learning rates for larger minibatches.\n\n## Exercises\n\n1. When training on $k$ GPUs, change the minibatch size from $b$ to $k \\cdot b$, i.e., scale it up by the number of GPUs.\n1. Compare accuracy for different learning rates. How does it scale with the number of GPUs?\n1. Implement a more efficient `allreduce` function that aggregates different parameters on different GPUs? Why is it more efficient?\n1. Implement multi-GPU test accuracy computation.\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/1669)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cbc4657b78442b5ca13d53d33c5d3f83dc1355c2
3,178
ipynb
Jupyter Notebook
15_core_spells.ipynb
pysan-dev/pysan
60ca77ebac1749dadc91e6331f6da4b4c814e503
[ "Apache-2.0" ]
4
2020-05-18T20:19:25.000Z
2021-09-12T09:47:00.000Z
15_core_spells.ipynb
pysan-dev/pysan
60ca77ebac1749dadc91e6331f6da4b4c814e503
[ "Apache-2.0" ]
10
2020-05-19T12:10:03.000Z
2020-11-19T11:17:05.000Z
15_core_spells.ipynb
pysan-dev/pysan
60ca77ebac1749dadc91e6331f6da4b4c814e503
[ "Apache-2.0" ]
null
null
null
26.932203
354
0.568597
[ [ [ "# default_exp core.spells", "_____no_output_____" ] ], [ [ "# Spells\n\n> Describe spells within sequences", "_____no_output_____" ], [ "## Overview\n\nSpells are exclusively recurrent n-grams (the same element repeated a number of times), and are a useful prerequisite for a number of distance measures and other descriptive statistics. The `pysan.core.spells` module contains methods for understanding the prevalence and extremes of spells in sequences, plus some basic visualisation functionality.", "_____no_output_____" ], [ "## Methods", "_____no_output_____" ] ], [ [ "#export\ndef get_spells(sequence):\n \"Returns a list of tuples where each tuple holds the element and the length of the spell (also known as run or episode) for each spell in the sequence.\"\n\n # get each spell and its length\n spells = [(k, sum(1 for x in v)) for k,v in itertools.groupby(sequence)]\n # this is functionally equivalent to the following;\n # spells = [(k, len(list(v))) for k,v in itertools.groupby(sequence)]\n\n return spells", "_____no_output_____" ], [ "#export\ndef get_longest_spell(sequence):\n \"Returns a dict containing the element, count, and starting position of the longest spell in the sequence. The keys of this dict are 'element, 'count', and 'start'.\"\n\n spells = get_spells(sequence)\n\n longest_spell = max(count for element, count in spells)\n\n for i, (element, count) in enumerate(spells):\n if count == longest_spell:\n # sum the counts of all previous spells to get its starting position\n position_in_sequence = sum(count for _,count in spells[:i])\n\n return {'element':element, 'count':count,'start':position_in_sequence}", "_____no_output_____" ], [ "#export\ndef get_spell_durations(sequence):\n \"Computes the durations of each spell in the sequence, returning a list.\"\n\n spells = get_spells(sequence)\n durations = [spell[1] for spell in spells]\n\n return durations", "_____no_output_____" ] ], [ [ "# Plotting", "_____no_output_____" ], [ "todo: add plot_spell_durations", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
cbc468e0ff8df395121ff7e1a48d6f273b750246
29,236
ipynb
Jupyter Notebook
HW7extracredit/hw7_extra_credit_assignment.ipynb
ds-modules/EPS-130-SP20
f43b82658c98273166a79d463740d1ecaa6f48df
[ "BSD-3-Clause" ]
null
null
null
HW7extracredit/hw7_extra_credit_assignment.ipynb
ds-modules/EPS-130-SP20
f43b82658c98273166a79d463740d1ecaa6f48df
[ "BSD-3-Clause" ]
null
null
null
HW7extracredit/hw7_extra_credit_assignment.ipynb
ds-modules/EPS-130-SP20
f43b82658c98273166a79d463740d1ecaa6f48df
[ "BSD-3-Clause" ]
null
null
null
148.406091
21,164
0.867492
[ [ [ "# HW7 Extra Credit\n\n### This extra credit assignment, worth 50 pts toward the homework score, analyzes the displacement amplitude spectrum for a small $M_L$=4.1 earthquake that occurred in Berkeley on December 4, 1998. \n\n### Write python code to apply a ~$\\frac{1}{f_2}$ source model with attenuation to the observed displacment amplitude spectrum to determine:\n\n1. The scalar seismic moment\n\n2. The corner frequency of the earthquake\n\n3. The rupture area and slip\n\n4. The stress drop.\n\n5. Discuss your results in terms of what is typically found for earthquakes (use Lay and Wallace text as a reference).\n\n### The SH Greens function solution for an anelastic halfspace is:\n### u(f)=$\\frac{2 * |R_{SH}| * M_0}{4 * \\pi * \\rho * \\beta^3 *R} \\cdot \\frac{1}{[1 + (\\frac{f}{f_c})^2]^{(\\frac{p}{2})}} \\cdot e^{(\\frac{-f*\\pi*R}{Q*\\beta})}$\n#### Where Rsh is the SH radiation pattern (eqn 8.65 Lay and Wallace), $M_0$ is the scalar moment, $\\rho, \\beta$, Q (range 10-100), R, f and $f_c$ (range .1 to 10 Hz) are the density, shear wave velocity, attenuation quality factor, total distance, frequency and corner frequency. The parameter p allows for adjusting the high frequency fall off rate of the spectrum. For a Brune source p=2 (a minimum value of p to conserve energy is 1.5, and typically the maximum is 3).\n\n#### u(f) is the given amplitude spectrum plotted below.\n\n#### Be sure to use CGS (cm, grams, seconds) units for all parameters. The unit for scalar moment will therefore be dyne cm.\n\n#### Develop a nested for loop to search for optimal Mo, fc and Q parameters\n", "_____no_output_____" ] ], [ [ "#Initial Setup and Subroutine Definitions - Do Not Edit\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "#Model Parameters\ndistance=6.8e5 #units cm\ndepth=5.1e5 #units cm\nazimuth=137.8*np.pi/180 #radians\nstrike=139*np.pi/180 #radians\nrake=179*np.pi/180 #radians\ndip=69*np.pi/180 #radians\nbeta=3.2e5 #cm/s\ndensity=2.6 #grams/cc\n\n#Compute Total distance (R), Azimuth(phi) and takeoff angle(I)\nphi=strike-azimuth\nR=np.sqrt(distance**2 + depth**2);\nI=np.pi-np.arctan(distance/depth); #pi is for upgoing angle\n\n\n# Read Data File and Setup frequency and amplitude spectral amplitude arrays\ndata=pd.read_csv('brkspec.txt', sep=' ', delimiter=None, header=None,\n names = ['Hz','AmpSpec'])\nfreq=np.array(data[\"Hz\"])\nampspec=np.array(data[\"AmpSpec\"])\n\nplt.loglog(freq,ampspec)\nplt.title('Berkeley Event Amplitude Spectrum')\nplt.xlabel('frequency (Hz)')\nplt.ylabel('amplitude spectrum cm/Hz')\nplt.savefig('brkspec.jpg')\nplt.show()", "_____no_output_____" ], [ "##### Write code to calculate the SH radiation pattern coefficient\n\n\n#Write code to fit the spectral model to the observed displacement spectrum.\n#This can be accomplished with a nested for loop over the scalar moment and corner frequency\n#parameters\n\n#Define grid search range\nMo=np.arange(100.,400.,10.)*1e20 #dyne cm\nfc=np.arange(0.1,10.,0.05)\nq=np.arange(10.,100.,5.)\np=np.arange(2.0,3.5,10.)\n#p=np.array([2.0, 2.0])\n\n#Loop over model parameters and test for fit with data to determine best fit parameters\n\n\n#Plot the fit to the data, and discuss the uncertainties in the solution\n", "_____no_output_____" ] ], [ [ "#### This is an example of the fit that can be obtained\n\n<img src='brkspecfit.jpg'>", "_____no_output_____" ], [ "### Questions\n1. 
What are the scalar seismic moment, Mw, corner frequency and Q that best fit the spectra assuming p=2.0?\n\n2. How does the fit and the scalar moment, corner frequency and Q change if p=3.0?\n\n3. The fault radius can be determed from the corner frequency where radius=0.37*beta/fc. Use the fault radius and moment to estimate the average slip and the stress drop of the earthquake\n\n4. Discuss the estimated stress drop in terms of the expected range of values for earthquakes.\n\n5. How well determined do you think your corner frequency and moment estimates are. How do uncertainties in those quantitites translate to uncertainty in stress drop?", "_____no_output_____" ] ], [ [ "#Use the corner frequency to estimate the fault rupture area, the average slip on the fault\n#and the stress drop\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cbc46f8a7e1d54edcba70adfcdb8cc5173863267
2,872
ipynb
Jupyter Notebook
jupyter-notebooks/backup/UCSD 400___ L4 DHDS -- Deep Learning and AI -- exercise.ipynb
totalgood/dsdh
f74324543932c9c31cf0e1ffbde3f9c0562e23f0
[ "MIT" ]
null
null
null
jupyter-notebooks/backup/UCSD 400___ L4 DHDS -- Deep Learning and AI -- exercise.ipynb
totalgood/dsdh
f74324543932c9c31cf0e1ffbde3f9c0562e23f0
[ "MIT" ]
null
null
null
jupyter-notebooks/backup/UCSD 400___ L4 DHDS -- Deep Learning and AI -- exercise.ipynb
totalgood/dsdh
f74324543932c9c31cf0e1ffbde3f9c0562e23f0
[ "MIT" ]
1
2020-01-23T04:45:28.000Z
2020-01-23T04:45:28.000Z
25.642857
117
0.380571
[ [ [ "import pandas as pd\ntable = [[0]*6] * 2\npd.DataFrame(table,\n columns='input_dim layer_1_neurons layer_2_neurons layer_3_neurons train_accuracy test_accuracy'.split())\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cbc47dc7ba9a36a9db8bee10e8a2bcb37144e794
4,073
ipynb
Jupyter Notebook
DSA/tree/isValidBST.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
1
2019-03-27T13:00:28.000Z
2019-03-27T13:00:28.000Z
DSA/tree/isValidBST.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
null
null
null
DSA/tree/isValidBST.ipynb
lance-lh/Data-Structures-and-Algorithms
c432654edaeb752536e826e88bcce3ed2ab000fb
[ "MIT" ]
null
null
null
27.52027
97
0.423275
[ [ [ "Given a binary tree, determine if it is a valid binary search tree (BST).\n\nAssume a BST is defined as follows:\n\n- The left subtree of a node contains only nodes with keys less than the node's key.\n- The right subtree of a node contains only nodes with keys greater than the node's key.\n- Both the left and right subtrees must also be binary search trees.\n \n\nExample 1:\n\n 2\n / \\\n 1 3\n\n Input: [2,1,3]\n Output: true\nExample 2:\n\n 5\n / \\\n 1 4\n / \\\n 3 6\n\n Input: [5,1,4,null,null,3,6]\n Output: false\n Explanation: The root node's value is 5 but its right child's value is 4.", "_____no_output_____" ] ], [ [ "# recursive solution\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n lst = self.inorderTraversal(root)\n for i in range(1,len(lst)):\n if lst[i-1] >= lst[i]:\n return False\n return True\n \n def inorderTraversal(self, root):\n '''\n :param root: TreeNode\n :return: List[int]\n '''\n res = []\n if root:\n res = self.inorderTraversal(root.left)\n res.append(root.val)\n res += self.inorderTraversal(root.right)\n return res", "_____no_output_____" ], [ "# iterative solution\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n lst = self.inorderTraversal(root)\n for i in range(1,len(lst)):\n if lst[i-1] >= lst[i]:\n return False\n return True\n \n def inorderTraversal(self, root):\n '''\n :param root: TreeNode\n :return: List[int]\n '''\n # order: left -> root -> right\n res, stack = [], []\n while True:\n while root: \n stack.append(root) \n root = root.left \n \n if not stack:\n return res\n \n node = stack.pop()\n res.append(node.val)\n\n root = node.right", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
cbc480d47932c8101ccc797c6dc8faa5c4791f8b
217,536
ipynb
Jupyter Notebook
triton/Nano MobileNet.ipynb
emwjacobson/JetsonEfficiencyTesting
784ab3ef3d8e39734da210dc0e0bb3c2cb4275ad
[ "Info-ZIP" ]
null
null
null
triton/Nano MobileNet.ipynb
emwjacobson/JetsonEfficiencyTesting
784ab3ef3d8e39734da210dc0e0bb3c2cb4275ad
[ "Info-ZIP" ]
null
null
null
triton/Nano MobileNet.ipynb
emwjacobson/JetsonEfficiencyTesting
784ab3ef3d8e39734da210dc0e0bb3c2cb4275ad
[ "Info-ZIP" ]
null
null
null
673.486068
150,762
0.940998
[ [ [ "import matplotlib.pyplot as plt\n\nmax_frequency = \"921600000\"\neff_frequency = \"768000000\"\n\n# Batch Size,Concurrency,Inferences/Second,Client Send,Network+Server Send/Recv,Server Queue,Server Compute Input,Server Compute Infer,Server Compute Output,Client Recv,p50 latency,p90 latency,p95 latency,p99 latency\n\n\n# GPU Max\nmax_gpu_data = []\nmax_gpu_power_data = []\nmax_gpu_timings = []\n\nwith open(f\"data/nano/{max_frequency}_data.csv\", \"r\") as f:\n max_gpu_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{max_frequency}_power.csv\", \"r\") as f:\n max_gpu_power_data = [l.strip().split(\",\") for l in f.readlines()]\n\nwith open(f\"data/nano/{max_frequency}_timings.csv\", \"r\") as f:\n max_gpu_timings = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\n\n# GPU Eff\neff_gpu_data = []\neff_gpu_power_data = []\neff_gpu_timings = []\n\nwith open(f\"data/nano/{eff_frequency}_data.csv\", \"r\") as f:\n eff_gpu_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{eff_frequency}_power.csv\", \"r\") as f:\n eff_gpu_power_data = [l.strip().split(\",\") for l in f.readlines()]\n\nwith open(f\"data/nano/{eff_frequency}_timings.csv\", \"r\") as f:\n eff_gpu_timings = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\n\n# DLA Max\ndla_max_data = []\ndla_max_power_data = []\ndla_max_timings = []\n\nwith open(f\"data/nano/{eff_frequency}_dla_data.csv\", \"r\") as f:\n dla_max_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{eff_frequency}_dla_power.csv\", \"r\") as f:\n dla_max_power_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{eff_frequency}_dla_timings.csv\", \"r\") as f:\n dla_max_timings = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\n\ndla_power = [p for p in dla_max_power_data if p[0] > dla_max_timings[0][0] and p[0] < dla_max_timings[0][1]]\navg_dla_power = sum([float(r[1]) for r in dla_power])/len(dla_power)\njoule_per_inference = avg_dla_power / float(dla_max_data[0][2])\n\n\nmax_x = ['TRT']\nmax_y = [joule_per_inference*1000]\n\n\n# DLA Eff\ndla_eff_data = []\ndla_eff_power_data = []\ndla_eff_timings = []\n\nwith open(f\"data/nano/{max_frequency}_dla_data.csv\", \"r\") as f:\n dla_eff_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{max_frequency}_dla_power.csv\", \"r\") as f:\n dla_eff_power_data = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\nwith open(f\"data/nano/{max_frequency}_dla_timings.csv\", \"r\") as f:\n dla_eff_timings = [l.strip().split(\",\") for l in f.readlines()[1:]]\n\n\ndla_power = [p for p in dla_eff_power_data if p[0] > dla_eff_timings[0][0] and p[0] < dla_eff_timings[0][1]]\navg_dla_power = sum([float(r[1]) for r in dla_power])/len(dla_power)\njoule_per_inference = avg_dla_power / float(dla_eff_data[0][2])\n\n\neff_x = ['TRT']\neff_y = [joule_per_inference*1000]\n\n\nfor start, stop, batch_size in max_gpu_timings:\n power = [p for p in max_gpu_power_data if p[0] > start and p[0] < stop]\n d = [d for d in max_gpu_data if d[0] == batch_size][0]\n avg_power = sum([float(r[1]) for r in power])/len(power)\n joule_per_inference = avg_power / float(d[2])\n\n max_x.append(batch_size)\n max_y.append(joule_per_inference*1000)\n\nfor start, stop, batch_size in eff_gpu_timings:\n power = [p for p in eff_gpu_power_data if p[0] > start and p[0] < stop]\n d = [d for d in eff_gpu_data if d[0] == batch_size][0]\n avg_power = sum([float(r[1]) for r in 
power])/len(power)\n joule_per_inference = avg_power / float(d[2])\n\n eff_x.append(batch_size)\n eff_y.append(joule_per_inference*1000)\n\n\nexport_file = open(\"../export/nano_energy_by_batch.csv\", \"w\")\n\nfig, ax = plt.subplots(1, 2, figsize=(25, 10), sharey=True)\n\nax[0].set_axisbelow(True)\nax[0].grid(axis='y')\nax[0].set_title(f\"Energy Usage per Inference {int(max_frequency)/1000000}MHz\")\nax[0].set_xlabel(\"Batch Size\")\nax[0].set_ylabel(\"Energy(mJ) per Inference\")\nax[0].bar(max_x, max_y)\nexport_file.write(f\"{max_x}\\n\")\nexport_file.write(f\"{max_y}\\n\\n\")\n\nax[1].set_axisbelow(True)\nax[1].grid(axis='y')\nax[1].yaxis.set_tick_params(labelleft=True)\nax[1].set_title(f\"Energy Usage per Inference {int(eff_frequency)/1000000}MHz\")\nax[1].set_xlabel(\"Batch Size\")\nax[1].set_ylabel(\"Energy(mJ) per Inference\")\nax[1].bar(eff_x, eff_y)\nexport_file.write(f\"{eff_x}\\n\")\nexport_file.write(f\"{eff_y}\\n\")\nexport_file.close()", "_____no_output_____" ], [ "batch_size = 4\n\ndata = []\npower = []\ntimings = []\nwith open(f\"data/nano/batch_{batch_size}_data.csv\", \"r\") as f_data, open(f\"data/nano/batch_{batch_size}_power.csv\", \"r\") as f_power, open(f\"data/nano/batch_{batch_size}_timings.csv\", \"r\") as f_timings:\n data = [l.strip().split(\",\") for l in f_data.readlines()[1:]]\n power = [l.strip().split(\",\") for l in f_power.readlines()[1:]]\n timings = [l.strip().split(\",\") for l in f_timings.readlines()[1:]]\n\nx = []\ny = []\n\nfor start, stop, frequency in timings:\n power_raw = [p for p in power if p[0] > start and p[0] < stop]\n d = [d for d in data if d[0] == frequency][0]\n avg_power = sum([float(r[1]) for r in power_raw])/len(power_raw)\n joule_per_inference = avg_power / float(d[2])\n\n x.append(str(int(frequency)/1000000))\n y.append(joule_per_inference*1000)\n\nnormalized_y = [y[-1]/_y for _y in y]\n\nfig, ax = plt.subplots(1, 2, figsize=(25, 10))\n\nax[0].set_axisbelow(True)\nax[0].grid(axis='y')\nax[0].set_title(f\"Energy Usage per Inference per Frequency, Batch Size of {batch_size}\")\nax[0].set_xlabel(\"Frequency MHz\")\nax[0].set_ylabel(\"Energy(mJ) per Inference\")\nax[0].bar(x, y)\n\nax[1].set_axisbelow(True)\nax[1].grid(axis='y')\nax[1].set_title(f\"Normalized Energy Usage, Batch Size of {batch_size}\")\nax[1].set_xlabel(\"Frequency MHz\")\nax[1].set_ylabel(\"Normalized Power Usage\")\nax[1].bar(x, normalized_y)", "_____no_output_____" ], [ "batch_sizes = [1, 2, 4, 8, 16, 32]\n\ndata = []\npower = []\ntimings = []\nfor size in batch_sizes:\n with open(f\"data/nano/batch_{size}_data.csv\", \"r\") as f_data, open(f\"data/nano/batch_{size}_power.csv\", \"r\") as f_power, open(f\"data/nano/batch_{size}_timings.csv\", \"r\") as f_timings:\n data.append([l.strip().split(\",\") for l in f_data.readlines()[1:]])\n power.append([l.strip().split(\",\") for l in f_power.readlines()[1:]])\n timings.append([l.strip().split(\",\") for l in f_timings.readlines()[1:]])\n\nx = []\ny = []\n\nfor idx, timing_data in enumerate(timings):\n x.append([])\n y.append([])\n for start, stop, frequency in timing_data:\n power_raw = [p for p in power[idx] if p[0] > start and p[0] < stop]\n d = [d for d in data[idx] if d[0] == frequency][0]\n avg_power = sum([float(r[1]) for r in power_raw])/len(power_raw)\n joule_per_inference = avg_power / float(d[2])\n\n x[idx].append(str(int(frequency)/1000000))\n y[idx].append(joule_per_inference*1000)\n\nnormalized_y = []\nfor idx, _ in enumerate(batch_sizes):\n normalized_y.append([y[idx][-1]/_y for _y in 
y[idx]])\n\nexport_file = open(\"../export/nano_energy_by_freq.csv\", \"w\")\n\nfig, ax = plt.subplots(1, 2, figsize=(25, 10))\n\nax[0].set_axisbelow(True)\nax[0].grid(axis='y')\nax[0].set_title(f\"Energy Usage per Inference by Freqency\")\nax[0].set_xlabel(\"Frequency MHz\")\nax[0].set_ylabel(\"Energy(mJ) per Inference\")\nfor idx, size in enumerate(batch_sizes):\n ax[0].plot(x[idx], y[idx], label=f\"{size}\")\n export_file.write(f\"{x[idx]}\\n{y[idx]}\\n\")\nax[0].legend(loc=\"upper left\", title=\"Batch Size\")\nexport_file.write(f\"\\n\\n\")\n\nax[1].set_axisbelow(True)\nax[1].grid(axis='y')\nax[1].set_title(f\"Normalized Efficiency\")\nax[1].set_xlabel(\"Frequency MHz\")\nax[1].set_ylabel(\"Normalized Efficiency\")\nfor idx, size in enumerate(batch_sizes):\n ax[1].plot(x[idx], normalized_y[idx], label=f\"{size}\")\n export_file.write(f\"{x[idx]}\\n{normalized_y[idx]}\\n\")\nax[1].legend(loc=\"upper left\", title=\"Batch Size\")\nexport_file.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cbc48cfade8021cb4bca1ae1b6a2172f2d762ba8
28,360
ipynb
Jupyter Notebook
notebooks/Monte_Carlo_Simulationv2.ipynb
ilysainath/Building-a-Repeatable-Data-Analysis-Process-with-Jupyter-Notebooks
87a825a6a4b0aa25ef644498a781b222aa11369d
[ "BSD-3-Clause" ]
1,846
2015-05-18T02:04:30.000Z
2022-03-31T09:49:16.000Z
notebooks/Monte_Carlo_Simulationv2.ipynb
alanscardoso/pbpython
d170f4475720ca8f5fdc6770214b8f90f2197f75
[ "BSD-3-Clause" ]
28
2015-12-07T01:57:08.000Z
2021-08-24T01:21:02.000Z
notebooks/Monte_Carlo_Simulationv2.ipynb
alanscardoso/pbpython
d170f4475720ca8f5fdc6770214b8f90f2197f75
[ "BSD-3-Clause" ]
1,054
2015-05-18T06:19:11.000Z
2022-03-16T06:13:37.000Z
67.52381
17,920
0.768724
[ [ [ "### Monte Carlo Simulation with Python\n\nNotebook to accompany article on [Practical Business Python](https://pbpython.com/monte-carlo.html)\n\nUpdate to use numpy for faster loops based on comments [here](https://www.reddit.com/r/Python/comments/arxwkm/monte_carlo_simulation_with_python/)", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ], [ "sns.set_style('whitegrid')", "_____no_output_____" ], [ "# Define the variables for the Percent to target based on historical results\navg = 1\nstd_dev = .1\nnum_reps = 500\nnum_simulations = 100000", "_____no_output_____" ], [ "# Show an example of calculating the percent to target\npct_to_target = np.random.normal(\n avg,\n std_dev,\n size=(num_reps, num_simulations)\n)", "_____no_output_____" ], [ "pct_to_target[0:10]", "_____no_output_____" ], [ "# Another example for the sales target distribution\nsales_target_values = [75_000, 100_000, 200_000, 300_000, 400_000, 500_000]\nsales_target_prob = [.3, .3, .2, .1, .05, .05]\nsales_target = np.random.choice(sales_target_values, p=sales_target_prob, \n size=(num_reps, num_simulations))", "_____no_output_____" ], [ "sales_target[0:10]", "_____no_output_____" ], [ "commission_percentages = np.take(\n np.array([0.02, 0.03, 0.04]),\n np.digitize(pct_to_target, bins=[.9, .99, 10])\n)", "_____no_output_____" ], [ "commission_percentages[0:10]", "_____no_output_____" ], [ "total_commissions = (commission_percentages * sales_target).sum(axis=0)", "_____no_output_____" ], [ "total_commissions.std()", "_____no_output_____" ], [ "# Show how to create the dataframe\ndf = pd.DataFrame(data={'Total_Commissions': total_commissions})\ndf.head()", "_____no_output_____" ], [ "df.plot(kind='hist', title='Commissions Distribution')", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc4a5104e6593ce7e1233bc51bb7fd64bbe2b81
51,673
ipynb
Jupyter Notebook
Case 2 version 3.ipynb
sannits/cognitive-systems-for-healthtechnology-applications
1bfdbf898852941320002822c8a461f5c53d1721
[ "MIT" ]
null
null
null
Case 2 version 3.ipynb
sannits/cognitive-systems-for-healthtechnology-applications
1bfdbf898852941320002822c8a461f5c53d1721
[ "MIT" ]
null
null
null
Case 2 version 3.ipynb
sannits/cognitive-systems-for-healthtechnology-applications
1bfdbf898852941320002822c8a461f5c53d1721
[ "MIT" ]
null
null
null
85.128501
14,240
0.748902
[ [ [ "# Case 2. Diabetic Retinopathy Analysis\nSanni Tolonen<br>\n26.2.2018<br>\nCognitive Systems for Health Technology Applications, Spring 2018<br>\nHelsinki Metropolia University of Applied Sciences", "_____no_output_____" ], [ "<h2>1. Objectives</h2><br>\nThe aim of this assignment is to learn to use convolutional neural networks to classify medical\nimages.<br>\nFor a little help in this assignment I checked what Ben Graham and his team had done in Kaggle Diabetic Retinopathy Detection competition report.", "_____no_output_____" ], [ "<h2> 2. Required libraries </h2>", "_____no_output_____" ] ], [ [ "# import libraries and functions\n\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport pickle\nfrom keras import layers\nfrom keras import models\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nimport time", "Using TensorFlow backend.\n" ] ], [ [ "Numpy is used for scientific computing and creating multidimensional arrays, matplotlib for ploting figures, pickle for saving the history of the model, keras for building the convolutional neural network and time for calculating time.", "_____no_output_____" ], [ "<h2> 3. Data description and preprocessing </h2>", "_____no_output_____" ], [ "This dataset is a large set of high-resolution retina images taken under a variety of imaging conditions. \nA clinician has rated the presence of diabetic retinopathy in each image on a scale of 0 to 4:\n\n<ul>\n<li>0 - No DR</li>\n<li>1 - Mild</li>\n<li>2 - Moderate</li>\n<li>3 - Severe</li>\n<li>4 - Proliferative DR</li>\n</ul>\n\nThe images come from different models and types of cameras, which can affect the visual appearance. Some images are shown as one would see the retina anatomically meaning macula on the left, optic nerve on the right for the right eye. Others are shown as one would see through a microscope condensing lens in example inverted, as one sees in a typical live eye exam. There are two ways to tell if an image is inverted:\n\nIt is inverted if the macula, the small dark central area, is slightly higher than the midline through the optic nerve. If the macula is lower than the midline of the optic nerve, it's not inverted.\nIf there is a notch on the side of the image (square, triangle, or circle) then it's not inverted. 
If there is no notch, it's inverted.", "_____no_output_____" ] ], [ [ "# dataset directories and labels files\n\ntrain_dir = \"../dataset2/train\" \nvalidation_dir = \"../dataset2/validation\" \ntest_dir = \"../dataset2/test\" \n\n# create datagenerators\n\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n fill_mode='nearest',\n horizontal_flip=True,\n zoom_range=0.2)\n\nvalidation_datagen = ImageDataGenerator(rescale = 1./255)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# training parameters\n\nbatch_size = 50\nepochs = 50\nsteps_per_epoch = 25 \nvalidation_steps = 10\nimage_height = 150\nimage_width = 200 \n\n# generator for train dataset\n\nprint('Training dataset.')\ntrain_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size = (image_height, image_width),\n batch_size = batch_size,\n class_mode = 'binary')\n\n# generator for validation dataset\n\nprint('Validation dataset.')\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_dir,\n target_size = (image_height, image_width),\n batch_size = batch_size,\n class_mode = 'binary')\n\n# generator for test dataset\n\nprint('Test dataset.')\ntest_generator = test_datagen.flow_from_directory(\n test_dir,\n target_size = (image_height, image_width),\n batch_size = batch_size,\n class_mode = 'binary')\n", "Training dataset.\nFound 1928 images belonging to 2 classes.\nValidation dataset.\nFound 1021 images belonging to 2 classes.\nTest dataset.\nFound 1013 images belonging to 2 classes.\n" ] ], [ [ "Dataset is splitted to train, validation and test datasets. All images will be rescaled by 1./255 and resized to 150x200. Training set is supplemented. It's filling mode is choosed 'nearest' which means that if there are generated empty pixels in prosessing generator is able to choose a pixel value from nearest pixel that has a value. It's also accepting horizontal flip, zoom range is maxium in 20%. For preprocessing I first tried the preprocessing function designed by Sakari Lukkarinen but I had some issues with that so I did some research and used a different approach.", "_____no_output_____" ], [ "<h2> 4. Modeling and compilation </h2>", "_____no_output_____" ], [ "This model is almost exactly alike the one in Sakaris GitHub repository. I wanted to try with this one also, since I had problems with the other one. For starters, there is two Conv2D layers followed by one MaxPool2D layer. After two sets of these, there is two Conv2D layers and then two sets of two Conv2D layers with a Dropout layer for weight regularization to avoid overfitting. 
In the end there is Flatten layer to flatten the input, a couple of Dense leyers and another Dropout layer.", "_____no_output_____" ] ], [ [ "# build the model\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation = 'relu', \n input_shape = (image_height, image_width, 3)))\nmodel.add(layers.Conv2D(32, (3, 3), activation = 'relu'))\nmodel.add(layers.MaxPool2D((3, 3), strides=2))\n\nmodel.add(layers.Conv2D(64, (3, 3), activation = 'relu'))\nmodel.add(layers.Conv2D(64, (3, 3), activation = 'relu'))\nmodel.add(layers.MaxPool2D((3, 3), strides=2))\n\nmodel.add(layers.Conv2D(96, (3, 3), activation = 'relu'))\nmodel.add(layers.Conv2D(96, (3, 3), activation = 'relu'))\nmodel.add(layers.MaxPool2D((3, 3), strides=2))\n\nmodel.add(layers.Conv2D(128, (3, 3), activation = 'relu'))\nmodel.add(layers.Conv2D(128, (3, 3), activation = 'relu'))\n\nmodel.add(layers.Conv2D(160, (3, 3), activation = 'relu'))\nmodel.add(layers.Conv2D(160, (3, 3), activation = 'relu'))\nmodel.add(layers.Dropout(0.1))\n\nmodel.add(layers.Conv2D(192, (3, 3), activation = 'relu'))\nmodel.add(layers.Conv2D(192, (3, 3), activation = 'relu'))\nmodel.add(layers.Dropout(0.1))\n\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(256, activation='relu'))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.summary()\n\n# compile the model\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizers.RMSprop(),\n metrics=['acc'])\n", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 148, 198, 32) 896 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 146, 196, 32) 9248 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 72, 97, 32) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 70, 95, 64) 18496 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 68, 93, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 33, 46, 64) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 31, 44, 96) 55392 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 29, 42, 96) 83040 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 14, 20, 96) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 12, 18, 128) 110720 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 10, 16, 128) 147584 \n_________________________________________________________________\nconv2d_9 (Conv2D) (None, 8, 14, 160) 184480 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 6, 12, 160) 230560 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 6, 12, 160) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 4, 10, 192) 276672 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 2, 8, 192) 331968 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 2, 8, 192) 0 
\n_________________________________________________________________\nflatten_1 (Flatten) (None, 3072) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 256) 786688 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 257 \n=================================================================\nTotal params: 2,272,929\nTrainable params: 2,272,929\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "<h2> 5. Training and validation </h2>", "_____no_output_____" ] ], [ [ "# train the model\n\nt1 = time.time()\n\nh = model.fit_generator(\n train_generator,\n steps_per_epoch = steps_per_epoch,\n verbose = 1,\n epochs = epochs,\n validation_data = validation_generator,\n validation_steps = validation_steps)\nt2 = time.time()\n\n# store the elapsed time into history\n\nh.history.update({'time_elapsed': t2 - t1})\n\n# save the model and history\n\nmodel.save('case_2_run_3.h5')\npickle.dump(h.history, open('case_2_history_3.p', 'wb'))", "Epoch 1/50\n25/25 [==============================] - 645s 26s/step - loss: 4.5573 - acc: 0.6998 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 2/50\n25/25 [==============================] - 578s 23s/step - loss: 4.7194 - acc: 0.7072 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 3/50\n25/25 [==============================] - 570s 23s/step - loss: 4.5941 - acc: 0.7150 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 4/50\n25/25 [==============================] - 613s 25s/step - loss: 4.6552 - acc: 0.7112 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 5/50\n25/25 [==============================] - 580s 23s/step - loss: 4.5131 - acc: 0.7200 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 6/50\n25/25 [==============================] - 555s 22s/step - loss: 4.7266 - acc: 0.7068 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 7/50\n25/25 [==============================] - 597s 24s/step - loss: 4.8390 - acc: 0.6998 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 8/50\n25/25 [==============================] - 571s 23s/step - loss: 4.7363 - acc: 0.7061 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 9/50\n25/25 [==============================] - 578s 23s/step - loss: 4.3454 - acc: 0.7304 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 10/50\n25/25 [==============================] - 632s 25s/step - loss: 4.8037 - acc: 0.7020 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 11/50\n25/25 [==============================] - 572s 23s/step - loss: 4.6162 - acc: 0.7136 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 12/50\n25/25 [==============================] - 593s 24s/step - loss: 4.6291 - acc: 0.7128 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 13/50\n25/25 [==============================] - 626s 25s/step - loss: 4.7106 - acc: 0.7077 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 14/50\n25/25 [==============================] - 589s 24s/step - loss: 4.6936 - acc: 0.7088 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 15/50\n25/25 [==============================] - 657s 26s/step - loss: 4.5260 - acc: 0.7192 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 16/50\n25/25 [==============================] - 614s 25s/step - loss: 4.6463 - acc: 0.7117 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 17/50\n25/25 [==============================] - 598s 24s/step - loss: 4.8096 - acc: 0.7016 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 18/50\n25/25 [==============================] - 622s 25s/step - loss: 4.7967 - acc: 
0.7024 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 19/50\n25/25 [==============================] - 572s 23s/step - loss: 4.5527 - acc: 0.7175 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 20/50\n25/25 [==============================] - 568s 23s/step - loss: 4.5646 - acc: 0.7168 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 21/50\n25/25 [==============================] - 610s 24s/step - loss: 4.8483 - acc: 0.6992 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 22/50\n25/25 [==============================] - 578s 23s/step - loss: 4.5101 - acc: 0.7202 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 23/50\n25/25 [==============================] - 576s 23s/step - loss: 4.5002 - acc: 0.7208 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 24/50\n25/25 [==============================] - 591s 24s/step - loss: 4.8490 - acc: 0.6992 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 25/50\n25/25 [==============================] - 570s 23s/step - loss: 4.6813 - acc: 0.7096 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 26/50\n25/25 [==============================] - 573s 23s/step - loss: 4.8003 - acc: 0.7022 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 27/50\n25/25 [==============================] - 591s 24s/step - loss: 4.4228 - acc: 0.7256 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 28/50\n25/25 [==============================] - 546s 22s/step - loss: 4.7137 - acc: 0.7076 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 29/50\n25/25 [==============================] - 619s 25s/step - loss: 4.7102 - acc: 0.7078 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 30/50\n25/25 [==============================] - 580s 23s/step - loss: 4.6747 - acc: 0.7100 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 31/50\n25/25 [==============================] - 562s 22s/step - loss: 4.6936 - acc: 0.7088 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 32/50\n25/25 [==============================] - 617s 25s/step - loss: 4.5131 - acc: 0.7200 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 33/50\n25/25 [==============================] - 576s 23s/step - loss: 4.8103 - acc: 0.7016 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 34/50\n25/25 [==============================] - 563s 23s/step - loss: 4.7942 - acc: 0.7026 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 35/50\n25/25 [==============================] - 637s 25s/step - loss: 4.3712 - acc: 0.7288 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 36/50\n25/25 [==============================] - 574s 23s/step - loss: 4.8295 - acc: 0.7004 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 37/50\n25/25 [==============================] - 581s 23s/step - loss: 4.5646 - acc: 0.7168 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 38/50\n25/25 [==============================] - 577s 23s/step - loss: 4.5815 - acc: 0.7158 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 39/50\n25/25 [==============================] - 576s 23s/step - loss: 4.7839 - acc: 0.7032 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 40/50\n25/25 [==============================] - 579s 23s/step - loss: 4.8999 - acc: 0.6960 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 41/50\n25/25 [==============================] - 581s 23s/step - loss: 4.7424 - acc: 0.7058 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 42/50\n25/25 [==============================] - 556s 22s/step - loss: 4.3583 - acc: 0.7296 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 43/50\n25/25 [==============================] - 620s 25s/step - loss: 4.8232 - acc: 0.7008 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 44/50\n25/25 [==============================] - 581s 23s/step - loss: 4.5617 - acc: 0.7170 - val_loss: 4.9644 - val_acc: 
0.6920\nEpoch 45/50\n25/25 [==============================] - 558s 22s/step - loss: 4.5554 - acc: 0.7174 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 46/50\n25/25 [==============================] - 623s 25s/step - loss: 4.8225 - acc: 0.7008 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 47/50\n25/25 [==============================] - 603s 24s/step - loss: 4.4522 - acc: 0.7238 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 48/50\n25/25 [==============================] - 573s 23s/step - loss: 4.7581 - acc: 0.7048 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 49/50\n25/25 [==============================] - 588s 24s/step - loss: 4.8660 - acc: 0.6981 - val_loss: 4.9644 - val_acc: 0.6920\nEpoch 50/50\n25/25 [==============================] - 579s 23s/step - loss: 4.3712 - acc: 0.7288 - val_loss: 4.9644 - val_acc: 0.6920\n" ], [ "print('Time per epoch {:.2f} hours.'.format((t2-t1)/3600))\nprint('Time per epoch {:.2f} minutes.'.format((t2-t1)/40/60))", "Time per epoch 8.19 hours.\nTime per epoch 12.28 minutes.\n" ] ], [ [ "<h2> 6. Evaluation </h2>", "_____no_output_____" ], [ "Here the model created above is tested with the testing set. ", "_____no_output_____" ] ], [ [ "test_generator = test_datagen.flow_from_directory(\n test_dir,\n target_size = (image_height, image_width),\n batch_size = batch_size,\n class_mode = 'binary')\n\nr = model.evaluate_generator(test_generator, steps = 20)\n\n# loss and accuracy\nr", "Found 1013 images belonging to 2 classes.\n" ] ], [ [ "<h2> 7. Results and discussion </h2>", "_____no_output_____" ], [ "Training accuracy is still under 0.75 the whole time. The final testing accuracy is 0.71. In the end of training the loss function is 4.77 which is really big. This means that the inconsistency between predicted value and actual label is large. The final testing loss function is 4.70. ", "_____no_output_____" ] ], [ [ "acc = h.history['acc']\nval_acc = h.history['val_acc']\nloss = h.history['loss']\nval_loss = h.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training accuracy')\nplt.plot(epochs, val_acc, 'b', label='Validation accracy')\nplt.title('Training and validation accuracy')\nplt.ylim([0, 1])\nplt.xlabel('Epochs')\nplt.grid()\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.ylim([0, 10])\nplt.xlabel('Epochs')\nplt.grid()\nplt.legend() \n\nplt.show()", "_____no_output_____" ] ], [ [ "<h2>8. Conclusions</h2>", "_____no_output_____" ], [ "I had still the same problem, validation accuracy and validation loss stay the same and the results are even worse, I can not seem to understand what is going wrong.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cbc4a5ad4e1a65baa8e6ceff118922b384d41634
6,492
ipynb
Jupyter Notebook
2. Posterior_Model.ipynb
zhao-lab/chang-energy-evaluation-itsc18
955ef15f8079b99316925eed14fd283a7f825f6f
[ "MIT" ]
null
null
null
2. Posterior_Model.ipynb
zhao-lab/chang-energy-evaluation-itsc18
955ef15f8079b99316925eed14fd283a7f825f6f
[ "MIT" ]
null
null
null
2. Posterior_Model.ipynb
zhao-lab/chang-energy-evaluation-itsc18
955ef15f8079b99316925eed14fd283a7f825f6f
[ "MIT" ]
3
2018-05-23T16:34:27.000Z
2019-11-08T14:04:38.000Z
30.914286
103
0.526802
[ [ [ "from __future__ import division\nimport numpy as np\nfrom numpy import linalg as LA\n#np.seterr(divide='ignore') # these warnings are usually harmless for this code\nfrom matplotlib import pyplot as plt\nimport matplotlib\n%matplotlib inline\nimport os\nimport scipy.stats as stats\nimport pyhsmm\nfrom pyhsmm.util.text import progprint_xrange\nimport pyhsmm.basic.distributions as distributions\nimport scipy.io as sio\nimport csv\nimport copy\nimport time\nimport pickle\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import Table, MetaData, Column, Integer, String\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sklearn import preprocessing", "_____no_output_____" ], [ "filename = 'data_devices_trip.sav'\ndata_devices_trip = pickle.load(open(filename, 'rb'))", "_____no_output_____" ], [ "#EEFECTS: return new data in form: data = {} and data[device]={\"trip\":[]}\ndef dataTransform(data_devices):\n data = {}\n for i, devi in enumerate(data_devices):\n #print(i, devi)\n data[devi] = {}\n for ii in range(data_devices[devi].shape[0]):\n data_temp = data_devices[devi][ii]\n trip = int(data_temp[0])\n speed = data_temp[1]\n acc = data_temp[2]\n try:\n data[devi][trip].append([speed,acc])\n except:\n data[devi][trip] = []\n data[devi][trip].append([speed,acc])\n return data", "_____no_output_____" ], [ "# get data_devices_trip = {} and data_devices_trip[device]={\"trip\":[]}\nfilename = 'data_devices.sav'\ndata_devices = pickle.load(open(filename, 'rb'))\ndata_devices_trip = dataTransform(data_devices)\n#another way to get data_devices_trip, but this way is a little bit slow\n#filename = 'data_devices_trip.sav'\n#data_devices_trip = pickle.load(open(filename, 'rb'))", "_____no_output_____" ], [ "\n\n\nposteriormodels = {}\ni = 0\nfor devi, value1 in data_devices_trip.items() :\n#for i, devi in enumerate(data_devices):\n print('devi', devi)\n if(len(data_devices_trip[devi]) == 0):\n print('oops, this is a none set')\n continue\n else:\n posteriormodels[devi]={}\n \n for trip,value2 in data_devices_trip[devi].items():\n print('trip',trip)\n data_trip = np.array(data_devices_trip[devi][trip])\n \n data_scaled = preprocessing.scale(data_trip)#implement data normalization \n Nmax = 200 # preset the maximum states\n # and some hyperparameters\n obs_dim = data_scaled.shape[1] # data dimensions\n obs_hypparams = {'mu_0':np.zeros(np.int(obs_dim)),\n 'sigma_0':np.eye(np.int(obs_dim)),\n 'kappa_0':0.25, # 0.2 5\n 'nu_0':obs_dim+2}\n # Define the observation distribution\n obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]\n\n # Define the posterior inference model\n posteriormodels[devi][trip] = pyhsmm.models.WeakLimitStickyHDPHMM(\n kappa=6.,alpha=1.,gamma=1.,init_state_concentration=1.,\n obs_distns=obs_distns)\n\n # Sampling process, for 100 round\n Sampling_step = 100\n Sampling_xaxis = range(1,Sampling_step+1)\n\n # Add the data to the model and train\n posteriormodels[devi][trip].add_data(data_scaled)\n Meth2_LLH = np.zeros((Sampling_step,1))\n\n # Sampling process, for 100 around\n for idx in progprint_xrange(Sampling_step):\n posteriormodels[devi][trip].resample_model()\n #Meth2_LLH[idx] = posteriormodel.log_likelihood()\n i = i + 1 \n if i == 6:\n break\n \n# save the model to disk\nfilename = 'posterior_models_test.sav'\npickle.dump(posteriormodels, open(filename, 'wb'))", "_____no_output_____" ], [ "posteriormodels = {}\ni = 0\nfor devi, value1 in data_devices_trip.items() :\n#for i, devi in 
enumerate(data_devices):\n print('devi', devi)\n if(len(data_devices_trip[devi]) == 0):\n print('oops, this is a none set')\n continue\n else:\n posteriormodels[devi]={}\n i = i + 1 \n if i == 6:\n break \n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cbc4cb659a23d10ad55352c93dfb2e4b70e591cf
1,296
ipynb
Jupyter Notebook
list_processing.ipynb
akjeyaramaji9700/my-python-repo
5ec20b53abc5fe03284d30f25290911e845a82ab
[ "MIT" ]
null
null
null
list_processing.ipynb
akjeyaramaji9700/my-python-repo
5ec20b53abc5fe03284d30f25290911e845a82ab
[ "MIT" ]
null
null
null
list_processing.ipynb
akjeyaramaji9700/my-python-repo
5ec20b53abc5fe03284d30f25290911e845a82ab
[ "MIT" ]
null
null
null
16
36
0.4375
[ [ [ "x,y,z = 8,9,12\nx=-x\ny=-z\nz=-x\nprint(x,y,z)", "-8 -12 8\n" ], [ "def rsum(n):\n if n==1:\n return 1;\n else:\n return rsum(n-1)*n;\n\nprint(rsum(5))", "120\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cbc4e66336880f50e53a83b6d80b7e1dcf39bde3
37,710
ipynb
Jupyter Notebook
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
2
2021-10-02T02:17:20.000Z
2021-11-17T10:35:01.000Z
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
4
2021-08-18T18:58:26.000Z
2022-02-10T07:03:36.000Z
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
1
2021-08-12T08:36:19.000Z
2021-08-12T08:36:19.000Z
31.930567
437
0.605224
[ [ [ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Vertex SDK: AutoML training image classification model for batch prediction\n\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n</table>\n<br/><br/><br/>", "_____no_output_____" ], [ "## Overview\n\n\nThis tutorial demonstrates how to use the Vertex SDK to create image classification models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/vertex-ai/docs/start/automl-users).", "_____no_output_____" ], [ "### Dataset\n\nThe dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.", "_____no_output_____" ], [ "### Objective\n\nIn this tutorial, you create an AutoML image classification model from a Python script, and then do a batch prediction using the Vertex SDK. 
You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.\n\nThe steps performed include:\n\n- Create a Vertex `Dataset` resource.\n- Train the model.\n- View the model evaluation.\n- Make a batch prediction.\n\nThere is one key difference between using batch prediction and using online prediction:\n\n* Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.\n\n* Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.", "_____no_output_____" ], [ "### Costs\n\nThis tutorial uses billable components of Google Cloud (GCP):\n\n* Vertex AI\n* Cloud Storage\n\nLearn about [Vertex AI\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.", "_____no_output_____" ], [ "## Installation\n\nInstall the latest version of Vertex SDK.", "_____no_output_____" ] ], [ [ "import sys\nimport os\n\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = '--user'\nelse:\n USER_FLAG = ''\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG", "_____no_output_____" ] ], [ [ "Install the latest GA version of *google-cloud-storage* library as well.", "_____no_output_____" ] ], [ [ "! pip3 install -U google-cloud-storage $USER_FLAG", "_____no_output_____" ] ], [ [ "### Restart the kernel\n\nOnce you've installed the Vertex SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.", "_____no_output_____" ] ], [ [ "if not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "_____no_output_____" ] ], [ [ "## Before you begin\n\n### GPU runtime\n\n*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**\n\n### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)\n\n3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)\n\n4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.\n\n5. Enter your project ID in the cell below. 
Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.", "_____no_output_____" ] ], [ [ "PROJECT_ID = \"[your-project-id]\" #@param {type:\"string\"}", "_____no_output_____" ], [ "if PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)", "_____no_output_____" ], [ "! gcloud config set project $PROJECT_ID", "_____no_output_____" ] ], [ [ "#### Region\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.\n\n- Americas: `us-central1`\n- Europe: `europe-west4`\n- Asia Pacific: `asia-east1`\n\nYou may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations)", "_____no_output_____" ] ], [ [ "REGION = 'us-central1' #@param {type: \"string\"}", "_____no_output_____" ] ], [ [ "#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.", "_____no_output_____" ] ], [ [ "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "_____no_output_____" ] ], [ [ "### Authenticate your Google Cloud account\n\n**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.\n\n**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\nIn the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.\n\n**Click Create service account**.\n\nIn the **Service account name** field, enter a name, and click **Create**.\n\nIn the **Grant this service account access to project** section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select **Vertex Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\nClick Create. A JSON file that contains your key downloads to your local environment.\n\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "_____no_output_____" ] ], [ [ "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. 
This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "_____no_output_____" ] ], [ [ "### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\n\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.", "_____no_output_____" ] ], [ [ "BUCKET_NAME = \"gs://[your-bucket-name]\" #@param {type:\"string\"}", "_____no_output_____" ], [ "if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP", "_____no_output_____" ] ], [ [ "**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.", "_____no_output_____" ] ], [ [ "! gsutil mb -l $REGION $BUCKET_NAME", "_____no_output_____" ] ], [ [ "Finally, validate access to your Cloud Storage bucket by examining its contents:", "_____no_output_____" ] ], [ [ "! 
gsutil ls -al $BUCKET_NAME", "_____no_output_____" ] ], [ [ "### Set up variables\n\nNext, set up some variables used throughout the tutorial.\n### Import libraries and define constants", "_____no_output_____" ] ], [ [ "import google.cloud.aiplatform as aip", "_____no_output_____" ] ], [ [ "## Initialize Vertex SDK\n\nInitialize the Vertex SDK for your project and corresponding bucket.", "_____no_output_____" ] ], [ [ "aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)", "_____no_output_____" ] ], [ [ "# Tutorial\n\nNow you are ready to start creating your own AutoML image classification model.", "_____no_output_____" ], [ "## Create a Dataset Resource\n\nFirst, you create an image Dataset resource for the Flowers dataset.", "_____no_output_____" ], [ "### Data preparation\n\nThe Vertex `Dataset` resource for images has some requirements for your data:\n\n- Images must be stored in a Cloud Storage bucket.\n- Each image file must be in an image format (PNG, JPEG, BMP, ...).\n- There must be an index file stored in your Cloud Storage bucket that contains the path and label for each image.\n- The index file must be either CSV or JSONL.", "_____no_output_____" ], [ "#### CSV\n\nFor image classification, the CSV index file has the requirements:\n\n- No heading.\n- First column is the Cloud Storage path to the image.\n- Second column is the label.", "_____no_output_____" ], [ "#### Location of Cloud Storage training data.\n\nNow set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.", "_____no_output_____" ] ], [ [ "IMPORT_FILE = 'gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv'", "_____no_output_____" ] ], [ [ "#### Quick peek at your data\n\nYou will use a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\n\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.", "_____no_output_____" ] ], [ [ "if 'IMPORT_FILES' in globals():\n FILE = IMPORT_FILES[0]\nelse:\n FILE = IMPORT_FILE\n\ncount = ! gsutil cat $FILE | wc -l\nprint(\"Number of Examples\", int(count[0]))\n\nprint(\"First 10 rows\")\n! gsutil cat $FILE | head", "_____no_output_____" ] ], [ [ "### Create the Dataset\n\nNext, create the `Dataset` resource using the `create()` method for the `ImageDataset` class, which takes the following parameters:\n\n- `display_name`: The human readable name for the `Dataset` resource.\n- `gcs_source`: A list of one or more dataset index file to import the data items into the `Dataset` resource.\n- `import_schema_uri`: The data labeling schema for the data items.\n\nThis operation may take several minutes.", "_____no_output_____" ] ], [ [ "dataset = aip.ImageDataset.create(\n display_name=\"Flowers\" + \"_\" + TIMESTAMP,\n gcs_source=[IMPORT_FILE],\n import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification,\n)\n\nprint(dataset.resource_name)", "_____no_output_____" ] ], [ [ "## Train the model\n\nNow train an AutoML image classification model using your Vertex `Dataset` resource. To train the model, do the following steps:\n\n1. Create an Vertex training pipeline for the `Dataset` resource.\n2. 
Execute the pipeline to start the training.", "_____no_output_____" ], [ "### Create and run training pipeline\n\nTo train an AutoML image classification model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.\n\n#### Create training pipeline\n\nAn AutoML training pipeline is created with the `AutoMLImageTrainingJob` class, with the following parameters:\n\n- `display_name`: The human readable name for the `TrainingJob` resource.\n- `prediction_type`: The type of task to train the model for.\n - `classification`: An image classification model.\n - `object_detection`: An image object detection model.\n- `multi_label`: If a classification task, whether single (`False`) or multi-labeled (`True`).\n- `model_type`: The type of model for deployment.\n - `CLOUD`: Deployment on Google Cloud.\n - `CLOUD_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on Google Cloud.\n - `CLOUD_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on Google Cloud.\n - `MOBILE_TF_VERSATILE_1`: Deployment on an edge device.\n - `MOBILE_TF_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on an edge device.\n - `MOBILE_TF_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on an edge device.\n- `base_model`: (optional) Transfer learning from an existing `Model` resource -- supported for image classification only.\n\nThe instantiated object is the DAG for the training job.", "_____no_output_____" ] ], [ [ "dag = aip.AutoMLImageTrainingJob(\n display_name=\"flowers_\" + TIMESTAMP,\n prediction_type=\"classification\",\n multi_label=False,\n model_type=\"CLOUD\",\n base_model=None,\n)", "_____no_output_____" ] ], [ [ "#### Run the training pipeline\n\nNext, you run the DAG to start the training job by invoking the method `run()`, with the following parameters:\n\n- `dataset`: The `Dataset` resource to train the model.\n- `model_display_name`: The human readable name for the trained model.\n- `training_fraction_split`: The percentage of the dataset to use for training.\n- `validation_fraction_split`: The percentage of the dataset to use for validation.\n- `test_fraction_split`: The percentage of the dataset to use for test (holdout data).\n- `budget_milli_node_hours`: (optional) Maximum training time specified in unit of millihours (1000 = hour).\n- `disable_early_stopping`: If `True`, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.\n\nThe `run` method, when completed, returns the `Model` resource.\n\nThe execution of the training pipeline will take up to 20 minutes.", "_____no_output_____" ] ], [ [ "model = dag.run(\n dataset=dataset,\n model_display_name=\"flowers_\" + TIMESTAMP,\n training_fraction_split=0.8,\n validation_fraction_split=0.1,\n test_fraction_split=0.1,\n budget_milli_node_hours=8000,\n disable_early_stopping=False\n)", "_____no_output_____" ] ], [ [ "## Model deployment for batch prediction\n\nNow deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for online prediction.\n\nFor online prediction, you:\n\n1. Create an `Endpoint` resource for deploying the `Model` resource to.\n\n2. Deploy the `Model` resource to the `Endpoint` resource.\n\n3. Make online prediction requests to the `Endpoint` resource.\n\nFor batch prediction, you:\n\n1. Create a batch prediction job.\n\n2. The job service will provision resources for the batch prediction request.\n\n3. 
The results of the batch prediction request are returned to the caller.\n\n4. The job service will unprovision the resoures for the batch prediction request.", "_____no_output_____" ], [ "## Make a batch prediction request\n\nNow do a batch prediction to your deployed model.", "_____no_output_____" ], [ "### Get test item(s)\n\nNow do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.", "_____no_output_____" ] ], [ [ "test_items = !gsutil cat $IMPORT_FILE | head -n2\nif len(str(test_items[0]).split(',')) == 3:\n _, test_item_1, test_label_1 = str(test_items[0]).split(',')\n _, test_item_2, test_label_2 = str(test_items[1]).split(',')\nelse:\n test_item_1, test_label_1 = str(test_items[0]).split(',')\n test_item_2, test_label_2 = str(test_items[1]).split(',')\n\nprint(test_item_1, test_label_1)\nprint(test_item_2, test_label_2)", "_____no_output_____" ] ], [ [ "### Copy test item(s)\n\nFor the batch prediction, you will copy the test items over to your Cloud Storage bucket.", "_____no_output_____" ] ], [ [ "file_1 = test_item_1.split('/')[-1]\nfile_2 = test_item_2.split('/')[-1]\n\n! gsutil cp $test_item_1 $BUCKET_NAME/$file_1\n! gsutil cp $test_item_2 $BUCKET_NAME/$file_2\n\ntest_item_1 = BUCKET_NAME + \"/\" + file_1\ntest_item_2 = BUCKET_NAME + \"/\" + file_2", "_____no_output_____" ] ], [ [ "### Make the batch input file\n\nNow make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:\n\n- `content`: The Cloud Storage path to the image.\n- `mime_type`: The content type. In our example, it is an `jpeg` file.\n\nFor example:\n\n {'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'}", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport json\n\ngcs_input_uri = BUCKET_NAME + '/test.jsonl'\nwith tf.io.gfile.GFile(gcs_input_uri, 'w') as f:\n data = {\"content\": test_item_1, \"mime_type\": \"image/jpeg\"}\n f.write(json.dumps(data) + '\\n')\n data = {\"content\": test_item_2, \"mime_type\": \"image/jpeg\"}\n f.write(json.dumps(data) + '\\n')\n\nprint(gcs_input_uri)\n! 
gsutil cat $gcs_input_uri", "_____no_output_____" ] ], [ [ "### Make the batch prediction request\n\nNow that your `Model` resource is trained, you can make a batch prediction by invoking the `batch_predict()` method, with the following parameters:\n\n- `job_display_name`: The human readable name for the batch prediction job.\n- `gcs_source`: A list of one or more batch request input files.\n- `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction results.\n- `sync`: If set to `True`, the call will block while waiting for the asynchronous batch job to complete.", "_____no_output_____" ] ], [ [ "batch_predict_job = model.batch_predict(\n job_display_name=\"flowers_\" + TIMESTAMP,\n gcs_source=gcs_input_uri,\n gcs_destination_prefix=BUCKET_NAME,\n sync=False\n)\n\nprint(batch_predict_job)", "_____no_output_____" ] ], [ [ "### Wait for completion of batch prediction job\n\nNext, wait for the batch job to complete.", "_____no_output_____" ] ], [ [ "batch_predict_job.wait()", "_____no_output_____" ] ], [ [ "### Get the predictions\n\nNext, get the results from the completed batch prediction job.\n\nThe results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method `iter_outputs()` to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:\n\n- `content`: The prediction request.\n- `prediction`: The prediction response.\n - `ids`: The internally assigned unique identifiers for each prediction request.\n - `displayNames`: The class names for each class label.\n - `confidences`: The predicted confidence, between 0 and 1, per class label.", "_____no_output_____" ] ], [ [ "bp_iter_outputs = batch_predict_job.iter_outputs()\n\nprediction_results = list()\nfor blob in bp_iter_outputs:\n if blob.name.split(\"/\")[-1].startswith(\"prediction\"):\n prediction_results.append(blob.name)\n\ntags = list()\nfor prediction_result in prediction_results:\n gfile_name = f\"gs://{bp_iter_outputs.bucket.name}/{prediction_result}\"\n with tf.io.gfile.GFile(name=gfile_name, mode=\"r\") as gfile:\n for line in gfile.readlines():\n line = json.loads(line)\n print(line)\n break", "_____no_output_____" ] ], [ [ "# Cleaning up\n\nTo clean up all GCP resources used in this project, you can [delete the GCP\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n\n- Dataset\n- Pipeline\n- Model\n- Endpoint\n- Batch Job\n- Custom Job\n- Hyperparameter Tuning Job\n- Cloud Storage Bucket", "_____no_output_____" ] ], [ [ "delete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n\n\n# Delete the dataset using the Vertex dataset object\ntry:\n if delete_dataset and 'dataset' in globals():\n dataset.delete()\nexcept Exception as e:\n print(e)\n\n# Delete the model using the Vertex model object\ntry:\n if delete_model and 'model' in globals():\n model.delete()\nexcept Exception as e:\n print(e)\n\n# Delete the endpoint using the Vertex endpoint object\ntry:\n if delete_endpoint and 'endpoint' in globals():\n endpoint.delete()\nexcept Exception as e:\n print(e)\n\n# Delete the batch prediction job using the Vertex batch prediction object\ntry:\n if delete_batchjob and 'batch_predict_job' in 
globals():\n batch_predict_job.delete()\nexcept Exception as e:\n print(e)\n\nif delete_bucket and 'BUCKET_NAME' in globals():\n ! gsutil rm -r $BUCKET_NAME", "_____no_output_____" ] ] ]
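The prediction-results loop above prints each JSONL line as a raw dictionary. As a minimal illustrative sketch (not part of the sample above), assuming each parsed line carries the `content` and `prediction` keys with `displayNames` and `confidences` as described in the "Get the predictions" cell, the hypothetical helper below reduces one result line to its top-1 label:

```python
def summarize_prediction_line(line_dict):
    # line_dict is one parsed JSONL result with the keys described above:
    # 'content' (the request) and 'prediction' (with 'displayNames'/'confidences').
    prediction = line_dict["prediction"]
    labels = prediction["displayNames"]
    confidences = prediction["confidences"]
    # index of the class with the highest predicted confidence
    best = max(range(len(confidences)), key=lambda i: confidences[i])
    return line_dict["content"], labels[best], confidences[best]

# Possible usage with the `line` dictionary printed in the loop above:
# content, label, confidence = summarize_prediction_line(line)
# print(f"{content} -> {label} ({confidence:.3f})")
```

The exact key layout of the output files can vary by SDK version, so treat this as a sketch rather than a guaranteed schema.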
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
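The batch input file in the notebook above is written by hand for two test images. As an illustrative generalization (the function name is hypothetical and reuses only the `json` and `tf.io.gfile` calls already shown), the same JSONL format could be produced for any list of Cloud Storage image URIs:

```python
import json
import tensorflow as tf

def write_batch_input(gcs_image_uris, gcs_output_uri, mime_type="image/jpeg"):
    # Write one JSON object per line, mirroring the format used in the tutorial:
    # {"content": "<gcs path to image>", "mime_type": "image/jpeg"}
    with tf.io.gfile.GFile(gcs_output_uri, "w") as f:
        for uri in gcs_image_uris:
            f.write(json.dumps({"content": uri, "mime_type": mime_type}) + "\n")
    return gcs_output_uri

# Hypothetical usage with the variables defined in the notebook:
# gcs_input_uri = write_batch_input([test_item_1, test_item_2], BUCKET_NAME + "/test.jsonl")
```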
cbc4ecd895c700f4387d1d0ef1360d425015ad15
62,697
ipynb
Jupyter Notebook
tutorial/source/dmm.ipynb
ssameerr/pyro
c04fc931631ec9e8694def207b5ca0e432d5e501
[ "MIT" ]
null
null
null
tutorial/source/dmm.ipynb
ssameerr/pyro
c04fc931631ec9e8694def207b5ca0e432d5e501
[ "MIT" ]
null
null
null
tutorial/source/dmm.ipynb
ssameerr/pyro
c04fc931631ec9e8694def207b5ca0e432d5e501
[ "MIT" ]
null
null
null
65.241415
1,033
0.662807
[ [ [ "# Deep Markov Model \n\n## Introduction\n\nWe're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step. \n\nSince music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables. \n\nOne particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks.\n\nThe specific model we're going to implement is based on the following reference:\n\n[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\n Rahul G. Krishnan, Uri Shalit, David Sontag\n \nPlease note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models.\n\nWe've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together Pytorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work.", "_____no_output_____" ], [ "## The Model\n \nA convenient way to describe the high-level structure of the model is with a graphical model.", "_____no_output_____" ] ], [ [ "<center><figure><img src=\"_static/img/model.png\" style=\"width: 500px;\"><figcaption> <font size=\"+1\"><b>Figure 1</b>: The model rolled out for T=3 time steps.</font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "Here, we've rolled out the model assuming that the sequence of observations is of length three: $\\{{\\bf x}_1, {\\bf x}_2, {\\bf x}_3\\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\\{{\\bf z}_1, {\\bf z}_2, {\\bf z}_3\\}$. The figure encodes the structure of the model. 
The corresponding joint distribution is\n\n$$p({\\bf x}_{123} , {\\bf z}_{123})=p({\\bf x}_1|{\\bf z}_1)p({\\bf x}_2|{\\bf z}_2)p({\\bf x}_3|{\\bf z}_3)p({\\bf z}_1)p({\\bf z}_2|{\\bf z}_1)p({\\bf z}_3|{\\bf z}_2)$$\n\nConditioned on ${\\bf z}_t$, each observation ${\\bf x}_t$ is independent of the other observations. This can be read off from the fact that each ${\\bf x}_t$ only depends on the corresponding latent ${\\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\\bf z}_t$, when conditioned on the previous latent ${\\bf z}_{t-1}$, is independent of all previous latents $\\{ {\\bf z}_{t-2}, {\\bf z}_{t-3}, ...\\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\\bf z}_{t}$.\n\nWe will assume that the observation likelihoods, i.e. the probability distributions $p({{\\bf x}_t}|{{\\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\\bf z}_t|{\\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous. \n \n\n \nThe solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\\bf z}_{t}$ to depend on ${\\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics. \n\nSome additional notes:\n- we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics\n- note the parameter ${\\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\\bf z}_1)$ for the first time step, where there are no previous latents to condition on.\n\n### The Gated Transition and the Emitter\n\nWithout further ado, let's start writing some code. We first define the two Pytorch Modules that correspond to the black squares in Fig. 1. 
First the emission function:", "_____no_output_____" ] ], [ [ "class Emitter(nn.Module):\n \"\"\"\n Parameterizes the bernoulli observation likelihood p(x_t | z_t)\n \"\"\"\n def __init__(self, input_dim, z_dim, emission_dim):\n super(Emitter, self).__init__()\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)\n self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)\n self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)\n # initialize the two non-linearities used in the neural network\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, z_t):\n \"\"\"\n Given the latent z at a particular time step t we return the vector of \n probabilities `ps` that parameterizes the bernoulli distribution p(x_t|z_t)\n \"\"\"\n h1 = self.relu(self.lin_z_to_hidden(z_t))\n h2 = self.relu(self.lin_hidden_to_hidden(h1))\n ps = self.sigmoid(self.lin_hidden_to_input(h2))\n return ps", "_____no_output_____" ] ], [ [ "In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using. The forward call defines the computational flow of the function. We take in the latent ${\\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\\bf z}_{t}$).", "_____no_output_____" ], [ "Now we define the gated transition function:", "_____no_output_____" ] ], [ [ "class GatedTransition(nn.Module):\n \"\"\"\n Parameterizes the gaussian latent transition probability p(z_t | z_{t-1})\n See section 5 in the reference for comparison.\n \"\"\"\n def __init__(self, z_dim, transition_dim):\n super(GatedTransition, self).__init__()\n # initialize the six linear transformations used in the neural network\n self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)\n self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)\n self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_sig = nn.Linear(z_dim, z_dim)\n self.lin_z_to_mu = nn.Linear(z_dim, z_dim)\n # modify the default initialization of lin_z_to_mu\n # so that it's starts out as the identity function\n self.lin_z_to_mu.weight.data = torch.eye(z_dim)\n self.lin_z_to_mu.bias.data = torch.zeros(z_dim)\n # initialize the three non-linearities used in the neural network\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1):\n \"\"\"\n Given the latent z_{t-1} corresponding to the time step t-1\n we return the mean and sigma vectors that parameterize the\n (diagonal) gaussian distribution p(z_t | z_{t-1})\n \"\"\"\n # compute the gating function and one minus the gating function\n gate_intermediate = self.relu(self.lin_gate_z_to_hidden(z_t_1))\n gate = self.sigmoid(self.lin_gate_hidden_to_z(gate_intermediate))\n one_minus_gate = ng_ones(gate.size()).type_as(gate) - gate\n # compute the 'proposed mean'\n proposed_mean_intermediate = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))\n proposed_mean 
= self.lin_proposed_mean_hidden_to_z(proposed_mean_intermediate)\n # assemble the actual mean used to sample z_t, which mixes a linear transformation \n # of z_{t-1} with the proposed mean modulated by the gating function\n mu = one_minus_gate * self.lin_z_to_mu(z_t_1) + gate * proposed_mean\n # compute the sigma used to sample z_t, using the proposed mean from above as input\n # the softplus ensures that sigma is positive\n sigma = self.softplus(self.lin_sig(self.relu(proposed_mean)))\n # return mu, sigma which can be fed into Normal\n return mu, sigma\n", "_____no_output_____" ] ], [ [ "This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `mu`, and the (square root) covariance `sigma`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `mu` is a sum of two terms, only one of which depends non-linearily on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear). ", "_____no_output_____" ], [ "### Model - a Pyro Stochastic Function\n\nSo far everything we've done is pure Pytorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. To do this we introduce a callable `model()` that contains the Pyro primitives `pyro.sample` and `pyro.observe`. The `sample` statements will be used to specify the joint distribution over the latents ${\\bf z}_{1:T}$. The `observe` statements will specify how the observations ${\\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic:", "_____no_output_____" ] ], [ [ "def model(...):\n z_prev = self.z_0\n\n # sample the latents z and observed x's one time step at a time\n for t in range(1, T_max + 1): \n # the next two lines of code sample z_t ~ p(z_t | z_{t-1})\n # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})\n z_mu, z_sigma = self.trans(z_prev)\n # then sample z_t according to dist.Normal(z_mu, z_sigma)\n z_t = pyro.sample(\"z_%d\" % t, dist.Normal, z_mu, z_sigma)\n \n # compute the probabilities that parameterize the bernoulli likelihood\n emission_probs_t = self.emitter(z_t)\n # the next statement instructs pyro to observe x_t according to the\n # bernoulli distribution p(x_t|z_t) \n pyro.observe(\"obs_x_%d\" % t, dist.bernoulli, \n mini_batch[:, t - 1, :], emission_probs_t)\n # the latent sampled at this time step will be conditioned upon \n # in the next time step so keep track of it\n z_prev = z_t ", "_____no_output_____" ] ], [ [ "The first thing we need to do is sample ${\\bf z}_1$. Once we've sampled ${\\bf z}_1$, we can sample ${\\bf z}_2 \\sim p({\\bf z}_2|{\\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_mu` and `z_sigma` that define the probability distributions $p({\\bf z}_t|{\\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. 
For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. Note that each random variable `z_t` is assigned a unique name by the user.\n\nOnce we've sampled ${\\bf z}_t$ at a given time step, we need to observe the datapoint ${\\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. Together with the argument `dist.bernoulli` in the `observe` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\\bf x}_t$: `mini_batch[:, t - 1, :]`. \n\nThis fully specifies our model and encapsulates it in a callable that can be passed to Pyro. Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass.", "_____no_output_____" ] ], [ [ "def model(self, mini_batch, mini_batch_reversed, mini_batch_mask,\n mini_batch_seq_lengths, annealing_factor=1.0):\n\n # this is the number of time steps we need to process in the mini-batch\n T_max = mini_batch.size(1)\n\n # register all pytorch (sub)modules with pyro\n pyro.module(\"dmm\", self)\n\n # set z_prev = z_0 to setup the recursive conditioning\n z_prev = self.z_0\n\n # sample the latents z and observed x's one time step at a time\n for t in range(1, T_max + 1): \n # the next three lines of code sample z_t ~ p(z_t | z_{t-1})\n # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})\n z_mu, z_sigma = self.trans(z_prev)\n # then sample z_t according to dist.Normal(z_mu, z_sigma)\n z_t = pyro.sample(\"z_%d\" % t, dist.Normal, z_mu, z_sigma,\n log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])\n\n # compute the probabilities that parameterize the bernoulli likelihood\n emission_probs_t = self.emitter(z_t)\n # the next statement instructs pyro to observe x_t according to the\n # bernoulli distribution p(x_t|z_t)\n pyro.observe(\"obs_x_%d\" % t, dist.bernoulli, mini_batch[:, t - 1, :],\n emission_probs_t,\n log_pdf_mask=mini_batch_mask[:, t - 1:t])\n # the latent sampled at this time step will be conditioned upon\n # in the next time step so keep track of it\n z_prev = z_t", "_____no_output_____" ] ], [ [ "The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. `mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization). \n\nThis is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent.\n\nThis vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations. 
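To make the masking idea concrete, here is a small sketch (not the actual `poly.get_mini_batch` helper used in the tutorial; the function name is ours) of how such a 0/1 mask can be built from the sequence lengths:

```python
import torch

def sequence_mask(seq_lengths, T_max):
    # Build a (mini_batch_size x T_max) mask whose entry (i, t) is 1.0 exactly
    # when time step t is a valid observation for sequence i.
    mask = torch.zeros(len(seq_lengths), T_max)
    for i, length in enumerate(seq_lengths):
        mask[i, :length] = 1.0
    return mask

# e.g. two sequences of lengths 3 and 2 padded to T_max = 4:
# sequence_mask([3, 2], 4) -> tensor([[1., 1., 1., 0.],
#                                     [1., 1., 0., 0.]])
```

Slicing out column `t - 1` of such a mask is what supplies the `log_pdf_mask` argument at time step `t`.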
\n\nSo the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ as an argument `log_pdf_mask` to both the `sample` and `observe` statements.\n\nFinally, the line `pyro.module(\"dmm\", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for `sample` and `observe` statements, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. We leave a discussion of the KL annealing factor for later.", "_____no_output_____" ], [ "## Inference\n\nAt this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.html) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro).\n\n### Guide\n\nThe purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\\bf z}_{1:T}|{\\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back. \nSuppose our dataset $\\mathcal{D}$ consists of $N$ sequences \n$\\{ {\\bf x}_{1:T_1}^1, {\\bf x}_{1:T_2}^2, ..., {\\bf x}_{1:T_N}^N \\}$. Then the posterior we're actually interested in is given by \n$p({\\bf z}_{1:T_1}^1, {\\bf z}_{1:T_2}^2, ..., {\\bf z}_{1:T_N}^N | \\mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](http://pyro.ai/examples/svi_part_ii.html)).\n\n#### Aside: Amortization\n\nThis works as follows. Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\\bf x}_{1:T})$ and work with a variational distribution that has the form $\\prod_{n=1}^N q({\\bf z}_{1:T_n}^n | f({\\bf x}_{1:T_n}^n))$. The function $f(\\cdot)$&mdash;which basically maps a given observed sequence to a set of variational parameters tailored to that sequence&mdash;will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters.\n\nSo our task is to construct the function $f(\\cdot)$. 
Since in our case we need to support variable-length sequences, it's only natural that $f(\\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\\cdot)$ in detail, let's look at a computational graph that encodes the basic structure: <p>", "_____no_output_____" ] ], [ [ "<center><figure><img src=\"_static/img/guide.png\" style=\"width: 400px;\"><figcaption> <font size=\"+1\"><b>Figure 2</b>: The guide rolled out for T=3 time steps. </font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\\{ {\\bf h}_1, {\\bf h}_2,{\\bf h}_3\\}$. Note that this computation is done _before_ we sample any latent variables. Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the the conditional distribution $q({\\bf z}_t | {\\bf z}_{t-1}, {\\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\\bf z}_{1:T}$ in the guide is such that we sample ${\\bf z}_t$ forward in time.) In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\\bf z}_0^{\\rm{q}}$. \n\n#### Aside: Guide Structure\nWhy do we setup the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\\bf z}_t |...)$ depends on two things:\n\n- the latent ${\\bf z}_{t-1}$ from the previous time step; and \n- the observations ${\\bf x}_{t:T}$, i.e. the current observation together with all future observations\n\nWe are free to make other choices; all that is required is that that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\\bf x}_{1:t-1}$ is encapsulated by ${\\bf z}_{t-1}$. We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model.\n\nSo much for the high-level structure of the guide. Let's look at the component parts in detail. First, the `Combiner` module:", "_____no_output_____" ] ], [ [ "class Combiner(nn.Module):\n \"\"\"\n Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block\n of the guide (i.e. the variational distribution). 
The dependence on x_{t:T} is\n through the hidden state of the RNN (see the pytorch module `rnn` below)\n \"\"\"\n def __init__(self, z_dim, rnn_dim):\n super(Combiner, self).__init__()\n # initialize the three linear transformations used in the neural network\n self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)\n self.lin_hidden_to_mu = nn.Linear(rnn_dim, z_dim)\n self.lin_hidden_to_sigma = nn.Linear(rnn_dim, z_dim)\n # initialize the two non-linearities used in the neural network\n self.tanh = nn.Tanh()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1, h_rnn):\n \"\"\"\n Given the latent z at at a particular time step t-1 as well as the hidden\n state of the RNN h(x_{t:T}) we return the mean and sigma vectors that\n parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T})\n \"\"\"\n # combine the rnn hidden state with a transformed version of z_t_1\n h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)\n # use the combined hidden state to compute the mean used to sample z_t\n mu = self.lin_hidden_to_mu(h_combined)\n # use the combined hidden state to compute the sigma used to sample z_t\n sigma = self.softplus(self.lin_hidden_to_sigma(h_combined))\n # return mu, sigma which can be fed into Normal\n return mu, sigma", "_____no_output_____" ] ], [ [ "This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs. \n\nApart from the RNN, we now have all the ingredients we need to construct our guide distribution.\nHappily, Pytorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. Let's instead jump right into the definition of the stochastic function `guide()`.", "_____no_output_____" ] ], [ [ "def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask,\n mini_batch_seq_lengths, annealing_factor=1.0):\n\n # this is the number of time steps we need to process in the mini-batch\n T_max = mini_batch.size(1)\n # register all pytorch (sub)modules with pyro\n pyro.module(\"dmm\", self)\n\n # if on gpu we need the fully broadcast view of the rnn initial state\n # to be in contiguous gpu memory\n h_0_contig = self.h_0 if not self.use_cuda \\\n else self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous()\n # push the observed x's through the rnn;\n # rnn_output contains the hidden state at each time step\n rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig)\n # reverse the time-ordering in the hidden state and un-pack it\n rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths)\n # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...)\n z_prev = self.z_q_0\n\n # sample the latents z one time step at a time\n for t in range(1, T_max + 1): \n # get the parameters for the distribution q(z_t | z_{t-1}, x_{t:T})\n z_mu, z_sigma = self.combiner(z_prev, rnn_output[:, t - 1, :]) \n # sample z_t from the distribution q(z_t|...)\n z_t = pyro.sample(\"z_%d\" % t, dist.Normal, z_mu, z_sigma, \n log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])\n # the latent sampled at this time step will be conditioned upon in the next time step\n # so keep track of it\n z_prev = z_t", "_____no_output_____" ] ], [ [ "The high-level structure of `guide()` is very similar to `model()`. 
First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `observe` statements). Finally, note that the names of the latent variables in the guide exactly match those in the model. This is how Pyro knows to correctly align random variables. \n\nThe RNN logic should be familar to Pytorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a Pytorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. This helper function also unpacks the `rnn_output` so that it is no longer in the form of a Pytorch `rnn.pack_padded_sequence`.", "_____no_output_____" ], [ "## Packaging the Model and Guide as a Pytorch Module\n\nAt this juncture, we're ready to to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single Pytorch Module. This is generally good practice, especially for larger models.", "_____no_output_____" ] ], [ [ "class DMM(nn.Module):\n \"\"\"\n This pytorch Module encapsulates the model as well as the \n variational distribution (the guide) for the Deep Markov Model\n \"\"\"\n def __init__(self, input_dim=88, z_dim=100, emission_dim=100, \n transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0, \n num_iafs=0, iaf_dim=50, use_cuda=False):\n super(DMM, self).__init__()\n # instantiate pytorch modules used in the model and guide below\n self.emitter = Emitter(input_dim, z_dim, emission_dim)\n self.trans = GatedTransition(z_dim, transition_dim)\n self.combiner = Combiner(z_dim, rnn_dim)\n self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu',\n batch_first=True, bidirectional=False, num_layers=1, dropout=rnn_dropout_rate)\n\n # define a (trainable) parameters z_0 and z_q_0 that help define the probability\n # distributions p(z_1) and q(z_1)\n # (since for t = 1 there are no previous latents to condition on)\n self.z_0 = nn.Parameter(torch.zeros(z_dim))\n self.z_q_0 = nn.Parameter(torch.zeros(z_dim))\n # define a (trainable) parameter for the initial hidden state of the rnn\n self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))\n\n self.use_cuda = use_cuda\n # if on gpu cuda-ize all pytorch (sub)modules\n if use_cuda:\n self.cuda()\n\n # the model p(x_{1:T} | z_{1:T}) p(z_{1:T})\n def model(...):\n\n # ... as above ...\n\n # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)\n def guide(...):\n \n # ... as above ...", "_____no_output_____" ] ], [ [ "Since we've already gone over `model` and `guide`, our focus here is on the constructor. 
First we instantiate the four Pytorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN. \n\nNext we define Pytorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\\bf z_0$. \n\nThe important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, Pytorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module(\"dmm\", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory.\n", "_____no_output_____" ], [ "## Stochastic Variational Inference\n\nWith our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. First we instantiate an instance of `DMM` and setup an optimizer.", "_____no_output_____" ] ], [ [ "# instantiate the dmm\ndmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim,\n args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda)\n\n# setup optimizer\nadam_params = {\"lr\": args.learning_rate, \"betas\": (args.beta1, args.beta2),\n \"clip_norm\": args.clip_norm, \"lrd\": args.lr_decay,\n \"weight_decay\": args.weight_decay}\noptimizer = ClippedAdam(adam_params)", "_____no_output_____" ] ], [ [ "Here we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm. ", "_____no_output_____" ] ], [ [ "# setup inference algorithm\nsvi = SVI(dmm.model, dmm.guide, optimizer, \"ELBO\", trace_graph=False)", "_____no_output_____" ] ], [ [ "The inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\\log p(\\mathcal{D})$. As we take gradient steps that maximize the ELBO, we move our guide $q(\\cdot)$ closer to the exact posterior. \n\nThe argument `trace_graph=False` indicates that we're using a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.)\n\nAssuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling", "_____no_output_____" ] ], [ [ "svi.step(mini_batch, ...)", "_____no_output_____" ] ], [ [ "That's all there is to it!\n\nWell, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. 
This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro.", "_____no_output_____" ], [ "## The Black Magic of Optimization\n\nActually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space&mdash;a hard problem&mdash;for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons:\n- the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined).\n- our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates\n- data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that).\n- given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial\n\nThe upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be. \n\nBefore we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior):\n\n$\\rm{ELBO} = \\mathbb{E}_{q({\\bf z}_{1:T})}[\\log p({\\bf x}_{1:T}|{\\bf z}_{1:T})] - \\mathbb{E}_{q({\\bf z}_{1:T})}[ \\log q({\\bf z}_{1:T}) - \\log p({\\bf z}_{1:T})]$\n\nThis latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one:\n\n$\\mathbb{E}_{q({\\bf z}_{1:T})}[\\log p({\\bf x}_{1:T}|{\\bf z}_{1:T})] - \\rm{annealing\\_factor} \\times \\mathbb{E}_{q({\\bf z}_{1:T})}[ \\log q({\\bf z}_{1:T}) - \\log p({\\bf z}_{1:T})]$\n\nThe idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule.\n\nFinally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. 
Support for analytic KL divergences in Pyro is something we plan to add in the near future.", "_____no_output_____" ], [ "## Data Loading, Training, and Evaluation\n\nFirst we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps.", "_____no_output_____" ] ], [ [ "jsb_file_loc = \"./data/jsb_processed.pkl\"\ndata = pickle.load(open(jsb_file_loc, \"rb\"))\ntraining_seq_lengths = data['train']['sequence_lengths']\ntraining_data_sequences = data['train']['sequences']\ntest_seq_lengths = data['test']['sequence_lengths']\ntest_data_sequences = data['test']['sequences']\nval_seq_lengths = data['valid']['sequence_lengths']\nval_data_sequences = data['valid']['sequences']\nN_train_data = len(training_seq_lengths)\nN_train_time_slices = np.sum(training_seq_lengths)\nN_mini_batches = int(N_train_data / args.mini_batch_size +\n int(N_train_data % args.mini_batch_size > 0))", "_____no_output_____" ] ], [ [ "For this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step:", "_____no_output_____" ] ], [ [ "def process_minibatch(epoch, which_mini_batch, shuffled_indices):\n if args.annealing_epochs > 0 and epoch < args.annealing_epochs:\n # compute the KL annealing factor approriate for the current mini-batch in the current epoch\n min_af = args.minimum_annealing_factor\n annealing_factor = min_af + (1.0 - min_af) * \\ \n (float(which_mini_batch + epoch * N_mini_batches + 1) /\n float(args.annealing_epochs * N_mini_batches))\n else:\n # by default the KL annealing factor is unity\n annealing_factor = 1.0 \n\n # compute which sequences in the training set we should grab\n mini_batch_start = (which_mini_batch * args.mini_batch_size)\n mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data])\n mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]\n # grab the fully prepped mini-batch using the helper function in the data loader\n mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \\\n = poly.get_mini_batch(mini_batch_indices, training_data_sequences,\n training_seq_lengths, cuda=args.cuda)\n # do an actual gradient step\n loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask,\n mini_batch_seq_lengths, annealing_factor)\n # keep track of the training loss\n return loss", "_____no_output_____" ] ], [ [ "We first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`. This helper function takes care of a number of different things:\n- it sorts each mini-batch by sequence length\n- it calls another helper function to get a copy of the mini-batch in reversed temporal order\n- it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN\n- it cuda-izes all tensors if we're on a GPU\n- it calls another helper function to get an appropriate 0/1 mask for the mini-batch\n\nWe then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. 
Finally, we return a float which is a noisy estimate of the loss for that mini-batch.\n\nWe now have all the ingredients required for the main bit of our training loop:", "_____no_output_____" ] ], [ [ "times = [time.time()]\nfor epoch in range(args.num_epochs):\n # accumulator for our estimate of the negative log likelihood \n # (or rather -elbo) for this epoch\n epoch_nll = 0.0 \n # prepare mini-batch subsampling indices for this epoch\n shuffled_indices = np.arange(N_train_data)\n np.random.shuffle(shuffled_indices)\n\n # process each mini-batch; this is where we take gradient steps\n for which_mini_batch in range(N_mini_batches):\n epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices)\n\n # report training diagnostics\n times.append(time.time())\n epoch_time = times[-1] - times[-2]\n log(\"[training epoch %04d] %.4f \\t\\t\\t\\t(dt = %.3f sec)\" %\n (epoch, epoch_nll / N_train_time_slices, epoch_time))\n", "_____no_output_____" ] ], [ [ "At the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]). ", "_____no_output_____" ], [ "## Evaluation\nThis training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a lessy noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows:", "_____no_output_____" ] ], [ [ "val_loss = svi.evaluate_loss(val_batch, ..., num_particles=5)", "_____no_output_____" ] ], [ [ "This, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow:", "_____no_output_____" ] ], [ [ "# package repeated copies of val/test data for faster evaluation\n# (i.e. 
set us up for vectorization)\ndef rep(x):\n return np.repeat(x, n_eval_samples, axis=0)\n\n# get the validation/test data ready for the dmm: pack into sequences, etc.\nval_seq_lengths = rep(val_seq_lengths)\ntest_seq_lengths = rep(test_seq_lengths)\nval_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch(\n np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences),\n val_seq_lengths, volatile=True, cuda=args.cuda)\ntest_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = poly.get_mini_batch(\n np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences),\n test_seq_lengths, volatile=True, cuda=args.cuda)", "_____no_output_____" ] ], [ [ "Note that we make use of the same helper function `get_mini_batch` as before, except this time we select the entire datasets. Also, we mark the data as `volatile`, which lets Pytorch know that we won't be computing any gradients; this results in further speed-ups. With the test and validation data now fully prepped, we define the helper function that does the evaluation: ", "_____no_output_____" ] ], [ [ "def do_evaluation():\n # put the RNN into evaluation mode (i.e. turn off drop-out if applicable)\n dmm.rnn.eval()\n\n # compute the validation and test loss\n val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask,\n val_seq_lengths) / np.sum(val_seq_lengths)\n test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask,\n test_seq_lengths) / np.sum(test_seq_lengths)\n\n # put the RNN back into training mode (i.e. turn on drop-out if applicable)\n dmm.rnn.train()\n return val_nll, test_nll", "_____no_output_____" ] ], [ [ "We simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see `dmm.py` for details.", "_____no_output_____" ], [ "## Results\n\nLet's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) they report a normalized negative log likelihood (NLL) of `6.93` on the testset (lower is better$)^{\\S}$. This is to be compared to our result of `6.87`. These numbers are very much in the same ball park, which is reassuring. It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer).", "_____no_output_____" ] ], [ [ "<figure><img src=\"_static/img/test_nll.png\" style=\"width: 400px;\"><center><figcaption> <font size=\"-1\"><b>Figure 3</b>: Progress on the test set NLL as training progresses for a sample training run. </font></figcaption></figure></center>", "_____no_output_____" ] ], [ [ "In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. 
On a GeForce GTX 1080, 5000 epochs takes about 20 hours.\n\n\n| `num_iafs` | test NLL |\n|---|---|\n| `0` | `6.87` | \n| `1` | `6.82` |\n| `2` | `6.80` |\n\nFinally, we also report results for guides with normalizing flows in the mix (details to be found in the next section). \n\n${ \\S\\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different.", "_____no_output_____" ], [ "## Bells, whistles, and other improvements\n\n### Inverse Autoregressive Flows\n\nOne of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). **This will only cost us four additional lines of code!**\n\nFirst, in the `DMM` constructor we add", "_____no_output_____" ] ], [ [ "iafs = [InverseAutoregressiveFlow(z_dim, iaf_dim) for _ in range(num_iafs)]\nself.iafs = nn.ModuleList(iafs)", "_____no_output_____" ] ], [ [ "This instantiates `num_iafs` many normalizing flows of the `InverseAutoregressiveFlow` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. Next, in the guide we add the lines", "_____no_output_____" ] ], [ [ "if self.iafs.__len__() > 0:\n z_dist = TransformedDistribution(z_dist, self.iafs)", "_____no_output_____" ] ], [ [ "Here we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila!", "_____no_output_____" ], [ "### Checkpointing\n\nIf we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter).\n\nIn Pyro, the parameters can all be found in the `ParamStore`. However, Pytorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. In the case that we have `InverseAutoregressiveFlow`'s in the loop, this is in fact the only option at our disposal. This is because the `InverseAutoregressiveFlow` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly.\n\nTo save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analagous. 
So our entire logic for saving and loading checkpoints only takes a few lines:", "_____no_output_____" ] ], [ [ " # saves the model and optimizer states to disk\n def save_checkpoint():\n log(\"saving model to %s...\" % args.save_model)\n torch.save(dmm.state_dict(), args.save_model)\n log(\"saving optimizer states to %s...\" % args.save_opt)\n optimizer.save(args.save_opt)\n log(\"done saving model and optimizer checkpoints to disk.\")\n\n # loads the model and optimizer states from disk\n def load_checkpoint():\n assert exists(args.load_opt) and exists(args.load_model), \\\n \"--load-model and/or --load-opt misspecified\"\n log(\"loading model from %s...\" % args.load_model)\n dmm.load_state_dict(torch.load(args.load_model))\n log(\"loading optimizer states from %s...\" % args.load_opt)\n optimizer.load(args.load_opt)\n log(\"done loading model and optimizer states.\")", "_____no_output_____" ] ], [ [ "## Some final comments\n\nA deep markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over?\n\nNot at all! The beauty of probalistic programming is that it enables&mdash;and encourages&mdash;modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models. \n\n## References\n\n[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp;\n Rahul G. Krishnan, Uri Shalit, David Sontag\n \n[2] `Variational Inference with Normalizing Flows`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nDanilo Jimenez Rezende, Shakir Mohamed \n \n[3] `Improving Variational Inference with Inverse Autoregressive Flow`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nDiederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling \n\n[4] `MADE: Masked Autoencoder for Distribution Estimation Mathieu`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nGermain, Karol Gregor, Iain Murray, Hugo Larochelle \n\n[5] `Modeling Temporal Dependencies in High-Dimensional Sequences:`\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\n`Application to Polyphonic Music Generation and Transcription`,\n<br />&nbsp;&nbsp;&nbsp;&nbsp;\nBoulanger-Lewandowski, N., Bengio, Y. and Vincent, P.", "_____no_output_____" ] ] ]
[ "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "raw" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cbc4f3f440740a3988eddb6230915d1d1dfbe20b
8,732
ipynb
Jupyter Notebook
classical-systems/CS24_Two_Probabilistic_Bits.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
1
2021-08-15T10:57:16.000Z
2021-08-15T10:57:16.000Z
classical-systems/CS24_Two_Probabilistic_Bits.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
classical-systems/CS24_Two_Probabilistic_Bits.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
3
2021-08-11T11:12:38.000Z
2021-09-14T09:15:08.000Z
37.637931
309
0.523133
[ [ [ "<a href=\"https://qworld.net\" target=\"_blank\" align=\"left\"><img src=\"../qworld/images/header.jpg\" align=\"left\"></a>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $\n$ \\newcommand{\\greenbit}[1] {\\mathbf{{\\color{green}#1}}} $\n$ \\newcommand{\\bluebit}[1] {\\mathbf{{\\color{blue}#1}}} $\n$ \\newcommand{\\redbit}[1] {\\mathbf{{\\color{red}#1}}} $\n$ \\newcommand{\\brownbit}[1] {\\mathbf{{\\color{brown}#1}}} $\n$ \\newcommand{\\blackbit}[1] {\\mathbf{{\\color{black}#1}}} $", "_____no_output_____" ], [ "<font style=\"font-size:28px;\" align=\"left\"><b>Two Probabilistic Bits </b></font>\n<br>\n_prepared by Abuzer Yakaryilmaz_\n<br><br>\n[<img src=\"../qworld/images/watch_lecture.jpg\" align=\"left\">](https://youtu.be/ulbd-1c71sk)\n<br><br><br>", "_____no_output_____" ], [ "Suppose that we have two probabilistic bits, and our probabilistic states respectively are\n\n$ \\myvector{0.2 \\\\ 0.8} \\mbox{ and } \\myvector{0.6 \\\\ 0.4 }. $\n\nIf we combine both bits as a single system, then what is the state of the combined system?", "_____no_output_____" ], [ "In total, we have four different states. 
We can name them as follows:\n<ul>\n <li>00: both bits are in states 0</li>\n <li>01: the first bit is in state 0 and the second bit is in state 1</li>\n <li>10: the first bit is in state 1 and the second bit is in state 0</li>\n <li>11: both bits are in states 1</li>\n</ul>", "_____no_output_____" ], [ "<h3> Task 1 </h3>\n\n<b>Discussion and analysis:</b>\n\nWhat are the probabilities of being in states $ 00 $, $ 01 $, $ 10 $, and $11$?\n\nHow can we represent these probabilities as a column vector?", "_____no_output_____" ], [ "<h3> Representation for states 0 and 1</h3>\n\nThe vector representation of state 0 is $ \\myvector{1 \\\\ 0} $. Similarly, the vector representation of state 1 is $ \\myvector{0 \\\\ 1} $.\n\nWe use $ \\pstate{0} $ to represent $ \\myvector{1 \\\\ 0} $ and $ \\pstate{1} $ to represent $ \\myvector{0 \\\\ 1} $.\n\nThen, the probabilistic state $ \\myvector{0.2 \\\\ 0.8} $ is also represented as $ 0.2 \\pstate{0} + 0.8 \\pstate{1} $.\n\nSimilarly, the probabilistic state $ \\myvector{0.6 \\\\ 0.4} $ is also represented as $ 0.6 \\pstate{0} + 0.4 \\pstate{1} $.", "_____no_output_____" ], [ "<h3> Composite systems </h3>\n\nWhen two systems are composed, then their states are tensored to calculate the state of composite system.\n\nThe probabilistic state of the first bit is $ \\myvector{0.2 \\\\ 0.8} = 0.2 \\pstate{0} + 0.8 \\pstate{1} $.\n\nThe probabilistic state of the second bit is $ \\myvector{0.6 \\\\ 0.4} = 0.6 \\pstate{0} + 0.4 \\pstate{1} $.\n\nThen, the probabilistic state of the composite system is $ \\big( 0.2 \\pstate{0} + 0.8 \\pstate{1} \\big) \\otimes \\big( 0.6 \\pstate{0} + 0.4 \\pstate{1} \\big) $.\n", "_____no_output_____" ], [ "<h3> Task 2 </h3>\n\nFind the probabilistic state of the composite system.\n\n<i> \nRule 1: Tensor product distributes over addition in the same way as the distribution of multiplication over addition.\n\nRule 2: $ \\big( 0.3 \\pstate{1} \\big) \\otimes \\big( 0.7 \\pstate{0} \\big) = (0.3 \\cdot 0.7) \\big( \\pstate{1} \\otimes \\pstate{0} \\big) = 0.21 \\pstate{10} $.\n</i>", "_____no_output_____" ], [ "<a href=\"CS24_Two_Probabilistic_Bits_Solutions.ipynb#task2\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 3</h3>\n\nFind the probabilistic state of the composite system by calculating this tensor product $ \\myvector{0.2 \\\\ 0.8} \\otimes \\myvector{0.6 \\\\ 0.4 } $.", "_____no_output_____" ], [ "<a href=\"CS24_Two_Probabilistic_Bits_Solutions.ipynb#task3\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 4</h3>\n\nFind the vector representations of $ \\pstate{00} $, $ \\pstate{01} $, $\\pstate{10}$, and $ \\pstate{11} $.\n\n<i>The vector representation of $ \\pstate{ab} $ is $ \\pstate{a} \\otimes \\pstate{b} $ for $ a,b \\in \\{0,1\\} $.</i>", "_____no_output_____" ], [ "<a href=\"CS24_Two_Probabilistic_Bits_Solutions.ipynb#task4\">click for our solution</a>", "_____no_output_____" ], [ "---\n\n<h3> Extra: Task 5 </h3>\n\nSuppose that we have three bits.\n\nFind the vector representations of $ \\pstate{abc} $ for each $ a,b,c \\in \\{0,1\\} $.", "_____no_output_____" ], [ "<h3> Extra: Task 6 </h3>\n\n<i>This task is challenging.</i>\n\nSuppose that we have four bits. \n\nNumber 9 is represented as $ 1001 $ in binary. Verify that the vector representation of $ \\pstate{1001} $ is the zero vector except its $10$th entry, which is 1.\n\nNumber 7 is represented as $ 0111 $ in binary. 
Verify that the vector representation of $ \\pstate{0111} $ is the zero vector except its $8$th entry, which is 1.\n\nGeneralize this idea for any number between 0 and 15.\n\nGeneralize this idea for any number of bits.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cbc4fa3c0d48fe1db5a9d6b9b356211f3a67ecbf
206,259
ipynb
Jupyter Notebook
talk-executed.ipynb
brandonwarren/intro-to-qubit
b70a69d4453dc78c8f48aae61181b87062c45322
[ "Apache-2.0" ]
1
2020-02-09T00:58:11.000Z
2020-02-09T00:58:11.000Z
talk-executed.ipynb
brandonwarren/intro-to-qubit
b70a69d4453dc78c8f48aae61181b87062c45322
[ "Apache-2.0" ]
null
null
null
talk-executed.ipynb
brandonwarren/intro-to-qubit
b70a69d4453dc78c8f48aae61181b87062c45322
[ "Apache-2.0" ]
null
null
null
166.070048
12,804
0.888199
[ [ [ "# Introduction to the Quantum Bit\n### Where we'll explore:\n* **Quantum Superposition**\n* **Quantum Entanglement**\n* **Running experiments on a laptop-hosted simulator**\n* **Running experiments on a real quantum computer**\n\n### Brandon Warren\n### SDE, Zonar Systems\ngithub.com/brandonwarren/intro-to-qubit contains this Jupyter notebook and installation tips.", "_____no_output_____" ] ], [ [ "import py_cas_slides as slides", "loaded\n" ], [ "# real 6-qubit quantum computer, incl interface electronics\nslides.system()", "_____no_output_____" ], [ "# import QISkit, define function to set backend that will execute our circuits\n\nHISTO_SIZE = (9,4) # width, height in inches\nCIRCUIT_SIZE = 1.0 # scale (e.g. 0.5 is half-size)\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute\nfrom qiskit import BasicAer as Aer\nfrom qiskit.tools.visualization import plot_histogram\nfrom qiskit import __qiskit_version__\n\nprint(__qiskit_version__)\n\ndef set_backend(use_simulator: bool, n_qubits: int, preferred_backend: str=''):\n if use_simulator:\n backend = Aer.get_backend('qasm_simulator')\n else:\n from qiskit import IBMQ\n provider = IBMQ.load_account()\n if preferred_backend:\n # use backend specified by caller\n backend = provider.get_backend(preferred_backend)\n print(f\"Using {backend.name()}\")\n else:\n # use least-busy backend that has enough qubits\n from qiskit.providers.ibmq import least_busy\n large_enough_devices = provider.backends(filters=lambda x: x.configuration().n_qubits >= n_qubits and not x.configuration().simulator)\n backend = least_busy(large_enough_devices)\n print(f\"The best backend is {backend.name()}\")\n return backend\n\ndef add_missing_keys(counts):\n # we want all keys present in counts, even if they are zero value\n for key in ['00', '01', '10', '11']:\n if key not in counts:\n counts[key] = 0", "{'qiskit-terra': '0.11.1', 'qiskit-aer': '0.3.4', 'qiskit-ignis': '0.2.0', 'qiskit-ibmq-provider': '0.4.5', 'qiskit-aqua': '0.6.2', 'qiskit': '0.14.1'}\n" ], [ "# use simulator for now\nbackend = set_backend(use_simulator=True, n_qubits=2)", "_____no_output_____" ], [ "# write code to build this quantum circuit\n# logic flows left to right\n# quantum bits begin in ground state (zero)\n# measurement copies result to classical bit\nslides.simple_2qubits() # simplest possible 2-qubit circuit", "_____no_output_____" ], [ "# 1. Build simplest possible 2-qubit quantum circuit and draw it\n\nq_reg = QuantumRegister(2, 'q') # the 2 qubits we'll be using\nc_reg = ClassicalRegister(2, 'c') # clasical bits to hold results of measurements\n\ncircuit = QuantumCircuit(q_reg, c_reg) # begin circuit - just 2 qubits and 2 classical bits\n\n# measure while still in ground state\ncircuit.measure(q_reg, c_reg) # measure qubits, place results in classical bits\n\n# circuit is now complete\ncircuit.draw(output='mpl', scale=CIRCUIT_SIZE)", "_____no_output_____" ], [ "# run it 1000 times on simulator\nresult = execute(circuit, backend=backend, shots=1000).result()\ncounts = result.get_counts(circuit)\nprint(counts)\nadd_missing_keys(counts)\nprint(counts)\nplot_histogram(counts, figsize=HISTO_SIZE)", "{'00': 1000}\n{'00': 1000, '01': 0, '10': 0, '11': 0}\n" ], [ "# 2. 
Apply X gate (NOT gate) to high qubit (q1)\n\nq_reg = QuantumRegister(2, 'q')\nc_reg = ClassicalRegister(2, 'c')\n\ncircuit = QuantumCircuit(q_reg, c_reg)\n\n###### apply X gate to high qubit ######\ncircuit.x(q_reg[1])\n\ncircuit.measure(q_reg, c_reg)\n\ncircuit.draw(output='mpl', scale=CIRCUIT_SIZE)", "_____no_output_____" ], [ "# run it 1000 times on simulator\nresult = execute(circuit, backend=backend, shots=1000).result()\ncounts = result.get_counts(circuit)\nprint(counts)\nadd_missing_keys(counts)\nplot_histogram(counts, figsize=HISTO_SIZE)", "{'10': 1000}\n" ], [ "# We've seen the two simplest quantum circuits possible.\n# Let's take it up a notch and place each qubit into a quantum superposition.\n# ?\nslides.super_def()", "_____no_output_____" ], [ "# Like you flip a coin - while it is spinning it is H and T.\n# When you catch it, it is H or T. \n# BUT: it is as if it was that way all along. \n# What's the difference between that, and a coin under a\n# piece of paper that is revealed?\nslides.feynman_quote()", "_____no_output_____" ], [ "slides.double_slit()\n# (2)", "_____no_output_____" ], [ "# Like the photon that is in 2 places at once, the qubit can\n# be in 2 states at once, and become 0 or 1 when it is measured.\n\n# Let's place our 2 qubits in superposion and measure them.\n# The act of measurement collapses the superposition,\n# resulting in 1 of the 2 possible values.\n\n# H - Hadamard will turn our 0 into a superposition of 0 and 1.\n# It rotates the state of the qubit.\n# (coin over table analogy)\n\n# 3. Apply H gate to both qubits\n\nq_reg = QuantumRegister(2, 'q')\nc_reg = ClassicalRegister(2, 'c')\n\ncircuit = QuantumCircuit(q_reg, c_reg)\n\n###### apply H gate to both qubits ######\ncircuit.h(q_reg[0])\ncircuit.h(q_reg[1])\n\ncircuit.measure(q_reg, c_reg)\n\ncircuit.draw(output='mpl', scale=CIRCUIT_SIZE)", "_____no_output_____" ], [ "# histo - 2 bits x 2 possibilities = 4 combinations of equal probability\nresult = execute(circuit, backend=backend, shots=1000).result()\ncounts = result.get_counts(circuit)\nprint(counts)\nadd_missing_keys(counts)\nplot_histogram(counts, figsize=HISTO_SIZE)\n# TRUE random numbers! (when run on real device)", "{'00': 245, '01': 262, '10': 228, '11': 265}\n" ], [ "# Special case of superposition, entanglement, revealed by EPR expmt\nslides.mermin_quote()", "_____no_output_____" ], [ "# Before we get to that, i'd like to set the stage by intro\n# 2 concepts: locality and hidden variables.\n# The principle of locality says that for one thing to affect\n# another, they have to be in the same location, or need some\n# kind of field or signal connecting the two, with\n# the fastest possible propagation speed being that of light. \n# This even applies to gravity, which prop at the speed of light.\n# [We are 8 light-minutes from the Sun, so if the Sun all of a\n# sudden vanished somehow, we would still orbit for another 8 min.]\n# \n# Even though Einstein helped launch the new field of QM, he never\n# really liked it. In particular, he couln't accept the randomness.\nslides.einstein_dice()", "_____no_output_____" ], [ "slides.bohr_response()", "_____no_output_____" ], [ "# (3)\nslides.epr_nyt()", "_____no_output_____" ], [ "# (4)\nslides.einstein_vs_bohr()", "_____no_output_____" ], [ "# [Describe entanglement using coins odd,even]\n# 4. 
Entanglement - even-parity\n\nq_reg = QuantumRegister(2, 'q')\nc_reg = ClassicalRegister(2, 'c')\n\ncircuit = QuantumCircuit(q_reg, c_reg)\n\n###### place q[0] in superposition ######\ncircuit.h(q_reg[0])\n\n###### CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state\n# Target is inverted if control is true\ncircuit.cx(q_reg[0], q_reg[1])\n\ncircuit.measure(q_reg, c_reg)\n\ncircuit.draw(output='mpl', scale=CIRCUIT_SIZE)", "_____no_output_____" ], [ "result = execute(circuit, backend=backend, shots=1000).result()\ncounts = result.get_counts(circuit)\nprint(counts)\nadd_missing_keys(counts)\nplot_histogram(counts, figsize=HISTO_SIZE)", "{'00': 507, '11': 493}\n" ], [ "# 5. Entanglement - odd-parity\n\nq_reg = QuantumRegister(2, 'q')\nc_reg = ClassicalRegister(2, 'c')\n\ncircuit = QuantumCircuit(q_reg, c_reg)\n\n###### place q[0] in superposition ######\ncircuit.h(q_reg[0])\n\n###### CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state\n# Target is inverted if control is true\ncircuit.cx(q_reg[0], q_reg[1])\n\n# a 0/1 superposition is converted to a 1/0 superposition\n# i.e. rotates state 180 degrees\n# creates odd-parity entanglement\ncircuit.x(q_reg[0])\n\ncircuit.measure(q_reg, c_reg)\n\ncircuit.draw(output='mpl', scale=CIRCUIT_SIZE)", "_____no_output_____" ], [ "result = execute(circuit, backend=backend, shots=1000).result()\ncounts = result.get_counts(circuit)\nprint(counts)\nadd_missing_keys(counts)\nplot_histogram(counts, figsize=HISTO_SIZE)", "{'01': 512, '10': 488}\n" ], [ "# (5)\nslides.Bell_CHSH_inequality()", "_____no_output_____" ], [ "# Let's run the Bell expmt on a real device.\n# This will not be a simulation!\n# backend = set_backend(use_simulator=False, n_qubits=2) # 1st avail is RISKY\nbackend = set_backend(use_simulator=False, n_qubits=2, preferred_backend='ibmq_ourense')", "Using ibmq_ourense\n" ], [ "# [quickly: draw circuits, execute, then go over code and circuits]\n\n# 6. Bell experiment\n\nimport numpy as np\n\n# Define the Quantum and Classical Registers\nq = QuantumRegister(2, 'q')\nc = ClassicalRegister(2, 'c')\n\n# create Bell state\nbell = QuantumCircuit(q, c)\nbell.h(q[0]) # place q[0] in superposition\nbell.cx(q[0], q[1]) # CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state\n\n# setup measurement circuits\n\n# ZZ not used for Bell inequality, but interesting for real device (i.e. 
not perfect)\nmeas_zz = QuantumCircuit(q, c)\nmeas_zz.barrier()\nmeas_zz.measure(q, c)\n\n# ZW: A=Z=0° B=W=45°\nmeas_zw = QuantumCircuit(q, c)\nmeas_zw.barrier()\nmeas_zw.s(q[1])\nmeas_zw.h(q[1])\nmeas_zw.t(q[1])\nmeas_zw.h(q[1])\nmeas_zw.measure(q, c)\n\n# ZV: A=Z=0° B=V=-45°\nmeas_zv = QuantumCircuit(q, c)\nmeas_zv.barrier()\nmeas_zv.s(q[1])\nmeas_zv.h(q[1])\nmeas_zv.tdg(q[1])\nmeas_zv.h(q[1])\nmeas_zv.measure(q, c)\n\n# XW: A=X=90° B=W=45°\nmeas_xw = QuantumCircuit(q, c)\nmeas_xw.barrier()\nmeas_xw.h(q[0])\nmeas_xw.s(q[1])\nmeas_xw.h(q[1])\nmeas_xw.t(q[1])\nmeas_xw.h(q[1])\nmeas_xw.measure(q, c)\n\n# XV: A=X=90° B=V=-45° - instead of being 45° diff,\n# they are 90°+45°=135° = 180°-45°,\n# which is why the correlation is negative and we negate it\n# before adding the the rest of the correlations.\nmeas_xv = QuantumCircuit(q, c)\nmeas_xv.barrier()\nmeas_xv.h(q[0])\nmeas_xv.s(q[1])\nmeas_xv.h(q[1])\nmeas_xv.tdg(q[1])\nmeas_xv.h(q[1])\nmeas_xv.measure(q, c)\n\n# build circuits\ncircuits = []\nlabels = []\nab_labels = []\ncircuits.append(bell + meas_zz)\nlabels.append('ZZ')\nab_labels.append(\"\") # not used\ncircuits.append(bell + meas_zw)\nlabels.append('ZW')\nab_labels.append(\"<AB>\")\ncircuits.append(bell + meas_zv)\nlabels.append('ZV')\nab_labels.append(\"<AB'>\")\ncircuits.append(bell + meas_xw)\nlabels.append('XW')\nab_labels.append(\"<A'B>\")\ncircuits.append(bell + meas_xv)\nlabels.append('XV')\nab_labels.append(\"<A'B'>\")\n\nprint(\"Circuit to measure ZZ (A=Z=0° B=Z=0°) - NOT part of Bell expmt\")\ncircuits[0].draw(output='mpl', scale=CIRCUIT_SIZE)", "Circuit to measure ZZ (A=Z=0° B=Z=0°) - NOT part of Bell expmt\n" ], [ "print(\"Circuit to measure ZW (A=Z=0° B=W=45°)\")\nprint(\"The gates to the right of the vertical bar rotate the measurement axis.\")\ncircuits[1].draw(output='mpl', scale=CIRCUIT_SIZE)", "Circuit to measure ZW (A=Z=0° B=W=45°)\nThe gates to the right of the vertical bar rotate the measurement axis.\n" ], [ "print(\"Circuit to measure ZV (A=Z=0° B=V=-45°)\")\ncircuits[2].draw(output='mpl', scale=CIRCUIT_SIZE)", "Circuit to measure ZV (A=Z=0° B=V=-45°)\n" ], [ "print(\"Circuit to measure XW (A=X=90° B=W=45°)\")\ncircuits[3].draw(output='mpl', scale=CIRCUIT_SIZE)", "Circuit to measure XW (A=X=90° B=W=45°)\n" ], [ "print(\"Circuit to meas XV (A=X=90° B=V=-45°) (negative correlation)\")\ncircuits[4].draw(output='mpl', scale=CIRCUIT_SIZE)", "Circuit to meas XV (A=X=90° B=V=-45°) (negative correlation)\n" ], [ "# execute, then review while waiting\n\nfrom datetime import datetime, timezone\nimport time\n\n# execute circuits\nshots = 1024\njob = execute(circuits, backend=backend, shots=shots)\nprint('after call execute()')\n\nif backend.name() != 'qasm_simulator':\n try:\n info = None\n max_tries = 3\n while max_tries>0 and not info:\n time.sleep(1) # need to wait a little bit before calling queue_info()\n info = job.queue_info()\n print(f'queue_info: {info}')\n max_tries -= 1\n now_utc = datetime.now(timezone.utc)\n print(f'\\njob status: {info._status} as of {now_utc.strftime(\"%H:%M:%S\")} UTC')\n print(f'position: {info.position}')\n print(f'estimated start time: {info.estimated_start_time.strftime(\"%H:%M:%S\")}')\n print(f'estimated complete time: {info.estimated_complete_time.strftime(\"%H:%M:%S\")}')\n wait_time = info.estimated_complete_time - now_utc\n wait_min, wait_sec = divmod(wait_time.seconds, 60)\n print(f'estimated wait time is {wait_min} minutes {wait_sec} seconds')\n except Exception as err:\n print(f'error getting job info: {err}')\n\nresult = 
job.result() # blocks until complete\nprint(f'job complete as of {datetime.now(timezone.utc).strftime(\"%H:%M:%S\")} UTC')\n\n# gather data\ncounts = []\nfor i, label in enumerate(labels):\n circuit = circuits[i]\n data = result.get_counts(circuit)\n counts.append(data)\n\n# show counts of Bell state measured in Z-axis\nprint('\\n', labels[0], counts[0], '\\n')\n\n# show histogram of Bell state measured in Z-axis\n# real devices are not yet perfect. due to noise.\nadd_missing_keys(counts[0])\nplot_histogram(counts[0], figsize=HISTO_SIZE)", "after call execute()\nqueue_info: None\nqueue_info: QueueInfo(_status='PENDING_IN_QUEUE', estimated_complete_time=datetime.datetime(2020, 2, 6, 18, 33, 48, 186000, tzinfo=datetime.timezone.utc), estimated_start_time=datetime.datetime(2020, 2, 6, 18, 33, 11, 666000, tzinfo=datetime.timezone.utc), group_priority=1.0, hub_priority=0.290388, position=12, project_priority=1.0)\n\njob status: PENDING_IN_QUEUE as of 16:53:18 UTC\nposition: 12\nestimated start time: 18:33:11\nestimated complete time: 18:33:48\nestimated wait time is 100 minutes 29 seconds\njob complete as of 18:05:42 UTC\n\n ZZ {'00': 493, '01': 25, '10': 32, '11': 474} \n\n" ], [ "# tabular output\nprint(' (+) (+) (-) (-)')\nprint(' P(00) P(11) P(01) P(10) correlation')\nC = 0.0\nfor i in range(1, len(labels)):\n AB = 0.0\n print(f'{labels[i]} ', end ='')\n N = 0\n for out in ('00', '11', '01', '10'):\n P = counts[i][out]/float(shots)\n N += counts[i][out]\n if out in ('00', '11'):\n AB += P\n else:\n AB -= P\n print(f'{P:.3f} ', end='')\n if N != shots:\n print(f'ERROR: N={N} shots={shots}')\n print(f'{AB:6.3f} {ab_labels[i]}')\n if labels[i] == 'XV':\n # the negative correlation - make it positive before summing it\n C -= AB\n else:\n C += AB\n\nprint(f\"\\nC = <AB> + <AB'> + <A'B> - <A'B'>\")\nprint(f' = <ZW> + <ZV> + <XW> - <XV>')\nprint(f' = {C:.2f}\\n')\n\nif C <= 2.0:\n print(\"Einstein: 1 Quantum theory: 0\")\nelse:\n print(\"Einstein: 0 Quantum theory: 1\")", " (+) (+) (-) (-)\n P(00) P(11) P(01) P(10) correlation\nZW 0.339 0.446 0.062 0.152 0.570 <AB>\nZV 0.473 0.352 0.127 0.049 0.648 <AB'>\nXW 0.340 0.508 0.090 0.062 0.695 <A'B>\nXV 0.131 0.117 0.446 0.306 -0.504 <A'B'>\n\nC = <AB> + <AB'> + <A'B> - <A'B'>\n = <ZW> + <ZV> + <XW> - <XV>\n = 2.42\n\nEinstein: 0 Quantum theory: 1\n" ] ], [ [ "## Superposition and entanglement main points\n* Superposition is demonstrated by the double-slit experiment, which suggests that a photon can be in two positions at once, because the interference pattern only forms if two photons interfere with each other, and it forms even if we send one photon at a time.\n\n* Hidden variable theories seek to provide determinism to quantum physics.\n\n* The principle of locality states that an influence of one particle on another cannot propagate faster than the speed of light.\n\n* Entanglement cannot be explained by local hidden variable theories.\n\n## Summary\n* Two of the strangest concepts in quantum physics, superposition and entanglement, are used in quantum computing, and are waiting to be explored by you.\n\n* You can run simple experiments on your laptop, and when you're ready, run them on a real quantum computer, over the cloud, for free.\n\n* IBM's qiskit.org contains software, tutorials, and an active Slack community.\n\n* My Github repo includes this presentation, tips on installing IBM's Qiskit on your laptop, and links for varying levels of explanations of superpositions and entanglements:\ngithub.com/brandonwarren/intro-to-qubit\n", 
"_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cbc4fb366befc1b6f34f39080d7b24a22b7acfa9
19,982
ipynb
Jupyter Notebook
Python/1. Python Basics/Notebooks/3. Classes and OOP/Objects_Classes_(some more primes fun).ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Python/1. Python Basics/Notebooks/3. Classes and OOP/Objects_Classes_(some more primes fun).ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
null
null
null
Python/1. Python Basics/Notebooks/3. Classes and OOP/Objects_Classes_(some more primes fun).ipynb
okara83/Becoming-a-Data-Scientist
f09a15f7f239b96b77a2f080c403b2f3e95c9650
[ "MIT" ]
2
2022-02-09T15:41:33.000Z
2022-02-11T07:47:40.000Z
19,982
19,982
0.613352
[ [ [ "# Objects and Classes\n\n---\n\n## The baisc idea is to capture the atributes of an object (plane, matrix, pet, ...) in an abstract description, along with the methods to interact with such objects.\n\n>> ## This abstract description is what we call a class\n\n\n\n---\n\n## Specific instances of a class are captured as objects.\n\n>> convention is tht class names are specificed with capital letters", "_____no_output_____" ] ], [ [ "class Complex:\n def __init__(self, realpart, imagpart):\n self.r = realpart\n self.i = imagpart\n \nx = Complex(3.0, -4.5)", "_____no_output_____" ], [ "x.r, x.i", "_____no_output_____" ] ], [ [ "Try to write a class that takes in a point as an object. three-space", "_____no_output_____" ] ], [ [ "class Point3D:\n def __init__(self, x, y, z):\n \"\"\"Initialize a point in a three dimensional plane of real values\"\"\"\n self.x = x\n self.y = y\n self.z = z\n\n def distance(self, point):\n \"\"\"Compute Distance to Another Point\"\"\"\n d = (\n (self.x - point.x) ** 2 + (self.y - point.y) ** 2 + (self.z - point.z) ** 2\n ) ** 0.5\n return d\n\n def shiftedPoint(self, shx, shy, shz):\n \"\"\"shift point by specified offset\"\"\"\n newx = self.x + shx\n newy = self.y + shy\n newz = self.x + shz\n\n return Point3D(newx, newy, newz)\n", "_____no_output_____" ], [ "p = Point3D(0,0,1)\nq = Point3D(0,0,2)\n\np.distance(q)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "q = p.shiftedPoint(42,0,5)\nq.x", "_____no_output_____" ], [ "def Euclidean_GCD(a, b):\n while b != 0:\n t = b\n b = a % b\n a = t\n return a\n\n\nclass Rational:\n def __init__(self, n, d):\n \"\"\"construct a rational number in the lowest term\"\"\"\n if d == 0:\n raise ZeroDivisionError(\"Denominator of rational may not be zero.\")\n else:\n g = Euclidean_GCD(n, d)\n self.n = n / g\n self.d = d / g\n\n def __add__(self, other):\n \"\"\"add two rational numbers\"\"\"\n return Rational(self.n * other.d + other.n * self.d, self.d * other.d)\n\n def __sub__(self, other):\n \"\"\"subtract two rational numbers\"\"\"\n return Rational(self.n * other.d - other.n * self.d, self.d * other.d)\n\n def __mul__(self, other):\n \"\"\"multiply two rational numbers\"\"\"\n return Rational(self.n * other.n, self.d * other.d)\n\n def __div__(self, other):\n \"\"\"divide two rational numbers\"\"\"\n return Rational(self.n * other.d, self.d * other.n)\n\n def __eq__(self, other):\n \"\"\"check if two rational numbers are equivalent\"\"\"\n if self.n * other.d == other.n * self.d:\n return True\n else:\n return False\n\n def __str__(self):\n \"\"\"convert fraction to string\"\"\"\n return str(self.n) + \"/\" + str(self.d)\n\n def __repr__(self):\n \"\"\"returns a valid python description of a fraction\"\"\"\n\n return \"Rational(\" + str(int(self.n)) + \",\" + str(int(self.d)) + \")\"\n\n def __le__(self):\n \"\"\"<= for fractions\"\"\"\n self_float = self.n / self.d\n other_float = other.n / other.d\n\n if self.n * other.d <= other.n * self.d:\n return True\n else:\n return False\n", "_____no_output_____" ], [ "peter=Rational(1,2)", "_____no_output_____" ], [ "print(peter)", "1.0/2.0\n" ], [ "petra = Rational(1,2)\npeter = Rational(2,4)\nalice = Rational(3,5)\n\npetra == peter", "_____no_output_____" ], [ "petra == alice", "_____no_output_____" ], [ "alice + petra == alice + peter", "_____no_output_____" ], [ "petra - alice == alice - peter", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# Iterators in Python\n\n---\n\n## To iterate over an an object in Python wiht a for-loop, the 
following steps are performed:\n\n\n>>**1. Derive an assoicated iterator by applying iter() to the object**\n\n>> **2. The next function is applied to the iterator until a stop iteration exception occurs**\n\n", "_____no_output_____" ] ], [ [ "a = 'Hey there'\n\naa = iter(a)\n\naa", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "next(aa)", "_____no_output_____" ], [ "class SmallMatrix:\n def __init__(self, m11, m12, m21, m22):\n self.row1 = (\n m11,\n m12,\n )\n self.row2 = (\n m21,\n m22,\n )\n\n def __str__(self):\n \"\"\"convert fraction to string\"\"\"\n row1_string = str(self.row1[0]) + \" \" + str(self.row1[1])\n row2_string = str(self.row2[0]) + \" \" + str(self.row1[1])\n return row1_string + \"\\n\" + row2_string\n\n def __iter__(self):\n self._counter = 0 # common conventon in python code. A single underscore means for private use only\n return self\n\n def __next__(self):\n\n if self._counter == 0:\n self_counter += 1\n return self.row1[0]\n\n if self._counter == 1:\n self_counter += 1\n return self.row1[1]\n\n if self._counter == 2:\n self_counter += 1\n return self.row2[0]\n\n if self._counter == 3:\n self_counter += 1\n return self.row2[1]\n\n raise StopIteration\n", "_____no_output_____" ], [ "a = SmallMatrix(42, 0, 9, 18)\nfor i in a.row1:\n print(i)", "42\n0\n" ] ], [ [ "# Generators in Python\n\n---\n\n## Often, we can work with a generator which saves us from implementing __next__ and __iter__. Generators look just like functions, but instead of \"return\" they use yeild. When a generator is called repeatedly. It continues after the yeild statement, maintaining all values from the prior call.\n\n", "_____no_output_____" ] ], [ [ "def squares():\n a = 0\n while True:\n yield a * a\n a+=1", "_____no_output_____" ], [ "g = squares()", "_____no_output_____" ], [ "next(g)", "_____no_output_____" ], [ "next(g)", "_____no_output_____" ], [ "next(g)", "_____no_output_____" ], [ "next(g)", "_____no_output_____" ], [ "next(g)", "_____no_output_____" ], [ "[next(g) for i in range(50)]", "_____no_output_____" ], [ "def is_prime(m):\n \"\"\"return True if and only if n is a prime number\"\"\"\n n = abs(m)\n if n == 0 or n == 1 or (n % 2 == 0 and n > 2):\n return False\n\n for i in range(3, int(n ** (1 / 2) + 1), 2):\n if n % i == 0:\n return False\n return True", "_____no_output_____" ], [ "def Endless_Primes():\n yield 2\n n += 3\n while True:\n if isprime(n):\n yield n\n n += 12", "_____no_output_____" ], [ "def twinprimes(b):\n a = 3\n while True:\n if is_prime(a) == True and is_prime(b) == True:\n yield a + b\n a += 2", "_____no_output_____" ], [ "[next(g) for i in range (20)]", "_____no_output_____" ], [ "k = twinprimes(3)\nx=3", "_____no_output_____" ], [ "[next(k) for i in range (x)] ### this runs super long for x>3", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc5040ed833ff781174b13d6e87967078acbd76
175,707
ipynb
Jupyter Notebook
src/pytorch/Cup.ipynb
luca-demartino/machine-learning-project
e98b8b4ac09fdff331bacb2a73377eb4e5588543
[ "MIT" ]
null
null
null
src/pytorch/Cup.ipynb
luca-demartino/machine-learning-project
e98b8b4ac09fdff331bacb2a73377eb4e5588543
[ "MIT" ]
null
null
null
src/pytorch/Cup.ipynb
luca-demartino/machine-learning-project
e98b8b4ac09fdff331bacb2a73377eb4e5588543
[ "MIT" ]
null
null
null
92.819334
19,492
0.693854
[ [ [ "import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.optim as optim\nfrom sklearn.model_selection import train_test_split\nfrom torch.autograd import Variable\nfrom tqdm import trange", "_____no_output_____" ], [ "sys.path.append('/home/raffaele/Documents/ml-project/src/')", "_____no_output_____" ] ], [ [ "## Import Dataset", "_____no_output_____" ] ], [ [ "data = np.genfromtxt('/home/raffaele/Documents/ml-project/cup/ML-CUP20-TR.csv', delimiter=',', dtype=np.float32)\nX = data[:, 1:-2]\ny = data[:, -2:]", "_____no_output_____" ], [ "print(X.shape)\nprint(y.shape)", "(1524, 10)\n(1524, 2)\n" ] ], [ [ "### Split train set and Validation Set", "_____no_output_____" ] ], [ [ "Xtrain, Xval, ytrain, yval = train_test_split(X, y, test_size=0.10, random_state=42)", "_____no_output_____" ], [ "print(Xtrain.shape)\nprint(ytrain.shape)\nprint(Xval.shape)\nprint(yval.shape)", "(1371, 10)\n(1371, 2)\n(153, 10)\n(153, 2)\n" ], [ "BATCH_SIZE = len(Xtrain)", "_____no_output_____" ], [ "train_dataset = TensorDataset(torch.Tensor(Xtrain), torch.Tensor(ytrain))\ntrain_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle=True)\n# train_loader = DataLoader(train_dataset, shuffle=True)", "_____no_output_____" ] ], [ [ "## Define Models", "_____no_output_____" ] ], [ [ "class Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.input_layer = nn.Linear(10,100)\n self.hidden1 = nn.Linear(100,50)\n self.output = nn.Linear(50,2)\n\n def forward(self, x):\n x = torch.relu(self.input_layer(x))\n x = torch.relu(self.hidden1(x))\n x = self.output(x)\n return x", "_____no_output_____" ], [ "net = Net()\nprint(net)", "Net(\n (input_layer): Linear(in_features=10, out_features=100, bias=True)\n (hidden1): Linear(in_features=100, out_features=50, bias=True)\n (output): Linear(in_features=50, out_features=2, bias=True)\n)\n" ], [ "def train(net, optimizer, epochs=100, val_split=None):\n loss_list = []\n acc_list = []\n val_loss_list = []\n val_acc_list = []\n history = {\"loss\" : loss_list, \"acc\" : acc_list,\n \"val_loss\": val_loss_list, \"val_acc\" : val_acc_list}\n \n# optimizer = optim.SGD(net.parameters(),lr = 0.01,momentum = 0.)\n# criterion = nn.MSELoss()\n \n if (len(val_split) == 2):\n test_dataset = TensorDataset(torch.Tensor(val_split[0]), torch.Tensor(val_split[1]))\n test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle=True)\n# test_loader = DataLoader(test_dataset, shuffle=True)\n \n for epoch in (t := trange(epochs)):\n for inputs, targets in train_loader:\n optimizer.zero_grad()\n out = net(inputs)\n loss = MEE(out, targets)\n# loss = nn.MSELoss(out, targets)\n loss.backward()\n optimizer.step()\n \n acc,_ = evaluate(net, train_loader, verbose=False)\n val_acc, val_loss = evaluate(net, test_loader, verbose=False, criterion=True)\n val_loss_list.append(val_loss)\n loss_list.append(loss)\n acc_list.append(acc)\n val_acc_list.append(val_acc)\n t.set_description('epoch %d/%d loss=%.5f acc=%.2f val_loss=%.5f val_acc=%.2f'\n %(epoch+1, epochs, loss.item(), acc, val_loss, val_acc)) \n \n return history", "_____no_output_____" ], [ "def evaluate(net, test_loader, verbose=True, criterion=False):\n correct = 0\n total = 0\n loss = 0\n\n with torch.no_grad():\n for data in test_loader:\n X,y = data\n output = net(X)\n if (criterion):\n loss = MEE(output, y)\n# loss = nn.MSELoss(out, targets)\n for idx, i 
in enumerate(output):\n# pred = torch.round(torch.max(i))\n pred = output[idx]\n# print(pred)\n# print(y[idx])\n# print(pred)\n# print(y[idx])\n if ((pred == y[idx]).all()):\n correct+=1\n total+=1\n if verbose:\n print(\"Accuracy: \", round(correct/total, 2))\n# print(correct)\n return round(correct/total, 2), loss", "_____no_output_____" ] ], [ [ "### Initialize the weights", "_____no_output_____" ] ], [ [ "def init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.0)\n \nnet.apply(init_weights)", "_____no_output_____" ], [ "def MEE(y_real, y_pred): \n# return torch.mean(torch.cdist(y_real, y_pred, p=2))\n# return torch.div(torch.sum(F.pairwise_distance(y_real, y_pred, p=2)), len(y_real))\n return torch.mean(torch.linalg.norm(y_real - y_pred, axis=1))\n# return torch.mean(torch.sqrt(torch.square((y_real - y_pred))))\n# return torch.div(torch.linalg.norm(y_pred - y_real), len(y_real))", "_____no_output_____" ], [ "optimizer = optim.SGD(net.parameters(),lr = 0.006, momentum = 0.8, weight_decay=0.0001)", "_____no_output_____" ], [ "history = train(net, epochs=1000, optimizer=optimizer, val_split=(Xval, yval))", "epoch 1000/1000 loss=2.86976 acc=0.00 val_loss=3.34428 val_acc=0.00: 100%|██████████| 1000/1000 [01:07<00:00, 14.81it/s]\n" ], [ "import sys\nsys.path.append('/home/raffaele/Documents/ml-project/src/')", "_____no_output_____" ], [ "from torch_utility import *", "_____no_output_____" ], [ "plot_loss(history)", "_____no_output_____" ], [ "data = [\n history['loss'][-1],\n history['acc'][-1],\n history['val_loss'][-1],\n history['val_acc'][-1],\n]\ntable_info(data)", "\t MSE Accuracy\n-----------------------------\nTrain\t|2.8697636|\t0.00|\nTest\t|3.3442764|\t0.00|\n" ], [ "to_predict = torch.tensor(list(Xval), dtype=torch.float, requires_grad=False)\nout = net(to_predict)", "_____no_output_____" ], [ "out = out.detach().numpy()", "_____no_output_____" ], [ "x = out[:,0]\ny = out[:,1]", "_____no_output_____" ], [ "plt.scatter(x,y)", "_____no_output_____" ], [ "x_real = yval[:,0]\ny_real = yval[:,1]\nplt.scatter(x_real, y_real)", "_____no_output_____" ], [ "from sklearn.metrics import euclidean_distances\n\ndef mean_euclidean_error(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n# return np.mean(np.linalg.norm(y_pred - y_true))\n# return np.divide(np.linalg.norm(y - y_real), len(y_real))\n# return np.mean(euclidean_distances(y_true, y_pred))\n return np.mean(np.linalg.norm(y_true - y_pred, axis=1)) #utilizzare questa loss la prossima grid", "_____no_output_____" ], [ "mean_euclidean_error(out, yval)", "_____no_output_____" ], [ "from sklearn.metrics import euclidean_distances\n\ndef mean_euclidean_error(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n# return np.mean(np.linalg.norm(y_pred - y_true))\n# return np.divide(np.linalg.norm(y - y_real), len(y_real))\n# return np.mean(euclidean_distances(y_true, y_pred))\n return np.mean(np.linalg.norm(y_true - y_pred, axis=1)) #utilizzare questa loss la prossima grid", "_____no_output_____" ], [ "class MEE(torch.nn.Module):\n\n def __init__(self):\n super(MEE, self).__init__()\n\n def forward(self, y_true, y_pred):\n# return torch.mean(torch.linalg.norm(y_pred - y_true))\n# return torch.mean(torch.cdist(y_true, y_pred, p=2))\n return torch.div(torch.sum(torch.pairwise_distance(y_true, y_pred)), len(y_true))\n# return torch.div(torch.linalg.norm(y_pred - y_true, ord=None), len(y_true))\n# return torch.div(torch.linalg.norm(y_pred - y_true), len(y_true))", 
"_____no_output_____" ], [ "class Net(nn.Module):\n\n def __init__(self, num_units):\n super(Net, self).__init__()\n self.input_layer = nn.Linear(10,num_units)\n self.output = nn.Linear(num_units,2)\n\n def forward(self, x):\n x = torch.sigmoid(self.input_layer(x))\n x = self.output(x)\n return x", "_____no_output_____" ], [ "from skorch import NeuralNetRegressor\nfrom skorch.callbacks import EarlyStopping\ntest_net = Net(100,)\nnett = NeuralNetRegressor(test_net, max_epochs=1000,\n lr=0.01,\n batch_size=64,\n optimizer=optim.SGD,\n optimizer__momentum=0.8,\n optimizer__weight_decay=0.0001,\n optimizer__nesterov = True,\n criterion=MEE,\n callbacks=[EarlyStopping(patience=100)]\n )\n# Training\nnett.fit(Xtrain, ytrain)", " epoch train_loss valid_loss dur\n------- ------------ ------------ ------\n 1 \u001b[36m51.1258\u001b[0m \u001b[32m39.5325\u001b[0m 0.2066\n 2 \u001b[36m32.4036\u001b[0m \u001b[32m21.6235\u001b[0m 0.0443\n 3 \u001b[36m17.0926\u001b[0m \u001b[32m10.2772\u001b[0m 0.0462\n 4 \u001b[36m9.0690\u001b[0m \u001b[32m8.3958\u001b[0m 0.0414\n 5 \u001b[36m8.5086\u001b[0m \u001b[32m8.0937\u001b[0m 0.0413\n 6 \u001b[36m8.2209\u001b[0m \u001b[32m7.8052\u001b[0m 0.0443\n 7 \u001b[36m7.9375\u001b[0m \u001b[32m7.5136\u001b[0m 0.0409\n 8 \u001b[36m7.6347\u001b[0m \u001b[32m7.1964\u001b[0m 0.0425\n 9 \u001b[36m7.3007\u001b[0m \u001b[32m6.8480\u001b[0m 0.0414\n 10 \u001b[36m6.9365\u001b[0m \u001b[32m6.4799\u001b[0m 0.0412\n 11 \u001b[36m6.5543\u001b[0m \u001b[32m6.1039\u001b[0m 0.0449\n 12 \u001b[36m6.1784\u001b[0m \u001b[32m5.7627\u001b[0m 0.0444\n 13 \u001b[36m5.8394\u001b[0m \u001b[32m5.4834\u001b[0m 0.0436\n 14 \u001b[36m5.5683\u001b[0m \u001b[32m5.2898\u001b[0m 0.0382\n 15 \u001b[36m5.3716\u001b[0m \u001b[32m5.1756\u001b[0m 0.0418\n 16 \u001b[36m5.2378\u001b[0m \u001b[32m5.1157\u001b[0m 0.0405\n 17 \u001b[36m5.1476\u001b[0m \u001b[32m5.0764\u001b[0m 0.0415\n 18 \u001b[36m5.0827\u001b[0m \u001b[32m5.0474\u001b[0m 0.0415\n 19 \u001b[36m5.0296\u001b[0m \u001b[32m5.0212\u001b[0m 0.0419\n 20 \u001b[36m4.9828\u001b[0m \u001b[32m4.9951\u001b[0m 0.0433\n 21 \u001b[36m4.9397\u001b[0m \u001b[32m4.9682\u001b[0m 0.0411\n 22 \u001b[36m4.8987\u001b[0m \u001b[32m4.9404\u001b[0m 0.0459\n 23 \u001b[36m4.8593\u001b[0m \u001b[32m4.9118\u001b[0m 0.0358\n 24 \u001b[36m4.8210\u001b[0m \u001b[32m4.8826\u001b[0m 0.0522\n 25 \u001b[36m4.7836\u001b[0m \u001b[32m4.8530\u001b[0m 0.0446\n 26 \u001b[36m4.7470\u001b[0m \u001b[32m4.8234\u001b[0m 0.0354\n 27 \u001b[36m4.7111\u001b[0m \u001b[32m4.7930\u001b[0m 0.0386\n 28 \u001b[36m4.6758\u001b[0m \u001b[32m4.7621\u001b[0m 0.0335\n 29 \u001b[36m4.6411\u001b[0m \u001b[32m4.7309\u001b[0m 0.0351\n 30 \u001b[36m4.6068\u001b[0m \u001b[32m4.6995\u001b[0m 0.0340\n 31 \u001b[36m4.5731\u001b[0m \u001b[32m4.6683\u001b[0m 0.0340\n 32 \u001b[36m4.5395\u001b[0m \u001b[32m4.6371\u001b[0m 0.0336\n 33 \u001b[36m4.5070\u001b[0m \u001b[32m4.6055\u001b[0m 0.0374\n 34 \u001b[36m4.4750\u001b[0m \u001b[32m4.5747\u001b[0m 0.0354\n 35 \u001b[36m4.4435\u001b[0m \u001b[32m4.5475\u001b[0m 0.0350\n 36 \u001b[36m4.4112\u001b[0m \u001b[32m4.5146\u001b[0m 0.0348\n 37 \u001b[36m4.3823\u001b[0m \u001b[32m4.4871\u001b[0m 0.0334\n 38 \u001b[36m4.3513\u001b[0m \u001b[32m4.4562\u001b[0m 0.0363\n 39 \u001b[36m4.3235\u001b[0m \u001b[32m4.4285\u001b[0m 0.0392\n 40 \u001b[36m4.2946\u001b[0m \u001b[32m4.3994\u001b[0m 0.0348\n 41 \u001b[36m4.2673\u001b[0m \u001b[32m4.3711\u001b[0m 0.0344\n 42 \u001b[36m4.2402\u001b[0m \u001b[32m4.3431\u001b[0m 0.0330\n 43 \u001b[36m4.2139\u001b[0m \u001b[32m4.3155\u001b[0m 
0.0342\n 44 \u001b[36m4.1882\u001b[0m \u001b[32m4.2884\u001b[0m 0.0361\n 45 \u001b[36m4.1631\u001b[0m \u001b[32m4.2618\u001b[0m 0.0344\n 46 \u001b[36m4.1387\u001b[0m \u001b[32m4.2359\u001b[0m 0.0347\n 47 \u001b[36m4.1148\u001b[0m \u001b[32m4.2105\u001b[0m 0.0344\n 48 \u001b[36m4.0916\u001b[0m \u001b[32m4.1857\u001b[0m 0.0339\n 49 \u001b[36m4.0690\u001b[0m \u001b[32m4.1616\u001b[0m 0.0339\n 50 \u001b[36m4.0469\u001b[0m \u001b[32m4.1382\u001b[0m 0.0336\n 51 \u001b[36m4.0256\u001b[0m \u001b[32m4.1154\u001b[0m 0.0331\n 52 \u001b[36m4.0048\u001b[0m \u001b[32m4.0933\u001b[0m 0.0338\n 53 \u001b[36m3.9847\u001b[0m \u001b[32m4.0718\u001b[0m 0.0332\n 54 \u001b[36m3.9654\u001b[0m \u001b[32m4.0521\u001b[0m 0.0350\n 55 \u001b[36m3.9468\u001b[0m \u001b[32m4.0348\u001b[0m 0.0356\n 56 \u001b[36m3.9291\u001b[0m \u001b[32m4.0153\u001b[0m 0.0353\n 57 \u001b[36m3.9120\u001b[0m \u001b[32m3.9965\u001b[0m 0.0356\n 58 \u001b[36m3.8953\u001b[0m \u001b[32m3.9785\u001b[0m 0.0338\n 59 \u001b[36m3.8791\u001b[0m \u001b[32m3.9611\u001b[0m 0.0341\n 60 \u001b[36m3.8634\u001b[0m \u001b[32m3.9444\u001b[0m 0.0350\n 61 \u001b[36m3.8482\u001b[0m \u001b[32m3.9284\u001b[0m 0.0352\n 62 \u001b[36m3.8335\u001b[0m \u001b[32m3.9129\u001b[0m 0.0347\n 63 \u001b[36m3.8192\u001b[0m \u001b[32m3.8981\u001b[0m 0.0346\n 64 \u001b[36m3.8054\u001b[0m \u001b[32m3.8837\u001b[0m 0.0351\n 65 \u001b[36m3.7920\u001b[0m \u001b[32m3.8699\u001b[0m 0.0356\n 66 \u001b[36m3.7790\u001b[0m \u001b[32m3.8565\u001b[0m 0.0347\n 67 \u001b[36m3.7663\u001b[0m \u001b[32m3.8437\u001b[0m 0.0343\n 68 \u001b[36m3.7540\u001b[0m \u001b[32m3.8312\u001b[0m 0.0339\n 69 \u001b[36m3.7421\u001b[0m \u001b[32m3.8192\u001b[0m 0.0375\n 70 \u001b[36m3.7304\u001b[0m \u001b[32m3.8075\u001b[0m 0.0332\n 71 \u001b[36m3.7191\u001b[0m \u001b[32m3.7962\u001b[0m 0.0343\n 72 \u001b[36m3.7080\u001b[0m \u001b[32m3.7853\u001b[0m 0.0345\n 73 \u001b[36m3.6972\u001b[0m \u001b[32m3.7747\u001b[0m 0.0339\n 74 \u001b[36m3.6866\u001b[0m \u001b[32m3.7644\u001b[0m 0.0363\n 75 \u001b[36m3.6763\u001b[0m \u001b[32m3.7543\u001b[0m 0.0339\n 76 \u001b[36m3.6662\u001b[0m \u001b[32m3.7445\u001b[0m 0.0346\n 77 \u001b[36m3.6564\u001b[0m \u001b[32m3.7349\u001b[0m 0.0342\n 78 \u001b[36m3.6467\u001b[0m \u001b[32m3.7255\u001b[0m 0.0337\n 79 \u001b[36m3.6374\u001b[0m \u001b[32m3.7162\u001b[0m 0.0329\n 80 \u001b[36m3.6282\u001b[0m \u001b[32m3.7072\u001b[0m 0.0335\n 81 \u001b[36m3.6192\u001b[0m \u001b[32m3.6984\u001b[0m 0.0329\n 82 \u001b[36m3.6104\u001b[0m \u001b[32m3.6899\u001b[0m 0.0335\n 83 \u001b[36m3.6017\u001b[0m \u001b[32m3.6816\u001b[0m 0.0354\n 84 \u001b[36m3.5932\u001b[0m \u001b[32m3.6736\u001b[0m 0.0369\n 85 \u001b[36m3.5848\u001b[0m \u001b[32m3.6657\u001b[0m 0.0336\n 86 \u001b[36m3.5766\u001b[0m \u001b[32m3.6581\u001b[0m 0.0338\n 87 \u001b[36m3.5684\u001b[0m \u001b[32m3.6507\u001b[0m 0.0333\n 88 \u001b[36m3.5604\u001b[0m \u001b[32m3.6434\u001b[0m 0.0324\n 89 \u001b[36m3.5526\u001b[0m \u001b[32m3.6363\u001b[0m 0.0337\n 90 \u001b[36m3.5449\u001b[0m \u001b[32m3.6293\u001b[0m 0.0330\n 91 \u001b[36m3.5373\u001b[0m \u001b[32m3.6225\u001b[0m 0.0332\n 92 \u001b[36m3.5299\u001b[0m \u001b[32m3.6157\u001b[0m 0.0340\n 93 \u001b[36m3.5225\u001b[0m \u001b[32m3.6091\u001b[0m 0.0328\n 94 \u001b[36m3.5153\u001b[0m \u001b[32m3.6026\u001b[0m 0.0329\n 95 \u001b[36m3.5082\u001b[0m \u001b[32m3.5962\u001b[0m 0.0363\n 96 \u001b[36m3.5011\u001b[0m \u001b[32m3.5899\u001b[0m 0.0328\n 97 \u001b[36m3.4941\u001b[0m \u001b[32m3.5838\u001b[0m 0.0333\n 98 \u001b[36m3.4872\u001b[0m \u001b[32m3.5777\u001b[0m 0.0334\n 99 
\u001b[36m3.4804\u001b[0m \u001b[32m3.5718\u001b[0m 0.0334\n 100 \u001b[36m3.4738\u001b[0m \u001b[32m3.5659\u001b[0m 0.0328\n 101 \u001b[36m3.4673\u001b[0m \u001b[32m3.5602\u001b[0m 0.0406\n 102 \u001b[36m3.4610\u001b[0m \u001b[32m3.5545\u001b[0m 0.0332\n 103 \u001b[36m3.4548\u001b[0m \u001b[32m3.5489\u001b[0m 0.0336\n 104 \u001b[36m3.4487\u001b[0m \u001b[32m3.5434\u001b[0m 0.0334\n 105 \u001b[36m3.4427\u001b[0m \u001b[32m3.5380\u001b[0m 0.0357\n 106 \u001b[36m3.4368\u001b[0m \u001b[32m3.5326\u001b[0m 0.0351\n 107 \u001b[36m3.4310\u001b[0m \u001b[32m3.5273\u001b[0m 0.0331\n 108 \u001b[36m3.4253\u001b[0m \u001b[32m3.5221\u001b[0m 0.0343\n 109 \u001b[36m3.4196\u001b[0m \u001b[32m3.5170\u001b[0m 0.0323\n 110 \u001b[36m3.4139\u001b[0m \u001b[32m3.5119\u001b[0m 0.0328\n 111 \u001b[36m3.4082\u001b[0m \u001b[32m3.5070\u001b[0m 0.0325\n 112 \u001b[36m3.4026\u001b[0m \u001b[32m3.5021\u001b[0m 0.0341\n 113 \u001b[36m3.3971\u001b[0m \u001b[32m3.4974\u001b[0m 0.0323\n 114 \u001b[36m3.3916\u001b[0m \u001b[32m3.4927\u001b[0m 0.0329\n 115 \u001b[36m3.3859\u001b[0m \u001b[32m3.4885\u001b[0m 0.0372\n 116 \u001b[36m3.3808\u001b[0m \u001b[32m3.4843\u001b[0m 0.0631\n 117 \u001b[36m3.3757\u001b[0m \u001b[32m3.4798\u001b[0m 0.0325\n 118 \u001b[36m3.3704\u001b[0m \u001b[32m3.4755\u001b[0m 0.0333\n 119 \u001b[36m3.3652\u001b[0m \u001b[32m3.4712\u001b[0m 0.0339\n 120 \u001b[36m3.3602\u001b[0m \u001b[32m3.4671\u001b[0m 0.0368\n 121 \u001b[36m3.3551\u001b[0m \u001b[32m3.4630\u001b[0m 0.0375\n 122 \u001b[36m3.3502\u001b[0m \u001b[32m3.4589\u001b[0m 0.0356\n 123 \u001b[36m3.3452\u001b[0m \u001b[32m3.4549\u001b[0m 0.0352\n 124 \u001b[36m3.3404\u001b[0m \u001b[32m3.4509\u001b[0m 0.0354\n 125 \u001b[36m3.3355\u001b[0m \u001b[32m3.4469\u001b[0m 0.0339\n 126 \u001b[36m3.3307\u001b[0m \u001b[32m3.4428\u001b[0m 0.0366\n 127 \u001b[36m3.3260\u001b[0m \u001b[32m3.4388\u001b[0m 0.0346\n 128 \u001b[36m3.3214\u001b[0m \u001b[32m3.4345\u001b[0m 0.0329\n 129 \u001b[36m3.3165\u001b[0m \u001b[32m3.4310\u001b[0m 0.0365\n 130 \u001b[36m3.3130\u001b[0m \u001b[32m3.4250\u001b[0m 0.0336\n 131 \u001b[36m3.3070\u001b[0m \u001b[32m3.4229\u001b[0m 0.0342\n" ], [ "train_loss = nett.history[:, 'train_loss']\nvalid_loss = nett.history[:, 'valid_loss']\n\nplt.plot(train_loss, '-', label='training')\nplt.plot(valid_loss, '--', label='validation')\nplt.ylim(2,4)\nplt.xlim(50,1000)\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "prova = nett.predict(Xval)", "_____no_output_____" ], [ "x1 = prova[:,0]\ny1 = prova[:,1]\nplt.scatter(x1, y1)", "_____no_output_____" ], [ "x_real = yval[:,0]\ny_real = yval[:,1]\nplt.scatter(x_real, y_real)", "_____no_output_____" ], [ "mean_euclidean_error(prova, yval)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
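The regression notebook in the record above experiments with several formulations of its Mean Euclidean Error (MEE) loss, leaving alternatives (`cdist`, `pairwise_distance`, an un-axised norm) commented out. The snippet below is a minimal, self-contained sketch of the formulation the notebook settles on — the row-wise L2 distance averaged over samples — written here for illustration only; the `mee_numpy`/`mee_torch` names and the toy arrays are not part of that repository.

```python
import numpy as np
import torch

def mee_numpy(y_true, y_pred):
    # Row-wise Euclidean distance between 2-D targets, averaged over samples.
    assert y_true.shape == y_pred.shape
    return np.mean(np.linalg.norm(y_true - y_pred, axis=1))

def mee_torch(y_true, y_pred):
    # Same metric on tensors; differentiable, so it can double as a training loss.
    return torch.mean(torch.linalg.norm(y_true - y_pred, dim=1))

# Toy check that the two implementations agree.
a = np.array([[0.0, 0.0], [3.0, 4.0]])
b = np.array([[1.0, 0.0], [0.0, 0.0]])
print(mee_numpy(a, b))                                      # (1 + 5) / 2 = 3.0
print(mee_torch(torch.tensor(a), torch.tensor(b)).item())   # 3.0
```

Averaging the per-sample distances (rather than taking a single norm of the whole residual matrix) keeps the value comparable across batch sizes, which is why the axis/dim argument matters among the commented-out variants.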
cbc51024703d35c82ae0a5cbf709809270a645da
3,844
ipynb
Jupyter Notebook
content/lessons/06/End-To-End-Example/ETEE-Tip-Calculator.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
14
2017-02-23T21:00:46.000Z
2021-03-19T09:29:40.000Z
content/lessons/06/End-To-End-Example/ETEE-Tip-Calculator.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
null
null
null
content/lessons/06/End-To-End-Example/ETEE-Tip-Calculator.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
38
2017-02-03T13:49:19.000Z
2021-08-15T16:47:56.000Z
24.641026
144
0.530177
[ [ [ "# End-To-End Example: Tip Calculator\n\nThe following code calculates the amount of tip you should leave based on the amount of the check and the percentage you would like to tip.\n\n", "_____no_output_____" ] ], [ [ "total = float(input(\"Enter Amount of Check: \"))\ntip = float(input(\"Enter the Tip Percentage: \"))\ntip_amount = total * tip\nprint (\"You should leave this amount for a tip $%.2f\" % (tip_amount))", "Enter Amount of Check: 100\nEnter the Tip Percentage: 15\nYou should leave this amount for a tip $1500.00\n" ] ], [ [ "## The Issues\n\nThe issue with this program is that it's not smart with the tip percentage. When I enter 15 it assumes 1500%, not 15%. \n\nWith our knowledge of strings and parsing we can make this program more intelligent:\n\n- When you enter `0.15` it uses `0.15`\n- When you enter `15` it assumes you meant `0.15` (divides by 100)\n- When you enter `15%` it assumes you meant `0.15` (removes the %, then divides by 100)\n\n\nLikewise, we should do the same for currency input, assuming the user might enter a $.", "_____no_output_____" ] ], [ [ "# exploring how to parse percentages\nx = \"15 %\"\ny = float(x.replace('%',''))\nif y >=1:\n y = y/100\nprint(y)", "_____no_output_____" ], [ "## Function: percentage - parses string input into a float as a percentage\n## Arguments: text\n## Returns float\ndef percentage(text):\n number = float(text.replace('%',''))\n if number >1:\n number = number /100\n return number\n\n## Function: currency - parses string input into a float as currency\n## Arguments: text\n## Returns float\ndef currency(text):\n number = float(text.replace('$',''))\n return number", "_____no_output_____" ], [ "total = currency(input(\"Enter Amount of Check: \"))\ntip = percentage(input(\"Enter the Tip Percentage: \"))\ntip_amount = total * tip\nprint (\"You should leave this amount for a tip $%.2f\" % (tip_amount))", "Enter Amount of Check: 100\nEnter the Tip Percentage: .15\nYou should leave this amount for a tip $15.00\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
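The tip-calculator record above normalises tip input so that `0.15`, `15`, and `15%` all mean fifteen percent. The short sketch below exercises that parsing rule on a few sample strings; the `percentage` function mirrors the notebook's helper, while the assertion loop is an illustrative addition rather than part of the original lesson.

```python
def percentage(text):
    # Accept "0.15", "15", or "15%" and return 0.15 in every case.
    number = float(text.replace('%', '').strip())
    if number > 1:
        number = number / 100
    return number

for raw in ("0.15", "15", "15%", " 15 % "):
    assert abs(percentage(raw) - 0.15) < 1e-9
print("all four inputs parse to 0.15")
```

Normalising the input once at the boundary keeps the later arithmetic free of any unit guessing.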
cbc51eaccb4717affb86f7a82e2f6b97a6f69b1c
32,466
ipynb
Jupyter Notebook
model/CHF_Experiment.ipynb
irenetrampoline/clustering-interval-censored
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
[ "MIT" ]
1
2022-02-03T08:47:45.000Z
2022-02-03T08:47:45.000Z
model/CHF_Experiment.ipynb
irenetrampoline/clustering-interval-censored
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
[ "MIT" ]
null
null
null
model/CHF_Experiment.ipynb
irenetrampoline/clustering-interval-censored
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
[ "MIT" ]
null
null
null
28.231304
209
0.52113
[ [ [ "1. Split into train and test data\n2. Train model on train data normally\n3. Take test data and duplicate into test prime \n4. Drop first visit from test prime data\n5. Get predicted delta from test prime data. Compare to delta from test data. We know the difference (epsilon) because we dropped actual visits. What percent of time is test delta < test prime delta? \n6. Restrict it only to patients with lot of visits. Is this better?", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pickle\n\ndef clean_plot():\n ax = plt.subplot(111) \n ax.spines[\"top\"].set_visible(False) \n ax.spines[\"bottom\"].set_visible(False) \n ax.spines[\"right\"].set_visible(False) \n ax.spines[\"left\"].set_visible(False) \n \n ax.get_xaxis().tick_bottom() \n ax.get_yaxis().tick_left() \n plt.grid()\n\nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize': 'x-large',\n# 'figure.figsize': (10,6),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large'}\npylab.rcParams.update(params)", "_____no_output_____" ], [ "import sys\nimport torch\n\nsys.path.append('../data')\nfrom load import chf\nfrom data_utils import parse_data\nfrom synthetic_data import load_piecewise_synthetic_data\n\n\nsys.path.append('../model')\nfrom models import Sublign\nfrom run_experiments import get_hyperparameters\n", "_____no_output_____" ], [ "def make_test_prime(test_data_dict_raw, drop_first_T=1.):\n # drop first year\n test_data_dict = copy.deepcopy(test_data_dict_raw)\n eps_lst = list()\n \n X = test_data_dict['obs_t_collect']\n Y = test_data_dict['Y_collect']\n M = test_data_dict['mask_collect']\n \n N_patients = X.shape[0]\n N_visits = X.shape[1]\n \n for i in range(N_patients):\n eps_i = X[i,1,0] - X[i,0,0]\n \n first_visit = X[i,1,0]\n # move all visits down (essentially destroying the first visit)\n for j in range(N_visits-gap):\n \n X[i,j,0] = X[i,j+gap,0] - first_visit\n Y[i,j,:] = Y[i,j+gap,:]\n M[i,j,:] = M[i,j+gap,:]\n \n for g in range(1,gap+1):\n X[i,N_visits-g,0] = int(-1000)\n Y[i,N_visits-g,:] = int(-1000)\n M[i,N_visits-g,:] = 0.\n \n eps_lst.append(eps_i)\n return test_data_dict, eps_lst", "_____no_output_____" ], [ "data = chf()\nmax_visits = 38\nshuffle = True\nnum_output_dims = data.shape[1] - 4\n\ndata_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits)\ntrain_data_loader, train_data_dict, test_data_loader, test_data_dict, test_pid, unique_pid = parse_data(data.values, \n max_visits=max_visits, test_per=0.2, \n shuffle=shuffle)\n\n# model = Sublign(10, 20, 50, dim_biomarkers=num_output_dims, sigmoid=True, reg_type='l1', auto_delta=True, \n# max_delta=5, learn_time=True, device=torch.device('cuda'))\n# # model.fit(data_loader, data_loader, args.epochs, 0.01, verbose=args.verbose,fname='runs/chf.pt',eval_freq=25)\n\n# fname='../model/chf_good.pt'\n# model.load_state_dict(torch.load(fname,map_location=torch.device('cuda')))\n\n\ntest_p_data_dict, eps_lst = make_test_prime(test_data_dict, gap=1)\n\n\n# test_deltas = model.get_deltas(test_data_dict).detach().numpy()\n# test_p_deltas = model.get_deltas(test_p_data_dict).detach().numpy()", "_____no_output_____" ], [ "print(num_output_dims)", "_____no_output_____" ], [ "# def make_test_prime(test_data_dict_raw, drop_first_T=1.):\ndrop_first_T = 0.5\n# drop first year\n\ntest_data_dict_new = copy.deepcopy(test_data_dict)\neps_lst = list()\n\nX = test_data_dict_new['obs_t_collect']\nY = 
test_data_dict_new['Y_collect']\nM = test_data_dict_new['mask_collect']\n\nN_patients = X.shape[0]\nN_visits = X.shape[1]\n\nremove_idx = list()\n\nX[X == -1000] = np.nan\n\nfor i in range(N_patients):\n N_visits_under_thresh = (X[i] < 0.5).sum()\n gap = N_visits_under_thresh\n \n first_valid_visit = X[i,N_visits_under_thresh,0]\n \n eps_i = X[i,N_visits_under_thresh,0]\n \n for j in range(N_visits-N_visits_under_thresh):\n X[i,j,0] = X[i,j+gap,0] - first_valid_visit\n Y[i,j,:] = Y[i,j+gap,:]\n M[i,j,:] = M[i,j+gap,:]\n\n for g in range(1,N_visits_under_thresh+1):\n X[i,N_visits-g,0] = np.nan\n Y[i,N_visits-g,:] = np.nan\n M[i,N_visits-g,:] = 0.\n\n if np.isnan(X[i]).all():\n remove_idx.append(i)\n else:\n eps_lst.append(eps_i)\n\nkeep_idx = [i for i in range(N_patients) if i not in remove_idx]\nX = X[keep_idx]\nY = Y[keep_idx]\nM = M[keep_idx]\n\nprint('Removed %d entries' % len(remove_idx))\nX[np.isnan(X)] = -1000\n\n# eps_lst.append(eps_i)\n# return test_data_dict_new, eps_lst", "_____no_output_____" ], [ "eps_lst", "_____no_output_____" ], [ "X[0]", "_____no_output_____" ], [ "first_valid_visit", "_____no_output_____" ], [ "test_data_dict_new = copy.deepcopy(test_data_dict)\n\nX = test_data_dict_new['obs_t_collect']\nY = test_data_dict_new['Y_collect']\nM = test_data_dict_new['mask_collect']\nX[X == -1000] = np.nan\n\ni = 1\nN_visits_under_thresh = (X[i] < 0.5).sum()\n\n\n\n# for j in range(N_visits-N_visits_under_thresh):\n# X[i,j,0] = X[i,j+gap,0] - first_visit\n# Y[i,j,:] = Y[i,j+gap,:]\n# M[i,j,:] = M[i,j+gap,:]\n\n# for g in range(1,N_visits_under_thresh+1):\n# X[i,N_visits-g,0] = np.nan\n# Y[i,N_visits-g,:] = np.nan\n# M[i,N_visits-g,:] = 0.\n\n# if np.isnan(X[i]).all():\n# print('yes')\n# remove_idx.append(i)\n", "_____no_output_____" ], [ "(X[1] < 0.5).sum()", "_____no_output_____" ], [ "N_visits_under_thresh", "_____no_output_____" ], [ "N_visits_under_thresh", "_____no_output_____" ], [ "len(remove_idx)", "_____no_output_____" ], [ "X[X == -1000] = np.nan\nfor i in range(10):\n print(X[i].flatten())", "_____no_output_____" ], [ "remove_idx", "_____no_output_____" ], [ "X[0][:10]", "_____no_output_____" ], [ "plt.hist(X.flatten())", "_____no_output_____" ], [ "X.max()", "_____no_output_____" ], [ "Y[1][:10]", "_____no_output_____" ], [ "test_data_dict_new['']", "_____no_output_____" ], [ "f = open('chf_experiment_results.pk', 'rb')\nresults = pickle.load(f)\ntest_deltas = results['test_deltas']\ntest_p_deltas = results['test_p_deltas']\neps_lst = results['eps_lst']\ntest_data_dict = results['test_data_dict']\nf.close()", "_____no_output_____" ], [ "test_data_dict['obs_t_collect'][0].shape", "_____no_output_____" ], [ "# get num of visits per patient\nnum_visits_patient_lst = list()\nfor i in test_data_dict['obs_t_collect']:\n num_visits = (i!=-1000).sum()\n num_visits_patient_lst.append(num_visits)\n\nnum_visits_patient_lst = np.array(num_visits_patient_lst)", "_____no_output_____" ], [ "freq_visit_idx = np.where(num_visits_patient_lst > 10)[0]", "_____no_output_____" ], [ "test_p_deltas[freq_visit_idx]", "_____no_output_____" ], [ "test_deltas[freq_visit_idx]", "_____no_output_____" ], [ "np.mean(np.array(test_p_deltas - test_deltas) > 0)", "_____no_output_____" ], [ "test_p_deltas[:20]", "_____no_output_____" ], [ "clean_plot()\nplt.plot(eps_lst, test_p_deltas - test_deltas, '.')\nplt.xlabel('Actual eps')\nplt.ylabel('Estimated eps')\n# plt.savefig('')", "_____no_output_____" ], [ "import copy \n\ndef make_test_prime(test_data_dict_raw, gap=1):\n test_data_dict = 
copy.deepcopy(test_data_dict_raw)\n eps_lst = list()\n \n X = test_data_dict['obs_t_collect']\n Y = test_data_dict['Y_collect']\n M = test_data_dict['mask_collect']\n \n N_patients = X.shape[0]\n N_visits = X.shape[1]\n \n for i in range(N_patients):\n eps_i = X[i,1,0] - X[i,0,0]\n \n first_visit = X[i,1,0]\n # move all visits down (essentially destroying the first visit)\n for j in range(N_visits-gap):\n \n X[i,j,0] = X[i,j+gap,0] - first_visit\n Y[i,j,:] = Y[i,j+gap,:]\n M[i,j,:] = M[i,j+gap,:]\n \n for g in range(1,gap+1):\n X[i,N_visits-g,0] = int(-1000)\n Y[i,N_visits-g,:] = int(-1000)\n M[i,N_visits-g,:] = 0.\n \n eps_lst.append(eps_i)\n return test_data_dict, eps_lst", "_____no_output_____" ], [ "t_prime_dict, eps_lst = make_test_prime(test_data_dict)", "_____no_output_____" ], [ "t_prime_dict['Y_collect'][1,:,0]", "_____no_output_____" ], [ "test_data_dict['Y_collect'][1,:,0]", "_____no_output_____" ] ], [ [ "## Plot successful model", "_____no_output_____" ] ], [ [ "import argparse\nimport numpy as np\nimport pickle\nimport sys\nimport torch\nimport copy\n\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\n\nfrom run_experiments import get_hyperparameters\nfrom models import Sublign\n\nsys.path.append('../data')\n\nfrom data_utils import parse_data\nfrom load import load_data_format\n\nsys.path.append('../evaluation')\nfrom eval_utils import swap_metrics", "_____no_output_____" ], [ "train_data_dict['Y_collect'].shape\n", "_____no_output_____" ], [ "train_data_dict['t_collect'].shape", "_____no_output_____" ], [ "new_Y = np.zeros((600,101,3))", "_____no_output_____" ], [ "val_idx_dict = {'%.1f' % j: i for i,j in enumerate(np.linspace(0,10,101))}", "_____no_output_____" ], [ "train_data_dict['obs_t_collect'].max()", "_____no_output_____" ], [ "rounded_t = np.round(train_data_dict['t_collect'],1)\nN, M, _ = rounded_t.shape\n\nfor i in range(N):\n for j in range(M):\n val = rounded_t[i,j,0]\n# try:\n idx = val_idx_dict['%.1f' % val]\n for k in range(3):\n new_Y[i,idx,k] = train_data_dict['Y_collect'][i,j,k]\n# except:\n# print(val)", "_____no_output_____" ], [ "new_Y.shape", "_____no_output_____" ], [ "(new_Y == 0).sum() / (600*101*3)", "_____no_output_____" ], [ "# save the files for comparing against SPARTan baseline\n\nfor i in range(3):\n a = new_Y[:,:,i]\n np.savetxt(\"data1_dim%d.csv\" % i, a, delimiter=\",\")", "_____no_output_____" ], [ "true_labels = train_data_dict['s_collect'][:,0]\nguess_labels = np.ones(600)\n\nadjusted_rand_score(true_labels,guess_labels)", "_____no_output_____" ], [ "from sklearn.metrics import adjusted_rand_score\n# a.shape", "_____no_output_____" ], [ "data_format_num = 1\n# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\nanneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\nC\ndata = load_data_format(data_format_num, 0, cache=True)\n\ntrain_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)\n\nmodel = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=False, max_delta=0, learn_time=False, beta=0.00)\nmodel.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)\n\nz = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])\n# fname='runs/data%d_chf_experiment.pt' % (data_format_num)\n# 
model.load_state_dict(torch.load(fname))\nnolign_results = model.score(train_data_dict, test_data_dict)\nprint('ARI: %.3f' % nolign_results['ari'])", "_____no_output_____" ], [ "print(anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr)", "_____no_output_____" ], [ "data_format_num = 1\n# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\nanneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\n\nmodel = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, beta=0.01)\nmodel.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d.pt' % (data_format_num), eval_freq=25)\n\nz = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])\n# fname='runs/data%d_chf_experiment.pt' % (data_format_num)\n# model.load_state_dict(torch.load(fname))\nresults = model.score(train_data_dict, test_data_dict)\nprint('ARI: %.3f' % results['ari'])", "_____no_output_____" ], [ "# model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, b_vae=0.)\n# model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)\n\n# z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])\n# # fname='runs/data%d_chf_experiment.pt' % (data_format_num)\n# # model.load_state_dict(torch.load(fname))\n# results = model.score(train_data_dict, test_data_dict)\n# print('ARI: %.3f' % results['ari'])\n", "_____no_output_____" ], [ "# Visualize latent space (change configs above)\nX = test_data_dict['obs_t_collect']\nY = test_data_dict['Y_collect']\nM = test_data_dict['mask_collect']\n\n\ntest_z, _ = model.get_mu(X,Y)\ntest_z = test_z.detach().numpy()\n\ntest_subtypes = test_data_dict['s_collect']\n\nfrom sklearn.manifold import TSNE\nz_tSNE = TSNE(n_components=2).fit_transform(test_z)\n\ntest_s0_idx = np.where(test_subtypes==0)[0]\ntest_s1_idx = np.where(test_subtypes==1)[0]\n\nclean_plot()\nplt.plot(z_tSNE[test_s0_idx,0],z_tSNE[test_s0_idx,1],'.')\nplt.plot(z_tSNE[test_s1_idx,0],z_tSNE[test_s1_idx,1],'.')\n\n# plt.title('\\nNELBO (down): %.3f, ARI (up): %.3f\\n Config: %s\\nColors = true subtypes' % \n# (nelbo, ari, configs))\nplt.show()", "_____no_output_____" ], [ "def sigmoid_f(x, beta0, beta1):\n result = 1. 
/ (1+np.exp(-(beta0 + beta1*x)))\n return result\n\ntrue_betas = [[[-4, 1],\n [-1,1.],\n [-8,8]\n ],\n [\n [-1,1.],\n [-8,8],\n [-25, 3.5]\n ]]", "_____no_output_____" ], [ "# xs = np.linspace(0,10,100)\n\nfor dim_i in range(3):\n xs = np.linspace(0,10,100)\n \n plt.figure()\n clean_plot()\n plt.grid(True)\n ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]\n plt.plot(xs,ys, ':', color='gray', linewidth=5, label='True function')\n\n ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]\n plt.plot(xs,ys, ':', color='gray', linewidth=5)\n\n for subtype_j in range(2):\n \n\n xs = np.linspace(0,10,100)\n ys = [sigmoid_f(xs_i, nolign_results['cent_lst'][subtype_j,dim_i,0], \n nolign_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]\n if subtype_j == 0:\n plt.plot(xs,ys,linewidth=4, label='SubNoLign subtype', linestyle='-.', color='tab:green')\n else:\n plt.plot(xs,ys,linewidth=4, linestyle='--', color='tab:green')\n\n ys = [sigmoid_f(xs_i, results['cent_lst'][subtype_j,dim_i,0], \n results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]\n if subtype_j == 0:\n plt.plot(xs,ys,linewidth=4, label='SubLign subtype', linestyle='-', color='tab:purple')\n else:\n plt.plot(xs,ys,linewidth=4, linestyle='-', color='tab:purple')\n\n\n \n plt.xlabel('Disease stage')\n plt.ylabel('Biomarker')\n plt.legend()\n plt.savefig('subnolign_data1_subtypes_dim%d.pdf' % dim_i, bbox_inches='tight')\n ", "_____no_output_____" ], [ "# # number dimensions\n# fig, axs = plt.subplots(1,3, figsize=(8,4))\n# for dim_i in range(3):\n# ax = axs[dim_i]\n# # number subtypes\n# for subtype_j in range(2):\n# xs = np.linspace(0,10,100)\n# ys = [sigmoid_f(xs_i, model1_results['cent_lst'][subtype_j,dim_i,0], \n# model1_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]\n \n\n# ax.plot(xs,ys)\n# ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]\n# ax.plot(xs,ys, color='gray')\n \n# ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]\n# ax.plot(xs,ys, color='gray')\n \n# fig.suptitle('True data generating function (gray), learned models (orange, blue)')\n# plt.savefig('learned_models.pdf',bbox_inches='tight')", "_____no_output_____" ] ], [ [ "## Plot CHF Delta distributions", "_____no_output_____" ] ], [ [ "data = pickle.load(open('../clinical_runs/chf_v3_1000.pk', 'rb'))\nclean_plot()\nplt.hist(data['deltas'], bins=20)\nplt.xlabel('Inferred Alignment $\\delta_i$ Value')\nplt.ylabel('Number Heart Failure Patients')\nplt.savefig('Delta_dist_chf.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "## Make piecewise data to measure model misspecification", "_____no_output_____" ] ], [ [ "from scipy import interpolate", "_____no_output_____" ], [ "x = np.arange(0, 2*np.pi+np.pi/4, 2*np.pi/8)\ny = np.sin(x)\ntck = interpolate.splrep(x, y, s=0)\nxnew = np.arange(0, 2*np.pi, np.pi/50)\nynew = interpolate.splev(xnew, tck, der=0)", "_____no_output_____" ], [ "xvals = np.array([9.3578453 , 4.9814664 , 7.86530539, 8.91318433, 2.00779188])[sort_idx]\nyvals = np.array([0.35722491, 0.12512101, 0.20054626, 0.38183604, 0.58836923])[sort_idx]\ntck = interpolate.splrep(xvals, yvals, s=0)", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "N_subtypes,D,N_pts,_ = subtype_points.shape\n\nfig, axes = plt.subplots(ncols=3,nrows=1)\n\nfor d, ax in enumerate(axes.flat):\n# ax.set_xlim(0,10)\n# ax.set_ylim(0,1)\n for k in range(N_subtypes):\n xs = subtype_points[k,d,:,0]\n ys = 
subtype_points[k,d,:,1]\n sort_idx = np.argsort(xs)\n ax.plot(xs[sort_idx],ys[sort_idx])\n\nplt.show()\n\n# for d in range(D):\n ", "_____no_output_____" ], [ "%%time\nN_epochs = 800\nN_trials = 5\nuse_sigmoid = True\n\nsublign_results = {\n 'ari':[],\n 'pear': [],\n 'swaps': []\n}\nsubnolign_results = {'ari': []}\n\nfor trial in range(N_trials):\n data_format_num = 1\n # C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\n anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)\n # C\n # data = load_data_format(data_format_num, 0, cache=True)\n\n use_sigmoid = False\n\n data, subtype_points = load_piecewise_synthetic_data(subtypes=2, increasing=use_sigmoid, \n D=3, N=2000,M=4, noise=0.25, N_pts=5)\n\n train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)\n\n model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1', \n auto_delta=False, max_delta=5, learn_time=True, beta=1.)\n model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)\n\n # z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])\n # fname='runs/data%d_chf_experiment.pt' % (data_format_num)\n # model.load_state_dict(torch.load(fname))\n results = model.score(train_data_dict, test_data_dict)\n print('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))\n sublign_results['ari'].append(results['ari'])\n sublign_results['pear'].append(results['pear'])\n sublign_results['swaps'].append(results['swaps'])\n \n model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1', \n auto_delta=False, max_delta=0, learn_time=False, beta=1.)\n model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)\n nolign_results = model.score(train_data_dict, test_data_dict)\n print('SubNoLign results: ARI: %.3f' % (nolign_results['ari']))\n subnolign_results['ari'].append(nolign_results['ari'])", "_____no_output_____" ], [ "data_str = 'Increasing' if use_sigmoid else 'Any'\nprint('SubLign-%s & %.2f $\\\\pm$ %.2f & %.2f $\\\\pm$ %.2f & %.2f $\\\\pm$ %.2f \\\\\\\\' % (\n data_str,\n np.mean(sublign_results['ari']), np.std(sublign_results['ari']),\n np.mean(sublign_results['pear']), np.std(sublign_results['pear']),\n np.mean(sublign_results['swaps']), np.std(sublign_results['swaps'])\n))\n\nprint('SubNoLign-%s & %.2f $\\\\pm$ %.2f & -- & -- \\\\\\\\' % (\n data_str,\n np.mean(sublign_results['ari']), np.std(sublign_results['ari']),\n))", "_____no_output_____" ], [ "results = model.score(train_data_dict, test_data_dict)\nprint('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
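The CHF-experiment record above builds a perturbed test set by deleting each patient's earliest visit(s) and shifting the remaining visits so the first kept visit sits at time zero, padding the freed slots with a -1000 sentinel and a zeroed mask; the notebook carries two slightly different copies of this routine (one signature still says `drop_first_T` while its body uses `gap`). Below is a cleaned-up sketch of the same shifting idea on plain NumPy arrays — the function name, `PAD` constant, and toy example are assumptions for illustration, not the repository's API.

```python
import numpy as np

PAD = -1000.0  # sentinel the notebook uses for "no visit"

def drop_first_visits(X, Y, M, gap=1):
    # Shift each patient's visits left by `gap`, re-zeroing time at the first
    # kept visit; return shifted copies plus the per-patient removed offset.
    X, Y, M = X.copy(), Y.copy(), M.copy()
    n_patients, n_visits, _ = X.shape
    eps = np.zeros(n_patients)
    for i in range(n_patients):
        origin = X[i, gap, 0]            # time of the first visit we keep
        eps[i] = origin - X[i, 0, 0]     # amount of history removed
        X[i, :n_visits - gap, 0] = X[i, gap:, 0] - origin
        Y[i, :n_visits - gap] = Y[i, gap:].copy()
        M[i, :n_visits - gap] = M[i, gap:].copy()
        X[i, n_visits - gap:, 0] = PAD
        Y[i, n_visits - gap:] = PAD
        M[i, n_visits - gap:] = 0.0
    return X, Y, M, eps

# One toy patient with four visits at t = 0, 1, 2, 3 and two biomarkers.
X = np.arange(4, dtype=float).reshape(1, 4, 1)
Y = np.ones((1, 4, 2)); M = np.ones((1, 4, 2))
Xp, Yp, Mp, eps = drop_first_visits(X, Y, M, gap=1)
print(Xp[0, :, 0])   # [    0.     1.     2. -1000.]
print(eps)           # [1.]
```

Comparing the model's inferred alignment on the shifted data against the alignment on the full data then gives the epsilon check described in the record's opening markdown cell.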
cbc523111fda37b7818fa3641e119494cf36c139
5,647
ipynb
Jupyter Notebook
HIGH/SASH_2D.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
1
2021-08-04T09:59:37.000Z
2021-08-04T09:59:37.000Z
HIGH/SASH_2D.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
null
null
null
HIGH/SASH_2D.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
null
null
null
26.38785
130
0.493005
[ [ [ "import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport zarr\nimport math\nimport glob\nimport pickle\nimport statistics\nimport scipy.stats as stats\nfrom sklearn.neighbors import KernelDensity\nimport dask\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs", "_____no_output_____" ], [ "def get_files():\n models = glob.glob(\"/terra/data/cmip5/global/historical/*\")\n avail={}\n for model in models:\n zg = glob.glob(str(model)+\"/r1i1p1/day/2deg/zg*\")\n try:\n test = zg[0]\n avail[model.split('/')[-1]] = zg\n except:\n pass\n return avail", "_____no_output_____" ], [ "files = get_files()", "_____no_output_____" ], [ "files['NOAA'] = glob.glob(\"/home/pmarsh/NOAA_2deg/NOAA_zg/*.nc\")\nfiles['ERA5'] = glob.glob(\"/home/pmarsh/NOAA_2deg/ERA5_zg/*.nc\")\nfiles.pop('MIROC-ESM')", "_____no_output_____" ], [ "def contourise(x):\n x = x.fillna(0)\n x = x.where((x>=limit))\n x = x/x\n return x", "_____no_output_____" ], [ "results={}\nfor model in files.keys():\n print(model)\n x = xr.open_mfdataset(files[model])\n if model == 'NOAA':\n x = x.rename({'hgt':'zg'})\n x = x.rename({'level':'plev'})\n x = x.sel(plev=850)\n x = x.sel(time=slice('1950','2005'))\n elif model == 'ERA5':\n x = x.rename({'level':'plev'})\n x = x.sel(plev=850)\n x = x.sel(time=slice('1979','2005'))\n else:\n x = x.sel(plev=85000)\n x = x.sel(time=slice('1950','2005'))\n x = x.load()\n x = x.sel(lat=slice(-60,0))\n x = x[['zg']]\n x = x.assign_coords(lon=(((x.lon + 180) % 360) - 180))\n with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n x = x.sortby(x.lon)\n x = x.sel(lon=slice(-50,20))\n x = x.resample(time=\"QS-DEC\").mean(dim=\"time\",skipna=True)\n x = x.load()\n limit = np.nanquantile(x.zg.values,0.9)\n results[model]={}\n for seas in ['DJF','MAM','JJA','SON']:\n mean_seas = x.where(x.time.dt.season==str(seas)).dropna(dim='time')\n mean_seas = contourise(mean_seas).zg.fillna(0).mean(dim='time')\n results[model][seas] = mean_seas.fillna(0)\n x.close()", "IPSL-CM5A-LR\n" ], [ "pickle.dump(results, open( \"../HIGH_OUT/SASH_track_2D.p\", \"wb\" ) )", "_____no_output_____" ], [ "weights = np.cos(np.deg2rad(results['NOAA']['DJF'].lat)) #area weighted", "_____no_output_____" ], [ "#mean absolute error calc\nscores=[]\nfor index in results:\n MAE={}\n for season in ['DJF','MAM','JJA','SON']:\n ref = results['NOAA'][season]\n x = results[index][season]\n MAE[season] = (np.abs(ref - x)).weighted(weights).sum(('lat','lon'))\n scores.append([index,np.mean(MAE['DJF'].values + MAE['MAM'].values + MAE['JJA'].values + MAE['SON'].values)])", "_____no_output_____" ], [ "resultsdf = pd.DataFrame(np.array(scores),columns=['model','score'])\nresultsdf = resultsdf.sort_values('score').set_index('model')['score']", "_____no_output_____" ], [ "pickle.dump( resultsdf, open( \"../HIGH_OUT/scores_2D.p\", \"wb\" ) )\nresultsdf.to_csv(\"../HIGH_OUT/scores_2D.csv\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
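The SASH record above scores each climate model by an area-weighted absolute difference between its seasonal 90th-percentile exceedance fields and the NOAA reference, using cos(latitude) weights through xarray's `.weighted(...).sum(...)`. The plain-NumPy sketch below spells out that weighting explicitly; the toy grid, latitudes, and function name are illustrative assumptions rather than code from the notebook.

```python
import numpy as np

def area_weighted_abs_diff(ref, model, lat_deg):
    # Sum of |ref - model| weighted by cos(latitude).
    # ref, model: 2-D arrays shaped (n_lat, n_lon); lat_deg: latitudes in degrees.
    # Mirrors xarray's (|ref - x|).weighted(cos(lat)).sum(('lat', 'lon')).
    w = np.cos(np.deg2rad(lat_deg))[:, None]   # broadcast weights over longitude
    return float(np.sum(np.abs(ref - model) * w))

# Toy 3 x 4 grid spanning 60S to the equator.
lat = np.array([-60.0, -30.0, 0.0])
ref = np.zeros((3, 4))
mod = np.full((3, 4), 0.5)
print(area_weighted_abs_diff(ref, mod, lat))   # 0.5 * 4 * (cos60 + cos30 + cos0) ~ 4.73
```

The cosine weighting matters because grid cells on a regular latitude-longitude grid shrink toward the poles; without it, high-latitude mismatches would be over-counted.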
cbc530acb2bbfeffd93635a90ebe019ade10b9e2
77,979
ipynb
Jupyter Notebook
notebooks/sequencing_kos.ipynb
mgalardini/2018koyeast
8b82c567c3dfbfa3c1571911fe6b8bd59a681105
[ "Apache-2.0" ]
null
null
null
notebooks/sequencing_kos.ipynb
mgalardini/2018koyeast
8b82c567c3dfbfa3c1571911fe6b8bd59a681105
[ "Apache-2.0" ]
null
null
null
notebooks/sequencing_kos.ipynb
mgalardini/2018koyeast
8b82c567c3dfbfa3c1571911fe6b8bd59a681105
[ "Apache-2.0" ]
1
2019-01-16T13:22:11.000Z
2019-01-16T13:22:11.000Z
193.496278
49,652
0.892138
[ [ [ "kos = '../out/rev_sequencing_kos.tsv'", "_____no_output_____" ], [ "%matplotlib inline\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_style('white')\n\nplt.rc('font', size=12)", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "d = {'S288C': 'S288C',\n 'Y55': 'Y55',\n 'UWOPS87': 'UWOP',\n 'YPS606': 'YPS'}", "_____no_output_____" ], [ "inter = pd.read_csv(kos, sep='\\t')", "_____no_output_____" ], [ "inter['size'] = inter['stop'] - inter['start']", "_____no_output_____" ], [ "inter = inter[inter['size'] > 100]", "_____no_output_____" ], [ "ci = inter.groupby(['target', 'strain'])['set'].nunique()\nci = ci[ci == 2]", "_____no_output_____" ], [ "inter = inter.set_index(['target', 'strain']).loc[ci.index].reset_index()", "_____no_output_____" ], [ "niter = inter.groupby(['set', 'strain', 'gene'])['sample'].count().loc['new']\noiter = inter.groupby(['set', 'strain', 'gene'])['sample'].count().loc['original']", "_____no_output_____" ], [ "m = niter.to_frame().join(oiter.to_frame(),\n how='outer',\n lsuffix='_new',\n rsuffix='_original')\nm[np.isnan(m)] = 0.0", "_____no_output_____" ], [ "plt.figure(figsize=(4, 4))\n\nplt.plot(m['sample_original'],\n m['sample_new'],\n 'k.',\n alpha=0.3)\nplt.plot([-0.5, 13],\n [-0.5, 13],\n '--',\n color='grey',\n alpha=0.5)\n\nplt.xlabel('Number of genes with no coverage\\n(Original mutants)')\nplt.ylabel('Number of genes with no coverage\\n(New mutants)')\n\nplt.title('All strains')\n\nplt.xlim(-0.5, 13)\nplt.ylim(-0.5, 13)\n\nplt.savefig('ko_sequencing.png',\n dpi=300, bbox_inches='tight',\n transparent=True)\nplt.savefig('ko_sequencing.svg',\n dpi=300, bbox_inches='tight',\n transparent=True);", "_____no_output_____" ], [ "plt.figure(figsize=(8, 8))\n\nfor i, strain in enumerate(['S288C',\n 'Y55',\n 'YPS606',\n 'UWOPS87']):\n plt.subplot(2, 2, i+1)\n plt.plot(m.loc[strain]['sample_original'],\n m.loc[strain]['sample_new'],\n 'k.',\n alpha=0.3,\n label='_')\n \n plt.xlabel('Number of genes with no coverage\\n(Original mutants)')\n plt.ylabel('Number of genes with no coverage\\n(New mutants)')\n\n plt.title(d[strain])\n \n plt.xlim(-0.5, 13)\n plt.ylim(-0.5, 13)\n \n plt.plot([-0.5, 13],\n [-0.5, 13],\n '--',\n color='grey',\n alpha=0.5)\n\nplt.tight_layout()\n\nplt.savefig('ko_sequencing_all.png',\n dpi=300, bbox_inches='tight',\n transparent=True)\nplt.savefig('ko_sequencing_all.svg',\n dpi=300, bbox_inches='tight',\n transparent=True);", "_____no_output_____" ], [ "g = None\nfor gene in ['URA3', 'CAN1', 'LYP1', 'LEU2', 'MET17']:\n x = inter[inter['name'].isin([gene])\n ].groupby(['strain', 'set'])['sample'\n ].nunique() / inter.groupby(['strain', 'set'])[\n 'sample'].nunique()\n x[np.isnan(x)] = 0.0\n x.name = gene\n if g is None:\n g = x.to_frame()\n else:\n g = g.join(x.to_frame(), how='outer')", "_____no_output_____" ], [ "g", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
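The knockout-sequencing record above counts, per strain and gene, how many samples lack coverage in the 'original' versus the 'new' mutant collections, then joins the two count series so that missing combinations become zero before plotting. The toy pandas sketch below isolates that reshaping step; the column names follow the notebook, but the few rows of data are invented.

```python
import pandas as pd

inter = pd.DataFrame({
    'set':    ['new', 'new', 'original', 'original', 'original'],
    'strain': ['S288C', 'S288C', 'S288C', 'Y55', 'Y55'],
    'gene':   ['URA3', 'URA3', 'URA3', 'CAN1', 'CAN1'],
    'sample': ['s1', 's2', 's3', 's4', 's5'],
})

counts = inter.groupby(['set', 'strain', 'gene'])['sample'].count()
new_counts = counts.loc['new']
orig_counts = counts.loc['original']

m = new_counts.to_frame().join(orig_counts.to_frame(), how='outer',
                               lsuffix='_new', rsuffix='_original').fillna(0)
print(m)   # one row per (strain, gene); combinations absent from a set show 0
```

The outer join plus `fillna(0)` is what lets genes that were only problematic in one mutant collection still appear on both axes of the scatter plot.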
cbc544783e3cf153172151c38fe737eae2136421
568,105
ipynb
Jupyter Notebook
Development Indicators Project/python notebooks/AR_GDP_Growth.ipynb
autodidact-m/Projects
f4c0473adba42f3a629b62eb09d3b1df91982f46
[ "Apache-2.0" ]
null
null
null
Development Indicators Project/python notebooks/AR_GDP_Growth.ipynb
autodidact-m/Projects
f4c0473adba42f3a629b62eb09d3b1df91982f46
[ "Apache-2.0" ]
null
null
null
Development Indicators Project/python notebooks/AR_GDP_Growth.ipynb
autodidact-m/Projects
f4c0473adba42f3a629b62eb09d3b1df91982f46
[ "Apache-2.0" ]
null
null
null
303.474893
60,670
0.895834
[ [ [ "# Not completed. ", "_____no_output_____" ] ], [ [ "import json\nimport requests\nimport csv\nimport pandas as pd\nimport os\nimport matplotlib.pylab as plt\nimport numpy as np\n%matplotlib inline\npd.options.mode.chained_assignment = None\nfrom statsmodels.tsa.arima_model import ARIMA\nimport statsmodels.api as sm\nimport operator\nfrom statsmodels.tsa.stattools import acf \nfrom statsmodels.tsa.stattools import pacf\nfrom pandas.tools.plotting import autocorrelation_plot", "_____no_output_____" ], [ "dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')\nindicator_data = pd.read_csv('P:\\\\ADS\\\\Final\\\\Indicators_Cleaned.csv',header=0,parse_dates=True,index_col='Year',date_parser=dateparse, low_memory=False) \nindicator_data.head()", "_____no_output_____" ], [ "indicator_data.reset_index()\nindicator_data.head()", "_____no_output_____" ], [ "argentina_df_ind = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \\\n (indicator_data['CountryCode'] == 'AR')]", "_____no_output_____" ], [ "argentina_df_ind.index", "_____no_output_____" ], [ "argentina_df_ind", "_____no_output_____" ], [ "ts = argentina_df_ind['Value']\nts1 = argentina_df_ind[['Value']].copy()\nts1['Value']=ts1['Value']+20\nts1.head()", "_____no_output_____" ], [ "plt.plot(ts1)", "_____no_output_____" ], [ "from statsmodels.tsa.stattools import adfuller\ndef test_stationarity(timeseries):\n \n #Determing rolling statistics\n rolmean = pd.rolling_mean(timeseries, window=12)\n rolstd = pd.rolling_std(timeseries, window=12)\n\n #Plot rolling statistics:\n orig = plt.plot(timeseries, color='blue',label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n plt.show(block=False)\n \n #Perform Dickey-Fuller test:\n print ('Results of Dickey-Fuller Test:')\n dftest = adfuller(timeseries, autolag='AIC')\n dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\n for key,value in dftest[4].items():\n dfoutput['Critical Value (%s)'%key] = value\n print (dfoutput)\ntest_stationarity(ts1.Value)", "C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n" ], [ "decomposition = sm.tsa.seasonal_decompose(ts1, model='additive')\nfig = decomposition.plot()\nplt.show()", "_____no_output_____" ], [ "def logTransform(df):\n ts_log = np.log(df)\n plt.plot(ts_log)\n return ts_log", "_____no_output_____" ], [ "ts1_log = logTransform(ts1)", "_____no_output_____" ], [ "#test_stationarity(ts1_log.Value)", "_____no_output_____" ], [ "def logFirstDifference(ts1_log):\n ts1_log_diff = ts1_log - ts1_log.shift()\n ts1_log_diff.dropna(inplace=True)\n return ts1_log_diff", "_____no_output_____" ], [ "ts1_log_diff = logFirstDifference(ts1_log)\ntest_stationarity(ts1_log_diff.Value)", "C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with 
\n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n" ], [ "def firstDifference(df):\n ts_first_diff = df - df.shift()\n ts_first_diff.dropna(inplace=True)\n return ts_first_diff", "_____no_output_____" ], [ "ts1_first_diff = firstDifference(ts1)\ntest_stationarity(ts1_first_diff.Value)", "C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n" ], [ "lag_acf = acf(ts1_log_diff, nlags=10)\nlag_pacf = pacf(ts1_log_diff, nlags=10, method='ols')", "_____no_output_____" ], [ "fig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(ts1_log_diff, lags=10, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(ts1_log_diff, lags=10, ax=ax2)", "_____no_output_____" ] ], [ [ "- As seen from the graph above both ACF and PACF are geometric hence this is an ARMA model", "_____no_output_____" ] ], [ [ "autocorrelation_plot(ts1_log_diff)\nplt.show()", "_____no_output_____" ], [ "plt.subplot(122)\nplt.plot(lag_pacf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(ts1_log_diff)),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(ts1_log_diff)),linestyle='--',color='gray')\nplt.title('Partial Autocorrelation Function')\nplt.tight_layout()", "_____no_output_____" ], [ "aic_metric = pd.DataFrame({'Modelname':[],'AIC':[]})\naic_dict = {}", "_____no_output_____" ], [ "def cal_aic_metric(modelname,model):\n global aic_metric\n AIC = model.aic\n aic_dict[modelname] = AIC\n df_error = pd.DataFrame({'Modelname':[modelname],'AIC':[AIC]})\n aic_metric = pd.concat([aic_metric,df_error])\n return aic_metric", "_____no_output_____" ], [ "def AR_Model(ts):\n model = ARIMA(ts, order=(1, 1, 0))\n results_AR = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(1, 0, 0))',results_AR)\n print('Lag: %s' % results_AR.k_ar)\n print('Coefficients: %s' % results_AR.params)\n #print(results_AR.summary())\n predict_MA_HPI = np.exp(results_AR.predict(10, 10, dynamic=True))\n print(predict_MA_HPI)\n plt.plot(ts1_log)\n plt.plot(results_AR.fittedvalues, color='red')\n #print(np.exp(results_AR.fittedvalues))\n print(results_AR.aic)\n return results_AR", "_____no_output_____" ], [ "model_AR = AR_Model(ts1_log_diff)", "Lag: 1\nCoefficients: const 0.000427\nar.L1.D.Value -0.519326\ndtype: float64\n2005-01-01 1.122225\nFreq: -1AS-JAN, dtype: float64\n90.86254418330691\n" ], [ "def MA_Model(ts):\n model = ARIMA(ts, order=(0,1, 1)) \n results_MA = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(2, 1, 2))',results_MA)\n print('Lag: %s' % results_MA.k_ar)\n print('Coefficients: %s' % results_MA.params)\n print(results_MA.summary())\n plt.plot(ts)\n plt.plot(results_MA.fittedvalues, color='red')\n return results_MA", "_____no_output_____" ], [ "model_MA = MA_Model(ts1_log_diff)", "Lag: 0\nCoefficients: const -0.000116\nma.L1.D.Value -0.999947\ndtype: float64\n ARIMA Model Results 
\n==============================================================================\nDep. Variable: D.Value No. Observations: 55\nModel: ARIMA(0, 1, 1) Log Likelihood -26.240\nMethod: css-mle S.D. of innovations 0.376\nDate: Mon, 14 Aug 2017 AIC 58.480\nTime: 23:32:36 BIC 64.502\nSample: 01-01-2014 HQIC 60.809\n - 01-01-1960 \n=================================================================================\n coef std err z P>|z| [95.0% Conf. Int.]\n---------------------------------------------------------------------------------\nconst -0.0001 0.003 -0.037 0.970 -0.006 0.006\nma.L1.D.Value -0.9999 0.045 -22.097 0.000 -1.089 -0.911\n Roots \n=============================================================================\n Real Imaginary Modulus Frequency\n-----------------------------------------------------------------------------\nMA.1 1.0001 +0.0000j 1.0001 0.0000\n-----------------------------------------------------------------------------\n" ], [ "def Combined_Model(ts):\n model = ARIMA(ts, order=(2, 1, 1)) \n results_ARIMA = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(2,1, 3))',results_ARIMA)\n print('Lag: %s' % results_ARIMA.k_ar)\n print('Coefficients: %s' % results_ARIMA.params)\n print(results_ARIMA.summary())\n plt.plot(ts)\n plt.plot(results_ARIMA.fittedvalues, color='red')\n return results_ARIMA", "_____no_output_____" ], [ "model_Combined = Combined_Model(ts1_log_diff)", "Lag: 2\nCoefficients: const -0.000129\nar.L1.D.Value -0.458611\nar.L2.D.Value -0.408477\nma.L1.D.Value -0.999925\ndtype: float64\n ARIMA Model Results \n==============================================================================\nDep. Variable: D.Value No. Observations: 55\nModel: ARIMA(2, 1, 1) Log Likelihood -18.273\nMethod: css-mle S.D. of innovations 0.320\nDate: Mon, 14 Aug 2017 AIC 46.546\nTime: 23:32:44 BIC 56.582\nSample: 01-01-2014 HQIC 50.427\n - 01-01-1960 \n=================================================================================\n coef std err z P>|z| [95.0% Conf. 
Int.]\n---------------------------------------------------------------------------------\nconst -0.0001 0.001 -0.088 0.930 -0.003 0.003\nar.L1.D.Value -0.4586 0.122 -3.761 0.000 -0.698 -0.220\nar.L2.D.Value -0.4085 0.121 -3.380 0.001 -0.645 -0.172\nma.L1.D.Value -0.9999 0.048 -20.802 0.000 -1.094 -0.906\n Roots \n=============================================================================\n Real Imaginary Modulus Frequency\n-----------------------------------------------------------------------------\nAR.1 -0.5614 -1.4605j 1.5646 -0.3084\nAR.2 -0.5614 +1.4605j 1.5646 0.3084\nMA.1 1.0001 +0.0000j 1.0001 0.0000\n-----------------------------------------------------------------------------\n" ], [ "best_model = min(aic_dict.items(),key=operator.itemgetter(1))[0]\nprint('Best Model is ', best_model)", "Best Model is ARIMA(ts, order=(2,1, 3))\n" ], [ "aic_metric", "_____no_output_____" ], [ "#Forecast using Best Model\ndef forecast(model,numSteps):\n #model.forecast(steps=numSteps)\n output = model.forecast(steps=numSteps)[0]\n output.tolist()\n output = np.exp(output)\n #print(output)\n return normal(output)", "_____no_output_____" ], [ "def forC(n):\n output_forecast = forecast(model_Combined,57)\n return output_forecast[:n]\n\nforC(57)", "_____no_output_____" ], [ "def FittedValues(model):\n fittedVal=model.fittedvalues\n PredictedVal=np.exp(fittedVal)\n np.savetxt('PredictedValues.csv', PredictedVal, delimiter=\",\")\n print('Predicted existing values are:')\n return PredictedVal", "_____no_output_____" ] ], [ [ "# Taking it to normal scale", "_____no_output_____" ] ], [ [ "def normal(predictions_ARIMA_diff):\n #predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)\n predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\n predictions_ARIMA_log = pd.Series(ts1_log.ix[0], index=ts1_log.index)\n #print(predictions_ARIMA_diff_cumsum.shape,\" \",predictions_ARIMA_log.shape)\n predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)\n #predictions_ARIMA = np.exp(predictions_ARIMA_log)\n predictions_ARIMA_log = predictions_ARIMA_log -20\n return predictions_ARIMA_log", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
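The Argentina GDP record above (its first cell notes it is not completed) shifts the growth series by +20 so that logs can be taken, differences the log series to reach stationarity, and then tries to undo those steps in `normal()`, where the `np.exp` call is still commented out. The snippet below is a self-contained sketch of the forward transform together with its exact inverse; the synthetic series, the `OFFSET` constant, and the function names are illustrative assumptions, not code from the notebook.

```python
import numpy as np
import pandas as pd

OFFSET = 20.0  # shift so negative growth rates become positive before the log

def to_stationary(values):
    log_shifted = np.log(values + OFFSET)
    return pd.Series(log_shifted).diff().dropna(), log_shifted[0]

def from_stationary(log_diffs, first_log_value):
    # Invert differencing (cumulative sum anchored at the first log value),
    # then the log, then the +20 shift.
    log_levels = np.concatenate(([first_log_value],
                                 first_log_value + np.cumsum(log_diffs)))
    return np.exp(log_levels) - OFFSET

growth = np.array([2.5, -5.0, 8.1, 3.3, -1.2])
diffs, anchor = to_stationary(growth)
print(np.allclose(from_stationary(diffs.values, anchor), growth))  # True
```

Whatever model is fit on the differenced log series, its forecasts have to pass back through the same three inverse steps — cumulative sum, exponential, and offset subtraction — before they are interpretable as growth rates.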
cbc55ef093ebe6d07a8a5cdae4ee6439872566e0
27,366
ipynb
Jupyter Notebook
notebooks/if_statements.ipynb
dlivingstone/IntroToPython
4ba2bfe43d75043cf6857f6d45df4833801b087e
[ "MIT" ]
null
null
null
notebooks/if_statements.ipynb
dlivingstone/IntroToPython
4ba2bfe43d75043cf6857f6d45df4833801b087e
[ "MIT" ]
null
null
null
notebooks/if_statements.ipynb
dlivingstone/IntroToPython
4ba2bfe43d75043cf6857f6d45df4833801b087e
[ "MIT" ]
null
null
null
30.039517
456
0.567346
[ [ [ "If Statements\n===\nBy allowing you to respond selectively to different situations and conditions, if statements open up whole new possibilities for your programs. In this section, you will learn how to test for certain conditions, and then respond in appropriate ways to those conditions.", "_____no_output_____" ], [ "[Previous: Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) | \n[Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) |\n[Next: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb)", "_____no_output_____" ], [ "Contents\n===\n- [What is an *if* statement?](#What-is-an-*if*-statement?)\n - [Example](#Example)\n- [Logical tests](#Logical-tests)\n - [Equality](#Equality)\n - [Inequality](#Inequality)\n - [Other inequalities](#Other-inequalities)\n - [Checking if an item is in a list](#Checking-if-an-item-is-in-a-list)\n - [Exercises](#Exercises-logical)\n- [The if-elif...else chain](#The-if-elif...else-chain)\n - [Simple if statements](#Simple-if-statements)\n - [if-else statements](#if-else-statements)\n - [if-elif...else chains](#if-elif...else-chains)\n - [Exercises](#Exercises-elif)\n- [More than one passing test](#More-than-one-passing-test)\n- [True and False values](#True-and-False-values)\n- [Overall Challenges](#Overall-Challenges)", "_____no_output_____" ], [ "What is an *if* statement?\n===\nAn *if* statement tests for a condition, and then responds to that condition. If the condition is true, then whatever action is listed next gets carried out. You can test for multiple conditions at the same time, and respond appropriately to each condition.\n\nExample\n---\nHere is an example that shows a number of the desserts I like. It lists those desserts, but lets you know which one is my favorite.", "_____no_output_____" ] ], [ [ "# A list of desserts I like.\ndesserts = ['ice cream', 'chocolate', 'rhubarb crisp', 'cookies']\nfavorite_dessert = 'apple crisp'\n\n# Print the desserts out, but let everyone know my favorite dessert.\nfor dessert in desserts:\n if dessert == favorite_dessert:\n # This dessert is my favorite, let's let everyone know!\n print(\"%s is my favorite dessert!\" % dessert.title())\n else:\n # I like these desserts, but they are not my favorite.\n print(\"I like %s.\" % dessert)", "_____no_output_____" ] ], [ [ "#### What happens in this program?\n\n- The program starts out with a list of desserts, and one dessert is identified as a favorite.\n- The for loop runs through all the desserts.\n- Inside the for loop, each item in the list is tested.\n - If the current value of *dessert* is equal to the value of *favorite_dessert*, a message is printed that this is my favorite.\n - If the current value of *dessert* is not equal to the value of *favorite_dessert*, a message is printed that I just like the dessert.\n \nYou can test as many conditions as you want in an if statement, as you will see in a little bit.", "_____no_output_____" ], [ "Logical Tests\n===\nEvery if statement evaluates to *True* or *False*. *True* and *False* are Python keywords, which have special meanings attached to them. 
You can test for the following conditions in your if statements:\n\n- [equality](#equality) (==)\n- [inequality](#inequality) (!=)\n- [other inequalities](#other_inequalities)\n - greater than (>)\n - greater than or equal to (>=)\n - less than (<)\n - less than or equal to (<=)\n- [You can test if an item is **in** a list.](#in_list)\n\n### Whitespace\nRemember [learning about](http://introtopython.org/lists_tuples.html#pep8) PEP 8? There is a [section of PEP 8](http://www.python.org/dev/peps/pep-0008/#other-recommendations) that tells us it's a good idea to put a single space on either side of all of these comparison operators. If you're not sure what this means, just follow the style of the examples you see below.", "_____no_output_____" ], [ "Equality\n---\nTwo items are *equal* if they have the same value. You can test for equality between numbers, strings, and a number of other objects which you will learn about later. Some of these results may be surprising, so take a careful look at the examples below.\n\nIn Python, as in many programming languages, two equals signs tests for equality.\n\n**Watch out!** Be careful of accidentally using one equals sign, which can really throw things off because that one equals sign actually sets your item to the value you are testing for!", "_____no_output_____" ] ], [ [ "5 == 5", "_____no_output_____" ], [ "3 == 5 ", "_____no_output_____" ], [ "5 == 5.0", "_____no_output_____" ], [ "'eric' == 'eric'", "_____no_output_____" ], [ "'Eric' == 'eric'", "_____no_output_____" ], [ "'Eric'.lower() == 'eric'.lower()", "_____no_output_____" ], [ "'5' == 5", "_____no_output_____" ], [ "'5' == str(5)", "_____no_output_____" ] ], [ [ "Inequality\n---\nTwo items are *inequal* if they do not have the same value. In Python, we test for inequality using the exclamation point and one equals sign.\n\nSometimes you want to test for equality and if that fails, assume inequality. Sometimes it makes more sense to test for inequality directly.", "_____no_output_____" ] ], [ [ "3 != 5", "_____no_output_____" ], [ "5 != 5", "_____no_output_____" ], [ "'Eric' != 'eric'", "_____no_output_____" ] ], [ [ "Other Inequalities\n---\n### greater than", "_____no_output_____" ] ], [ [ "5 > 3", "_____no_output_____" ] ], [ [ "### greater than or equal to", "_____no_output_____" ] ], [ [ "5 >= 3", "_____no_output_____" ], [ "3 >= 3", "_____no_output_____" ] ], [ [ "### less than", "_____no_output_____" ] ], [ [ "3 < 5", "_____no_output_____" ] ], [ [ "### less than or equal to", "_____no_output_____" ] ], [ [ "3 <= 5", "_____no_output_____" ], [ "3 <= 3", "_____no_output_____" ] ], [ [ "Checking if an item is **in** a list\n---\nYou can check if an item is in a list using the **in** keyword.", "_____no_output_____" ] ], [ [ "vowels = ['a', 'e', 'i', 'o', 'u']\n'a' in vowels", "_____no_output_____" ], [ "vowels = ['a', 'e', 'i', 'o', 'u']\n'b' in vowels", "_____no_output_____" ] ], [ [ "<a id=\"Exercises-logical\"></a>\nExercises\n---\n#### True and False\n- Write a program that consists of at least ten lines, each of which has a logical statement on it. 
The output of your program should be 5 **True**s and 5 **False**s.\n- Note: You will probably need to write `print(5 > 3)`, not just `5 > 3`.", "_____no_output_____" ], [ "The if-elif...else chain\n===\nYou can test whatever series of conditions you want to, and you can test your conditions in any combination you want.", "_____no_output_____" ], [ "Simple if statements\n---\nThe simplest test has a single **if** statement, and a single statement to execute if the condition is **True**.", "_____no_output_____" ] ], [ [ "dogs = ['willie', 'hootz', 'peso', 'juno']\n\nif len(dogs) > 3:\n print(\"Wow, we have a lot of dogs here!\")", "_____no_output_____" ] ], [ [ "In this situation, nothing happens if the test does not pass.", "_____no_output_____" ] ], [ [ "###highlight=[2]\ndogs = ['willie', 'hootz']\n\nif len(dogs) > 3:\n print(\"Wow, we have a lot of dogs here!\")", "_____no_output_____" ] ], [ [ "Notice that there are no errors. The condition `len(dogs) > 3` evaluates to False, and the program moves on to any lines after the **if** block.", "_____no_output_____" ], [ "if-else statements\n---\nMany times you will want to respond in two possible ways to a test. If the test evaluates to **True**, you will want to do one thing. If the test evaluates to **False**, you will want to do something else. The **if-else** structure lets you do that easily. Here's what it looks like:", "_____no_output_____" ] ], [ [ "dogs = ['willie', 'hootz', 'peso', 'juno']\n\nif len(dogs) > 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "Our results have not changed in this case, because if the test evaluates to **True** only the statements under the **if** statement are executed. The statements under **else** area only executed if the test fails:", "_____no_output_____" ] ], [ [ "###highlight=[2]\ndogs = ['willie', 'hootz']\n\nif len(dogs) > 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "The test evaluated to **False**, so only the statement under `else` is run.", "_____no_output_____" ], [ "if-elif...else chains\n---\nMany times, you will want to test a series of conditions, rather than just an either-or situation. You can do this with a series of if-elif-else statements\n\nThere is no limit to how many conditions you can test. You always need one if statement to start the chain, and you can never have more than one else statement. But you can have as many elif statements as you want.", "_____no_output_____" ] ], [ [ "dogs = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "It is important to note that in situations like this, only the first test is evaluated. In an if-elif-else chain, once a test passes the rest of the conditions are ignored.", "_____no_output_____" ] ], [ [ "###highlight=[2]\ndogs = ['willie', 'hootz', 'peso', 'monty']\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "The first test failed, so Python evaluated the second test. 
That test passed, so the statement corresponding to `len(dogs) >= 3` is executed.", "_____no_output_____" ] ], [ [ "###highlight=[2]\ndogs = ['willie', 'hootz']\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "In this situation, the first two tests fail, so the statement in the else clause is executed. Note that this statement would be executed even if there are no dogs at all:", "_____no_output_____" ] ], [ [ "###highlight=[2]\ndogs = []\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelse:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "Note that you don't have to take any action at all when you start a series of if statements. You could simply do nothing in the situation that there are no dogs by replacing the `else` clause with another `elif` clause:", "_____no_output_____" ] ], [ [ "###highlight=[8]\ndogs = []\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelif len(dogs) >= 1:\n print(\"Okay, this is a reasonable number of dogs.\")", "_____no_output_____" ] ], [ [ "In this case, we only print a message if there is at least one dog present. Of course, you could add a new `else` clause to respond to the situation in which there are no dogs at all:", "_____no_output_____" ] ], [ [ "###highlight=[10,11]\ndogs = []\n\nif len(dogs) >= 5:\n print(\"Holy mackerel, we might as well start a dog hostel!\")\nelif len(dogs) >= 3:\n print(\"Wow, we have a lot of dogs here!\")\nelif len(dogs) >= 1:\n print(\"Okay, this is a reasonable number of dogs.\")\nelse:\n print(\"I wish we had a dog here.\")", "_____no_output_____" ] ], [ [ "As you can see, the if-elif-else chain lets you respond in very specific ways to any given situation.", "_____no_output_____" ], [ "<a id=\"Exercises-elif\"></a>\nExercises\n---\n#### Three is a Crowd\n- Make a list of names that includes at least four people.\n- Write an if test that prints a message about the room being crowded, if there are more than three people in your list.\n- Modify your list so that there are only two people in it. Use one of the methods for removing people from the list, don't just redefine the list.\n- Run your if test again. There should be no output this time, because there are less than three people in the list.\n- **Bonus:** Store your if test in a function called something like `crowd_test`.\n\n#### Three is a Crowd - Part 2\n- Save your program from *Three is a Crowd* under a new name.\n- Add an `else` statement to your if tests. 
If the `else` statement is run, have it print a message that the room is not very crowded.\n\n#### Six is a Mob\n- Save your program from *Three is a Crowd - Part 2* under a new name.\n- Add some names to your list, so that there are at least six people in the list.\n- Modify your tests so that\n - If there are more than 5 people, a message is printed about there being a mob in the room.\n - If there are 3-5 people, a message is printed about the room being crowded.\n - If there are 1 or 2 people, a message is printed about the room not being crowded.\n - If there are no people in the room, a message is printed abou the room being empty.", "_____no_output_____" ], [ "More than one passing test\n===\nIn all of the examples we have seen so far, only one test can pass. As soon as the first test passes, the rest of the tests are ignored. This is really good, because it allows our code to run more efficiently. Many times only one condition can be true, so testing every condition after one passes would be meaningless.\n\nThere are situations in which you want to run a series of tests, where every single test runs. These are situations where any or all of the tests could pass, and you want to respond to each passing test. Consider the following example, where we want to greet each dog that is present:", "_____no_output_____" ] ], [ [ "dogs = ['willie', 'hootz']\n\nif 'willie' in dogs:\n print(\"Hello, Willie!\")\nif 'hootz' in dogs:\n print(\"Hello, Hootz!\")\nif 'peso' in dogs:\n print(\"Hello, Peso!\")\nif 'monty' in dogs:\n print(\"Hello, Monty!\")", "_____no_output_____" ] ], [ [ "If we had done this using an if-elif-else chain, only the first dog that is present would be greeted:", "_____no_output_____" ] ], [ [ "###highlight=[6,7,8,9,10,11]\ndogs = ['willie', 'hootz']\n\nif 'willie' in dogs:\n print(\"Hello, Willie!\")\nelif 'hootz' in dogs:\n print(\"Hello, Hootz!\")\nelif 'peso' in dogs:\n print(\"Hello, Peso!\")\nelif 'monty' in dogs:\n print(\"Hello, Monty!\")", "_____no_output_____" ] ], [ [ "Of course, this could be written much more cleanly using lists and for loops. See if you can follow this code.", "_____no_output_____" ] ], [ [ "dogs_we_know = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']\ndogs_present = ['willie', 'hootz']\n\n# Go through all the dogs that are present, and greet the dogs we know.\nfor dog in dogs_present:\n if dog in dogs_we_know:\n print(\"Hello, %s!\" % dog.title())", "_____no_output_____" ] ], [ [ "This is the kind of code you should be aiming to write. It is fine to come up with code that is less efficient at first. When you notice yourself writing the same kind of code repeatedly in one program, look to see if you can use a loop or a function to make your code more efficient.", "_____no_output_____" ], [ "True and False values\n===\nEvery value can be evaluated as True or False. The general rule is that any non-zero or non-empty value will evaluate to True. If you are ever unsure, you can open a Python terminal and write two lines to find out if the value you are considering is True or False. Take a look at the following examples, keep them in mind, and test any value you are curious about. 
I am using a slightly longer test just to make sure something gets printed each time.", "_____no_output_____" ] ], [ [ "if 0:\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "if 1:\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# Arbitrary non-zero numbers evaluate to True.\nif 1253756:\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# Negative numbers are not zero, so they evaluate to True.\nif -1:\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# An empty string evaluates to False.\nif '':\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# Any other string, including a space, evaluates to True.\nif ' ':\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# Any other string, including a space, evaluates to True.\nif 'hello':\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ], [ "# None is a special object in Python. It evaluates to False.\nif None:\n print(\"This evaluates to True.\")\nelse:\n print(\"This evaluates to False.\")", "_____no_output_____" ] ], [ [ "Overall Challenges\n===\n#### Alien Points\n- Make a list of ten aliens, each of which is one color: 'red', 'green', or 'blue'.\n - You can shorten this to 'r', 'g', and 'b' if you want, but if you choose this option you have to include a comment explaining what r, g, and b stand for.\n- Red aliens are worth 5 points, green aliens are worth 10 points, and blue aliens are worth 20 points.\n- Use a for loop to determine the number of points a player would earn for destroying all of the aliens in your list.\n- [hint](#hint_alien_points)", "_____no_output_____" ], [ "- - -\n[Previous: Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) | \n[Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) |\n[Next: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb)", "_____no_output_____" ], [ "Hints\n===\nThese are placed at the bottom, so you can have a chance to solve exercises without seeing any hints.\n\n#### Alien Invaders\n- After you define your list of aliens, set a variable called `current_score` or `current_points` equal to 0.\n- Inside your for loop, write a series of if tests to determine how many points to add to the current score.\n- To keep a running total, use the syntax `current_score = current_score + points`, where *points* is the number of points for the current alien.", "_____no_output_____" ] ] ]
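The cells of the notebook record above end with the "Alien Points" challenge and a hint about keeping a running total. Below is a minimal sketch of one way that challenge could be solved; the particular list of alien colors is invented for illustration and is not part of the original notebook.

```python
# One possible sketch of the "Alien Points" challenge (illustrative only):
# red aliens are worth 5 points, green aliens 10, blue aliens 20.
aliens = ['red', 'green', 'blue', 'red', 'red', 'blue', 'green', 'red', 'blue', 'green']

current_score = 0
for alien in aliens:
    if alien == 'red':
        current_score = current_score + 5
    elif alien == 'green':
        current_score = current_score + 10
    elif alien == 'blue':
        current_score = current_score + 20

print("Destroying all the aliens earns %d points." % current_score)
```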
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cbc5609fe623bb3c603aaefb688c228b9c0b419d
295,860
ipynb
Jupyter Notebook
Mini_Project_Logistic_Regression.ipynb
twillstw/Springboard-Projects
e0c6f151b7861679a3887d4b90cb84f5f58abc21
[ "MIT" ]
null
null
null
Mini_Project_Logistic_Regression.ipynb
twillstw/Springboard-Projects
e0c6f151b7861679a3887d4b90cb84f5f58abc21
[ "MIT" ]
null
null
null
Mini_Project_Logistic_Regression.ipynb
twillstw/Springboard-Projects
e0c6f151b7861679a3887d4b90cb84f5f58abc21
[ "MIT" ]
null
null
null
224.47648
78,352
0.889059
[ [ [ "# Classification\n$$\n\\renewcommand{\\like}{{\\cal L}}\n\\renewcommand{\\loglike}{{\\ell}}\n\\renewcommand{\\err}{{\\cal E}}\n\\renewcommand{\\dat}{{\\cal D}}\n\\renewcommand{\\hyp}{{\\cal H}}\n\\renewcommand{\\Ex}[2]{E_{#1}[#2]}\n\\renewcommand{\\x}{{\\mathbf x}}\n\\renewcommand{\\v}[1]{{\\mathbf #1}}\n$$", "_____no_output_____" ], [ "**Note:** We've adapted this Mini Project from [Lab 5 in the CS109](https://github.com/cs109/2015lab5) course. Please feel free to check out the original lab, both for more exercises, as well as solutions.", "_____no_output_____" ], [ "We turn our attention to **classification**. Classification tries to predict, which of a small set of classes, an observation belongs to. Mathematically, the aim is to find $y$, a **label** based on knowing a feature vector $\\x$. For instance, consider predicting gender from seeing a person's face, something we do fairly well as humans. To have a machine do this well, we would typically feed the machine a bunch of images of people which have been labelled \"male\" or \"female\" (the training set), and have it learn the gender of the person in the image from the labels and the *features* used to determine gender. Then, given a new photo, the trained algorithm returns us the gender of the person in the photo.\n\nThere are different ways of making classifications. One idea is shown schematically in the image below, where we find a line that divides \"things\" of two different types in a 2-dimensional feature space. The classification show in the figure below is an example of a maximum-margin classifier where construct a decision boundary that is far as possible away from both classes of points. The fact that a line can be drawn to separate the two classes makes the problem *linearly separable*. 
Support Vector Machines (SVM) are an example of a maximum-margin classifier.\n\n![Splitting using a single line](images/onelinesplit.png)\n\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\nimport pandas as pd\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nsns.set_context(\"poster\")\nimport sklearn.model_selection\n\nc0=sns.color_palette()[0]\nc1=sns.color_palette()[1]\nc2=sns.color_palette()[2]\n\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\ncm = plt.cm.RdBu\ncm_bright = ListedColormap(['#FF0000', '#0000FF'])\n\ndef points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=True, colorscale=cmap_light, \n cdiscrete=cmap_bold, alpha=0.1, psize=10, zfunc=False, predicted=False):\n h = .02\n X=np.concatenate((Xtr, Xte))\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n np.linspace(y_min, y_max, 100))\n\n #plt.figure(figsize=(10,6))\n if zfunc:\n p0 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]\n p1 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n Z=zfunc(p0, p1)\n else:\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n ZZ = Z.reshape(xx.shape)\n if mesh:\n plt.pcolormesh(xx, yy, ZZ, cmap=cmap_light, alpha=alpha, axes=ax)\n if predicted:\n showtr = clf.predict(Xtr)\n showte = clf.predict(Xte)\n else:\n showtr = ytr\n showte = yte\n ax.scatter(Xtr[:, 0], Xtr[:, 1], c=showtr-1, cmap=cmap_bold, \n s=psize, alpha=alpha,edgecolor=\"k\")\n # and testing points\n ax.scatter(Xte[:, 0], Xte[:, 1], c=showte-1, cmap=cmap_bold, \n alpha=alpha, marker=\"s\", s=psize+10)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n return ax,xx,yy\n\ndef points_plot_prob(ax, Xtr, Xte, ytr, yte, clf, colorscale=cmap_light, \n cdiscrete=cmap_bold, ccolor=cm, psize=10, alpha=0.1):\n ax,xx,yy = points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=False, \n colorscale=colorscale, cdiscrete=cdiscrete, \n psize=psize, alpha=alpha, predicted=True) \n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=ccolor, alpha=.2, axes=ax)\n cs2 = plt.contour(xx, yy, Z, cmap=ccolor, alpha=.6, axes=ax)\n plt.clabel(cs2, fmt = '%2.1f', colors = 'k', fontsize=14, axes=ax)\n return ax ", "_____no_output_____" ] ], [ [ "## A Motivating Example Using `sklearn`: Heights and Weights", "_____no_output_____" ], [ "We'll use a dataset of heights and weights of males and females to hone our understanding of classifiers. We load the data into a dataframe and plot it.", "_____no_output_____" ] ], [ [ "dflog = pd.read_csv(\"data/01_heights_weights_genders.csv\")\ndflog.head()", "_____no_output_____" ] ], [ [ "Remember that the form of data we will use always is\n\n![dataform](images/data.png)\n\nwith the \"response\" or \"label\" $y$ as a plain array of 0s and 1s for binary classification. Sometimes we will also see -1 and +1 instead. 
There are also *multiclass* classifiers that can assign an observation to one of $K > 2$ classes and the label may then be an integer, but we will not be discussing those here.\n\n`y = [1,1,0,0,0,1,0,1,0....]`.", "_____no_output_____" ], [ "<div class=\"span5 alert alert-info\">\n<h3>Checkup Exercise Set I</h3>\n\n<ul>\n <li> <b>Exercise:</b> Create a scatter plot of Weight vs. Height\n <li> <b>Exercise:</b> Color the points differently by Gender\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "# your turn\n_ = sns.scatterplot('Height', 'Weight', data=dflog, hue='Gender', alpha=0.3, legend='brief')\n_ = plt.legend(loc='lower right', fontsize=14)\nplt.show()", "_____no_output_____" ] ], [ [ "### Training and Test Datasets\n\nWhen fitting models, we would like to ensure two things:\n\n* We have found the best model (in terms of model parameters).\n* The model is highly likely to generalize i.e. perform well on unseen data.\n\n<br/>\n<div class=\"span5 alert alert-success\">\n<h4>Purpose of splitting data into Training/testing sets</h4>\n<ul>\n <li> We built our model with the requirement that the model fit the data well. </li>\n <li> As a side-effect, the model will fit <b>THIS</b> dataset well. What about new data? </li>\n <ul>\n <li> We wanted the model for predictions, right?</li>\n </ul>\n <li> One simple solution, leave out some data (for <b>testing</b>) and <b>train</b> the model on the rest </li>\n <li> This also leads directly to the idea of cross-validation, next section. </li> \n</ul>\n</div>", "_____no_output_____" ], [ "First, we try a basic Logistic Regression:\n\n* Split the data into a training and test (hold-out) set\n* Train on the training set, and test for accuracy on the testing set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\n# Split the data into a training and test set.\nXlr, Xtestlr, ylr, ytestlr = train_test_split(dflog[['Height','Weight']].values, \n (dflog.Gender == \"Male\").values,random_state=5)\n\nclf = LogisticRegression(solver='lbfgs')\n# Fit the model on the trainng data.\nclf.fit(Xlr, ylr)\n# Print the accuracy from the testing data.\nprint(accuracy_score(clf.predict(Xtestlr), ytestlr))", "0.9252\n" ] ], [ [ "### Tuning the Model", "_____no_output_____" ], [ "The model has some hyperparameters we can tune for hopefully better performance. For tuning the parameters of your model, you will use a mix of *cross-validation* and *grid search*. In Logistic Regression, the most important parameter to tune is the *regularization parameter* `C`. Note that the regularization parameter is not always part of the logistic regression model. \n\nThe regularization parameter is used to control for unlikely high regression coefficients, and in other cases can be used when data is sparse, as a method of feature selection.\n\nYou will now implement some code to perform model tuning and selecting the regularization parameter $C$.", "_____no_output_____" ], [ "We use the following `cv_score` function to perform K-fold cross-validation and apply a scoring function to each test fold. 
In this incarnation we use accuracy score as the default scoring function.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import KFold\nfrom sklearn.metrics import accuracy_score\n\ndef cv_score(clf, x, y, score_func=accuracy_score):\n result = 0\n nfold = 5\n for train, test in KFold(nfold).split(x): # split data into train/test groups, 5 times\n clf.fit(x[train], y[train]) # fit\n result += score_func(clf.predict(x[test]), y[test]) # evaluate score function on held-out data\n return result / nfold # average", "_____no_output_____" ] ], [ [ "Below is an example of using the `cv_score` function for a basic logistic regression model without regularization.", "_____no_output_____" ] ], [ [ "clf = LogisticRegression(solver='lbfgs')\nscore = cv_score(clf, Xlr, ylr)\nprint(score)", "0.9172\n" ] ], [ [ "<div class=\"span5 alert alert-info\">\n<h3>Checkup Exercise Set II</h3>\n\n<b>Exercise:</b> Implement the following search procedure to find a good model\n<ul>\n<li> You are given a list of possible values of `C` below\n<li> For each C:\n <ol>\n <li> Create a logistic regression model with that value of C\n <li> Find the average score for this model using the `cv_score` function **only on the training set** `(Xlr, ylr)`\n </ol>\n<li> Pick the C with the highest average score\n</ul>\nYour goal is to find the best model parameters based *only* on the training set, without showing the model test set at all (which is why the test set is also called a *hold-out* set).\n</div>", "_____no_output_____" ] ], [ [ "#the grid of parameters to search over\nCs = [0.001, 0.1, 1, 10, 100]\n\n# your turn\nscores = []\nfor c in Cs:\n cv_clf = LogisticRegression(C=c, solver='lbfgs', random_state=8)\n scores.append(cv_score(cv_clf, Xlr, ylr))\n\n#compile respective scores into a data frame\nd = {'Cs': Cs, 'Scores': scores}\nscore_grid = pd.DataFrame.from_dict(d)\n\nscore_grid", "_____no_output_____" ] ], [ [ "<div class=\"span5 alert alert-info\">\n<h3>Checkup Exercise Set III</h3>\n**Exercise:** Now you want to estimate how this model will predict on unseen data in the following way:\n<ol>\n<li> Use the C you obtained from the procedure earlier and train a Logistic Regression on the training data\n<li> Calculate the accuracy on the test data\n</ol>\n\n<p>You may notice that this particular value of `C` may or may not do as well as simply running the default model on a random train-test split. </p>\n\n<ul>\n<li> Do you think that's a problem? \n<li> Why do we need to do this whole cross-validation and grid search stuff anyway?\n</ul>\n\n</div>", "_____no_output_____" ] ], [ [ "# your turn", "_____no_output_____" ] ], [ [ "According to the cross-validation exercise above, the scores hardly varied based on different values of *C*. For the current exercise, in order to try something other than the default, a c-value of 0.1 is used.", "_____no_output_____" ] ], [ [ "clf = LogisticRegression(C=0.1, solver='lbfgs')\n\n# Fit the model on the trainng data.\nclf.fit(Xlr, ylr)\n\n# Print the accuracy from the testing data.\nprint(accuracy_score(clf.predict(Xtestlr), ytestlr))", "0.9252\n" ] ], [ [ "As the cross-validation indicated, the accuracy score for this iteration is the same as running the default from before. That's not necessarily a problem, it just shows that this particular dataset is not overly affected by values of *C*. 
That doesn't mean that cross-validation is not useful.", "_____no_output_____" ], [ "### Black Box Grid Search in `sklearn`", "_____no_output_____" ], [ "Scikit-learn, as with many other Python packages, provides utilities to perform common operations so you do not have to do it manually. It is important to understand the mechanics of each operation, but at a certain point, you will want to use the utility instead to save time...", "_____no_output_____" ], [ "<div class=\"span5 alert alert-info\">\n<h3>Checkup Exercise Set IV</h3>\n\n<b>Exercise:</b> Use scikit-learn's [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html) tool to perform cross validation and grid search. \n\n* Instead of writing your own loops above to iterate over the model parameters, can you use GridSearchCV to find the best model over the training set? \n* Does it give you the same best value of `C`?\n* How does this model you've obtained perform on the test set?</div>", "_____no_output_____" ] ], [ [ "# your turn\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = {'C': Cs}\n\ngrid_clf = LogisticRegression(solver='lbfgs')\n\nlog_cv = GridSearchCV(grid_clf, param_grid, cv=5, return_train_score=True)\n\nlog_cv.fit(Xlr, ylr)\n\nres = pd.DataFrame(log_cv.cv_results_)\nres = res.iloc[:, [4,6,7,8,9,10,11,13,14,15,16,17,18,19]]\nres", "_____no_output_____" ], [ "print('The best value of C is {}'.format(log_cv.best_params_))\nprint('The best test score is {}'.format(log_cv.best_score_))", "The best value of C is {'C': 1}\nThe best test score is 0.9168\n" ] ], [ [ "## A Walkthrough of the Math Behind Logistic Regression", "_____no_output_____" ], [ "### Setting up Some Demo Code", "_____no_output_____" ], [ "Let's first set some code up for classification that we will need for further discussion on the math. We first set up a function `cv_optimize` which takes a classifier `clf`, a grid of hyperparameters (such as a complexity parameter or regularization parameter) implemented as a dictionary `parameters`, a training set (as a samples x features array) `Xtrain`, and a set of labels `ytrain`. The code takes the traning set, splits it into `n_folds` parts, sets up `n_folds` folds, and carries out a cross-validation by splitting the training set into a training and validation section for each foldfor us. It prints the best value of the parameters, and retuens the best classifier to us.", "_____no_output_____" ] ], [ [ "def cv_optimize(clf, parameters, Xtrain, ytrain, n_folds=5):\n gs = sklearn.model_selection.GridSearchCV(clf, param_grid=parameters, cv=n_folds)\n gs.fit(Xtrain, ytrain)\n print(\"BEST PARAMS\", gs.best_params_)\n best = gs.best_estimator_\n return best", "_____no_output_____" ] ], [ [ "We then use this best classifier to fit the entire training set. This is done inside the `do_classify` function which takes a dataframe `indf` as input. It takes the columns in the list `featurenames` as the features used to train the classifier. The column `targetname` sets the target. The classification is done by setting those samples for which `targetname` has value `target1val` to the value 1, and all others to 0. We split the dataframe into 80% training and 20% testing by default, standardizing the dataset if desired. (Standardizing a data set involves scaling the data so that it has 0 mean and is described in units of its standard deviation. We then train the model on the training set using cross-validation. 
Having obtained the best classifier using `cv_optimize`, we retrain on the entire training set and calculate the training and testing accuracy, which we print. We return the split data and the trained classifier.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\ndef do_classify(clf, parameters, indf, featurenames, targetname, target1val, standardize=False, train_size=0.8):\n subdf=indf[featurenames]\n if standardize:\n subdfstd=(subdf - subdf.mean())/subdf.std()\n else:\n subdfstd=subdf\n X=subdfstd.values\n y=(indf[targetname].values==target1val)*1\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size)\n clf = cv_optimize(clf, parameters, Xtrain, ytrain)\n clf=clf.fit(Xtrain, ytrain)\n training_accuracy = clf.score(Xtrain, ytrain)\n test_accuracy = clf.score(Xtest, ytest)\n print(\"Accuracy on training data: {:0.2f}\".format(training_accuracy))\n print(\"Accuracy on test data: {:0.2f}\".format(test_accuracy))\n return clf, Xtrain, ytrain, Xtest, ytest", "_____no_output_____" ] ], [ [ "## Logistic Regression: The Math", "_____no_output_____" ], [ "We could approach classification as linear regression, there the class, 0 or 1, is the target variable $y$. But this ignores the fact that our output $y$ is discrete valued, and futhermore, the $y$ predicted by linear regression will in general take on values less than 0 and greater than 1. Additionally, the residuals from the linear regression model will *not* be normally distributed. This violation means we should not use linear regression.\n\nBut what if we could change the form of our hypotheses $h(x)$ instead?\n\nThe idea behind logistic regression is very simple. We want to draw a line in feature space that divides the '1' samples from the '0' samples, just like in the diagram above. In other words, we wish to find the \"regression\" line which divides the samples. Now, a line has the form $w_1 x_1 + w_2 x_2 + w_0 = 0$ in 2-dimensions. On one side of this line we have \n\n$$w_1 x_1 + w_2 x_2 + w_0 \\ge 0,$$\n\nand on the other side we have \n\n$$w_1 x_1 + w_2 x_2 + w_0 < 0.$$ \n\nOur classification rule then becomes:\n\n\\begin{eqnarray*}\ny = 1 &\\mbox{if}& \\v{w}\\cdot\\v{x} \\ge 0\\\\\ny = 0 &\\mbox{if}& \\v{w}\\cdot\\v{x} < 0\n\\end{eqnarray*}\n\nwhere $\\v{x}$ is the vector $\\{1,x_1, x_2,...,x_n\\}$ where we have also generalized to more than 2 features.\n\nWhat hypotheses $h$ can we use to achieve this? One way to do so is to use the **sigmoid** function:\n\n$$h(z) = \\frac{1}{1 + e^{-z}}.$$\n\nNotice that at $z=0$ this function has the value 0.5. If $z > 0$, $h > 0.5$ and as $z \\to \\infty$, $h \\to 1$. If $z < 0$, $h < 0.5$ and as $z \\to -\\infty$, $h \\to 0$. As long as we identify any value of $y > 0.5$ as 1, and any $y < 0.5$ as 0, we can achieve what we wished above.\n\nThis function is plotted below:", "_____no_output_____" ] ], [ [ "h = lambda z: 1. 
/ (1 + np.exp(-z))\nzs=np.arange(-5, 5, 0.1)\nplt.plot(zs, h(zs), alpha=0.5);", "_____no_output_____" ] ], [ [ "So we then come up with our rule by identifying:\n\n$$z = \\v{w}\\cdot\\v{x}.$$\n\nThen $h(\\v{w}\\cdot\\v{x}) \\ge 0.5$ if $\\v{w}\\cdot\\v{x} \\ge 0$ and $h(\\v{w}\\cdot\\v{x}) \\lt 0.5$ if $\\v{w}\\cdot\\v{x} \\lt 0$, and:\n\n\\begin{eqnarray*}\ny = 1 &if& h(\\v{w}\\cdot\\v{x}) \\ge 0.5\\\\\ny = 0 &if& h(\\v{w}\\cdot\\v{x}) \\lt 0.5.\n\\end{eqnarray*}\n\nWe will show soon that this identification can be achieved by minimizing a loss in the ERM framework called the **log loss** :\n\n$$ R_{\\cal{D}}(\\v{w}) = - \\sum_{y_i \\in \\cal{D}} \\left ( y_i \\log(h(\\v{w}\\cdot\\v{x})) + ( 1 - y_i) \\log(1 - h(\\v{w}\\cdot\\v{x})) \\right )$$\n\nWe will also add a regularization term:\n\n$$ R_{\\cal{D}}(\\v{w}) = - \\sum_{y_i \\in \\cal{D}} \\left ( y_i \\log(h(\\v{w}\\cdot\\v{x})) + ( 1 - y_i) \\log(1 - h(\\v{w}\\cdot\\v{x})) \\right ) + \\frac{1}{C} \\v{w}\\cdot\\v{w},$$\n\nwhere $C$ is the regularization strength (equivalent to $1/\\alpha$ from the Ridge case), and smaller values of $C$ mean stronger regularization. As before, the regularization tries to prevent features from having terribly high weights, thus implementing a form of feature selection. \n\nHow did we come up with this loss? We'll come back to that, but let us see how logistic regression works out. \n", "_____no_output_____" ] ], [ [ "dflog.head()", "_____no_output_____" ], [ "clf_l, Xtrain_l, ytrain_l, Xtest_l, ytest_l = do_classify(LogisticRegression(solver='lbfgs'), \n {\"C\": [0.01, 0.1, 1, 10, 100]}, \n dflog, ['Weight', 'Height'], 'Gender','Male')", "C:\\Users\\Code_apps\\Anaconda3\\lib\\site-packages\\sklearn\\model_selection\\_split.py:2179: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.\n FutureWarning)\n" ], [ "plt.figure()\nax=plt.gca()\npoints_plot(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, alpha=0.2);", "_____no_output_____" ] ], [ [ "In the figure here showing the results of the logistic regression, we plot the actual labels of both the training(circles) and test(squares) samples. The 0's (females) are plotted in red, the 1's (males) in blue. We also show the classification boundary, a line (to the resolution of a grid square). Every sample on the red background side of the line will be classified female, and every sample on the blue side, male. Notice that most of the samples are classified well, but there are misclassified people on both sides, as evidenced by leakage of dots or squares of one color ontothe side of the other color. Both test and traing accuracy are about 92%.", "_____no_output_____" ], [ "### The Probabilistic Interpretaion", "_____no_output_____" ], [ "Remember we said earlier that if $h > 0.5$ we ought to identify the sample with $y=1$? One way of thinking about this is to identify $h(\\v{w}\\cdot\\v{x})$ with the probability that the sample is a '1' ($y=1$). 
Then we have the intuitive notion that lets identify a sample as 1 if we find that the probabilty of being a '1' is $\\ge 0.5$.\n\nSo suppose we say then that the probability of $y=1$ for a given $\\v{x}$ is given by $h(\\v{w}\\cdot\\v{x})$?\n\nThen, the conditional probabilities of $y=1$ or $y=0$ given a particular sample's features $\\v{x}$ are:\n\n\\begin{eqnarray*}\nP(y=1 | \\v{x}) &=& h(\\v{w}\\cdot\\v{x}) \\\\\nP(y=0 | \\v{x}) &=& 1 - h(\\v{w}\\cdot\\v{x}).\n\\end{eqnarray*}\n\nThese two can be written together as\n\n$$P(y|\\v{x}, \\v{w}) = h(\\v{w}\\cdot\\v{x})^y \\left(1 - h(\\v{w}\\cdot\\v{x}) \\right)^{(1-y)} $$\n\nThen multiplying over the samples we get the probability of the training $y$ given $\\v{w}$ and the $\\v{x}$:\n\n$$P(y|\\v{x},\\v{w}) = P(\\{y_i\\} | \\{\\v{x}_i\\}, \\v{w}) = \\prod_{y_i \\in \\cal{D}} P(y_i|\\v{x_i}, \\v{w}) = \\prod_{y_i \\in \\cal{D}} h(\\v{w}\\cdot\\v{x_i})^{y_i} \\left(1 - h(\\v{w}\\cdot\\v{x_i}) \\right)^{(1-y_i)}$$\n\nWhy use probabilities? Earlier, we talked about how the regression function $f(x)$ never gives us the $y$ exactly, because of noise. This hold for classification too. Even with identical features, a different sample may be classified differently. \n\nWe said that another way to think about a noisy $y$ is to imagine that our data $\\dat$ was generated from a joint probability distribution $P(x,y)$. Thus we need to model $y$ at a given $x$, written as $P(y|x)$, and since $P(x)$ is also a probability distribution, we have:\n\n$$P(x,y) = P(y | x) P(x)$$\n\nand can obtain our joint probability $P(x, y)$.\n\nIndeed its important to realize that a particular training set can be thought of as a draw from some \"true\" probability distribution (just as we did when showing the hairy variance diagram). If for example the probability of classifying a test sample as a '0' was 0.1, and it turns out that the test sample was a '0', it does not mean that this model was necessarily wrong. After all, in roughly a 10th of the draws, this new sample would be classified as a '0'! But, of-course its more unlikely than its likely, and having good probabilities means that we'll be likely right most of the time, which is what we want to achieve in classification. And furthermore, we can quantify this accuracy.\n\nThus its desirable to have probabilistic, or at the very least, ranked models of classification where you can tell which sample is more likely to be classified as a '1'. There are business reasons for this too. Consider the example of customer \"churn\": you are a cell-phone company and want to know, based on some of my purchasing habit and characteristic \"features\" if I am a likely defector. If so, you'll offer me an incentive not to defect. In this scenario, you might want to know which customers are most likely to defect, or even more precisely, which are most likely to respond to incentives. Based on these probabilities, you could then spend a finite marketing budget wisely.", "_____no_output_____" ], [ "### Maximizing the Probability of the Training Set", "_____no_output_____" ], [ "Now if we maximize $P(y|\\v{x},\\v{w})$, we will maximize the chance that each point is classified correctly, which is what we want to do. While this is not exactly the same thing as maximizing the 1-0 training risk, it is a principled way of obtaining the highest probability classification. 
This process is called **maximum likelihood** estimation since we are maximising the **likelihood of the training data y**, \n\n$$\\like = P(y|\\v{x},\\v{w}).$$ \n\nMaximum likelihood is one of the corenerstone methods in statistics, and is used to estimate probabilities of data. \n\nWe can equivalently maximize \n\n$$\\loglike = \\log{P(y|\\v{x},\\v{w})}$$ \n\nsince the natural logarithm $\\log$ is a monotonic function. This is known as maximizing the **log-likelihood**. Thus we can equivalently *minimize* a risk that is the negative of $\\log(P(y|\\v{x},\\v{w}))$:\n\n$$R_{\\cal{D}}(h(x)) = -\\loglike = -\\log \\like = -\\log{P(y|\\v{x},\\v{w})}.$$\n\n\nThus\n\n\\begin{eqnarray*}\nR_{\\cal{D}}(h(x)) &=& -\\log\\left(\\prod_{y_i \\in \\cal{D}} h(\\v{w}\\cdot\\v{x_i})^{y_i} \\left(1 - h(\\v{w}\\cdot\\v{x_i}) \\right)^{(1-y_i)}\\right)\\\\\n &=& -\\sum_{y_i \\in \\cal{D}} \\log\\left(h(\\v{w}\\cdot\\v{x_i})^{y_i} \\left(1 - h(\\v{w}\\cdot\\v{x_i}) \\right)^{(1-y_i)}\\right)\\\\ \n &=& -\\sum_{y_i \\in \\cal{D}} \\log\\,h(\\v{w}\\cdot\\v{x_i})^{y_i} + \\log\\,\\left(1 - h(\\v{w}\\cdot\\v{x_i}) \\right)^{(1-y_i)}\\\\\n &=& - \\sum_{y_i \\in \\cal{D}} \\left ( y_i \\log(h(\\v{w}\\cdot\\v{x})) + ( 1 - y_i) \\log(1 - h(\\v{w}\\cdot\\v{x})) \\right )\n\\end{eqnarray*}\n \nThis is exactly the risk we had above, leaving out the regularization term (which we shall return to later) and was the reason we chose it over the 1-0 risk. \n\nNotice that this little process we carried out above tells us something very interesting: **Probabilistic estimation using maximum likelihood is equivalent to Empiricial Risk Minimization using the negative log-likelihood**, since all we did was to minimize the negative log-likelihood over the training samples.\n\n`sklearn` will return the probabilities for our samples, or for that matter, for any input vector set $\\{\\v{x}_i\\}$, i.e. $P(y_i | \\v{x}_i, \\v{w})$:", "_____no_output_____" ] ], [ [ "clf_l.predict_proba(Xtest_l)", "_____no_output_____" ] ], [ [ "### Discriminative vs Generative Classifier", "_____no_output_____" ], [ "Logistic regression is what is known as a **discriminative classifier** as we learn a soft boundary between/among classes. Another paradigm is the **generative classifier** where we learn the distribution of each class. For more examples of generative classifiers, look [here](https://en.wikipedia.org/wiki/Generative_model). \n\nLet us plot the probabilities obtained from `predict_proba`, overlayed on the samples with their true labels:", "_____no_output_____" ] ], [ [ "plt.figure()\nax = plt.gca()\npoints_plot_prob(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, psize=20, alpha=0.1);", "C:\\Users\\Code_apps\\Anaconda3\\lib\\site-packages\\matplotlib\\contour.py:1000: UserWarning: The following kwargs were not used by contour: 'axes'\n s)\nC:\\Users\\Code_apps\\Anaconda3\\lib\\site-packages\\matplotlib\\contour.py:1000: UserWarning: The following kwargs were not used by contour: 'axes'\n s)\n" ] ], [ [ "Notice that lines of equal probability, as might be expected are stright lines. What the classifier does is very intuitive: if the probability is greater than 0.5, it classifies the sample as type '1' (male), otherwise it classifies the sample to be class '0'. 
Thus in the diagram above, where we have plotted predicted values rather than actual labels of samples, there is a clear demarcation at the 0.5 probability line.\n\nAgain, a classifier built around this notion of finding a line or boundary of demarcation is what is called a **discriminative** classifier. The algorithm tries to find a decision boundary that separates the males from the females. To classify a new sample as male or female, it checks on which side of the decision boundary the sample falls, and makes a prediction. In other words, we are asking, given $\\v{x}$, what is the probability of a given $y$, or, what is the likelihood $P(y|\\v{x},\\v{w})$?", "_____no_output_____" ] ] ]
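The notebook record above derives the regularized log-loss risk that logistic regression minimizes. The sketch below writes that formula out directly in NumPy; the function names, the assumption that `X` already contains an intercept column, and the 0/1 encoding of `y` are choices made here for illustration, not part of the original notebook.

```python
import numpy as np

def h(z):
    # the sigmoid used throughout the derivation above
    return 1.0 / (1.0 + np.exp(-z))

def log_loss_risk(w, X, y, C=1.0):
    # Regularized negative log-likelihood from the derivation above:
    # R(w) = -sum_i [ y_i*log h(w.x_i) + (1-y_i)*log(1 - h(w.x_i)) ] + (1/C) w.w
    # Assumes X is (n_samples, n_features) with an intercept column included,
    # y is a 0/1 array, and w is a weight vector of matching length.
    p = h(X @ w)                                            # P(y=1 | x, w) per sample
    log_like = np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))
    return -log_like + (1.0 / C) * np.dot(w, w)
```

Up to constant factors and the handling of the intercept, minimizing this quantity over `w` is what fitting the logistic regression model amounts to.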
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cbc56d058a32f3e7118936112deb6441d67d33c1
231,395
ipynb
Jupyter Notebook
docs/content/probability/1_univariate_prob.ipynb
alejandroschuler/CSL
43505276f7b8d53e086fd35bf850629f7efc797b
[ "MIT" ]
null
null
null
docs/content/probability/1_univariate_prob.ipynb
alejandroschuler/CSL
43505276f7b8d53e086fd35bf850629f7efc797b
[ "MIT" ]
1
2020-02-28T02:00:57.000Z
2020-02-28T02:00:57.000Z
docs/content/probability/1_univariate_prob.ipynb
alejandroschuler/CSL
43505276f7b8d53e086fd35bf850629f7efc797b
[ "MIT" ]
null
null
null
33.98869
10,118
0.561023
[ [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "# Univariate Probability", "_____no_output_____" ], [ "In the example above, we demonstrated some code that generates fake data $X$ and $Y$. On the other hand, real data comes from the real world, not from some python code. For every dataset, there is an immensely complex network of causal interactions that ultimately \"produces\" the data. \n\nFor example, in our blood pressure example, a patient's pre-treatment vital signs are caused by their physiological state: their genetics, life history, what they ate for breakfast that morning, whether or not they just ran up a flight of stairs, and so on and so forth. Taking a drug influences the levels of certain chemicals in the blood, which are taken up at particular rates in certain organs by certain enzymes, the levels of which are impacted by the patient's genetics and prior physiological state, which was influenced by their life history, etc. Thus the impact of the drug on cellular processes is mediated by these factors. The cells respond by increasing or decreasing their production of some proteins or metabolites, which, in combination with the immediate condition of the patient when the measurement is taken, determines the post-treatment blood pressure. \n\nOr, let's say we're trying to determine whether or not there is a cat in a photograph. The cat being in front of the camera when the photo was taken ($y_i$) could be caused by a huge number of factors, and the values of the pixels in the photograph ($x_i$) are caused by the reflection of photons emitted from sources of light off the cat (and other objects) and the mechanics of the detection of light inside the camera.\n\nIn a nutshell, the world is complicated. There is no way that mere mortals could ever write code accurate enough to perfectly simulate the exact processes that produce data about complex real-world phenomena.\n\nBut, despite the complexity, you should start thinking about that complex web of causality as \"code\" that's being run in some cosmic simulation. Maybe you can imagine that there are \"data gods\" that write and are running this code. We'll never see their code, and we'll never be able to understand it, but somewhere, out there, that metaphysical code is running, and it's generating the observations that we see in our data.", "_____no_output_____" ], [ "You can think of that code as a little \"factory\" that pumps out observations of $x_i$ and $y_i$, one at a time. The factory is behind a curtain that we can't ever look behind, but we can see the pile of $x_i$s and $y_i$s that come out of it, which are our $X$ and $Y$.", "_____no_output_____" ], [ "![](factory.png)", "_____no_output_____" ], [ "If we had that code, we'd be able to reverse engineer it to find the most likely value of $y_i$ given $x_i$ as accurately as would be possible with those predictors. In practice, however, we can only build a *model* of that code. Our model will never capture the complexities of reality, the same way that a model plane doesn't even begin to approach the complexity of a real aircraft. But, ideally, it will be similar enough in ways that are important for the task at hand: if we're using a model plane just to demonstrate what an aircraft might look like, we don't need the model to have functioning jet engines. 
And if all we need to do is estimate $y_i$ for a new $x_i$, we don't exactly need to understand the complex web of causality linking the two together.", "_____no_output_____" ], [ "We do, however, need a way to talk about the relationship that $x_i$ and $y_i$ might have. And to do that, we need a way to talk abstractly about the \"code\" or \"data factory\" that's behind the curtain, the same way we developed abstract terms to describe our data. Thankfully, the language of probability works perfectly for that.", "_____no_output_____" ], [ "## Random variables are factories that generate data", "_____no_output_____" ], [ "The data factories we're interested in are the kind that output $x_i$s and $y_i$s, but to understand how these factories work it's better to consider a simpler factory that produces one number at a time, instead of one vector $x_i$ and one number $y_i$. \n\nWe'll call our factory $\\mathbf Z$. This factory pushes out one value $z_i$ at a time. Furthermore, let's say that half the time you get a $1$ and half the time you get a $0$; those are the only values that the $\\mathbf Z$ factory can produce. And the factory is built to reset itself between producing each value, so whatever $z_i$ is has no impact on $z_{i+1}$.", "_____no_output_____" ], [ "In the language of probability theory, $z_i$ are **realizations** from $\\mathbf Z$, which has a **distribution**:\n\n$$\n\\begin{array}{rcl}\nP(\\mathbf Z = 0) &=& 1/2 \\\\ \nP(\\mathbf Z = 1) &=& 1/2\n\\end{array}\n\\quad \\quad \\text{or} \\quad \\quad\nP(\\mathbf Z=z) =\n\\begin{cases}\n1/2 & \\text{for }z=0 \\\\\n1/2 & \\text{for }z=1\n\\end{cases}\n$$", "_____no_output_____" ], [ "What we've been loosely calling a \"factory\" is a **random variable** in the language of probability theory. But that's just a name. You can keep thinking of them as factories, or code, that generate data.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \nRandom variables are often written in uppercase, (e.g. Z) and their realizations in lowercase (z). We're going to be using uppercase for matrices (and sets), so I'm going to use boldface in conjunction with uppercase ($\\mathbf Z$) to denote random variables.\n</div>", "_____no_output_____" ], [ "Ok, so if the random variable is a factory, and the realizations of the random variable are the output of that factory (the data we get to see), then how do we read a statement like $P(\\mathbf Z = 0) = 1/2$? Well, that just means that the value $z$ that $\\mathbf Z$ produces is $0$ half of the time. But what exactly do we mean by \"half the time\"? While we usually don't have to think deeper than this, you'll see later that it is sometimes necessary to have a more rigorous definition of probability.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nRemember that the entire purpose of talking about these factories is so that we can imagine what's behind the curtain, producing the data that we observe. Think of a real-world scenario where we could pretend that the data we observe was generated by $\\mathbf Z$. In other words, what's something we could measure in the real world that we might model using $\\bf Z$?\n \n</div>", "_____no_output_____" ], [ "Let's build that definition. We'll start with some raw materials. All factories have raw materials that go into them, which end up being turned into the finished product. In a similar way, random variables have inputs which get mapped to realized values. 
We'll call them \"data ore\": the unrefined precursor that gets transformed by our factory (random variable $\\mathbf Z$) into the data product $z$. The data ore exists in units (data ore nuggets). The factory takes one nugget at a time and transforms it into a realization.\n\nThe nuggets are kept in an big silo called $\\Omega$ before they go to $\\mathbf Z$. This silo is filled to the brim with *all* of the possible nuggets that could be fed into the factory, one of each of them. It's also a magic silo, so when you take out a nugget, another one exactly like it is mined out of the depths of the cosmos to take its place in the silo.", "_____no_output_____" ], [ "![](factory_rv.png)", "_____no_output_____" ], [ "Each nugget is gets transformed into a value of $z$, but the process isn't random. For instance, if a nugget named \"Karl\" turned into a 1 when fed through $\\mathbf Z$, then we would *always* get a 1 when Karl goes into $\\mathbf Z$. But we know that sometimes $\\mathbf Z$ produces 0s, so there must be other nuggets whose destiny is to become 0s, just like Karl's destiny is to be a 1. The \"randomness\" in $\\mathbf Z$ isn't caused by what's in the factory, it's caused by randomly picking a nugget to throw into it.", "_____no_output_____" ], [ "We can even code up our little example, imagining that we have 10 nuggets, boringly named \"0\", \"1\", \"2\"... \"9\":", "_____no_output_____" ] ], [ [ "def Z(ω): # factory (random variable)\n if ω in set([1,4,5,8,9]): # these are the outcomes (nuggets) that map to the value 1 \n return 1\n if ω in set([0,2,3,6,7]): # these are the outcomes (nuggets) that map to the value 0 \n return 0\nZ.Ω = set([0,1,2,3,4,5,6,7,8,9]) # sample space (silo) of outcomes (ore nuggets) attached to Z\n \nimport random\ndef realize(rand_var): # run the assembly line!\n ω = random.sample(rand_var.Ω, 1)[0] # grab a single nugget out of the silo at random\n return rand_var(ω) # push it through the factory", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-warning\">\n<b>Python Tip:</b> \n \n`random.sample(x,n)` grabs `n` values at random out of the set `x` and returns them as a list.\n</div>", "_____no_output_____" ], [ "Here are 20 observations $z=[z_1, z_2, \\dots z_{20}]$, fresh off the assembly line of the $\\mathbf Z$ factory:", "_____no_output_____" ] ], [ [ "z = [realize(Z) for i in range(20)] \nz", "_____no_output_____" ] ], [ [ "Now we're ready to define probability: the probability of an realization (a particular value $z$) is just the proportion of the silo that's taken up by nuggets that are destined to become that value $z$ when fed through $\\mathbf Z$. That's it. We denote that proportion with the notation $P(\\mathbf Z = z)$. In our example above, saying $P(\\mathbf Z = 1) = 1/2$ means that half of all the possible nuggets that could go into $\\mathbf Z$ would produce a 1, assuming each nugget takes up the same amount of space.", "_____no_output_____" ], [ "That's a definition we can code up:", "_____no_output_____" ] ], [ [ "def P(rand_var, realization):\n A = set(ω for ω in rand_var.Ω if rand_var(ω) in realization) # what are all the nuggets that map to the value(s) in question?\n return len(A)/len(rand_var.Ω) # what is the \"volume\" of those nuggets relative to the volume of the silo Ω? 
(assuming each takes up the same amount of space)", "_____no_output_____" ], [ "P(Z,[0]), P(Z,[1]) # P(z=0), P(z=1)", "_____no_output_____" ] ], [ [ "So to build a factory that makes 0s and 1s in even proportions, all I had to do was evenly split up the number of nuggets that are destined to produce each value. It also doesn't matter what I call the nuggets. For example, here is equally good code to implement $\\mathbf Z$:", "_____no_output_____" ] ], [ [ "def Z(ω): # factory (random variable)\n if ω in set([-1234]): # these are the outcomes (nuggets) that map to the value 1 \n return 1\n if ω in set([980123]): # these are the outcomes (nuggets) that map to the value 0 \n return 0\nZ.Ω = set([980123, -1234]) # sample space (silo) of outcomes (ore nuggets) attached to Z\n\n[realize(Z) for i in range(20)] ", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nWrite code for a new random variable $\\mathbf W$ that behaves like this:\n \n$$\nP(\\mathbf W=w) =\n\\begin{cases}\n0.1 \\dots & \\text{for }w=-1 \\\\\n0.4 \\dots & \\text{for }w=0 \\\\\n0.2 & \\text{for }w=1 \\\\\n0.3 & \\text{for }w=2\n\\end{cases}\n$$\n \nYou'll need to make your own nugget silo `Ω` and define the function `W(ω)`. Test it out using the `realize()` and `P()` functions we wrote. Use `P()` to calculate $P(\\mathbf W =0)$.\n \n</div>", "_____no_output_____" ], [ "### A mathematical description of random variables", "_____no_output_____" ], [ "![](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRTCXQ098kIFldwWw8VEPSR_q9Tbk1BFjuhFH8V8NVPskxrtVj7&s)", "_____no_output_____" ], [ "If you're looking at this and thinking that I can't possibly be serious, that the foundations of statistics and machine learning can't possibly be built up from imagining data factories and magical silos... well, you're wrong. Sure, I've concocted a somewhat elaborate metaphor, but it's a metaphor that accurately describes how these otherwise very abstract concepts relate to each other. If you can look at something like $P(\\mathbf Z = z) := \\mathbb P(\\{\\omega \\in \\Omega \\vert \\mathbf Z(w)=z\\})$ and immediately come away with an understanding of what that means, all the more power to you. But I don't. At least not without first building up an intuition for each of the components.", "_____no_output_____" ], [ "In probability theory, the silo $\\Omega$ is called a **sample space** and the data ore nuggets $\\omega$ are called **outcomes** (not to be confused with what we call the variable we want to predict in machine learning). A random variable $\\mathbf Z$ is defined as a function that maps an element $\\omega$ of $\\Omega$ to a realization $z$. The probability of a realization $z$ is the **measure** (volume, or proportion of total volume) of the set of outcomes (data ore nuggets) that map to $z$ (are destined to be transformed to $z$ by $\\mathbf Z$). ", "_____no_output_____" ], [ "![](prob_preimage.png)", "_____no_output_____" ], [ "When I talk about these things outside of the context of explaining them, I do call them by their real names (e.g. random variable, sample space, etc.) because that's what people have called them for nearly a century. But when I close my eyes and *reason* about these concepts, I'm thinking about something tangible, like a factory. 
As we go on I'm going to introduce more mathematical notation as we need it, and I'm going to wean off the factory metaphor, but I encourage you to keep building your intuition about these concepts instead of thinking about them as abstract symbols on a page. The symbols are just a convenient shorthand for the ideas. The only reason to know the standard names and symbols is to be able to read and understand what others have written. If you find yourself skimming over an equation- stop. Read it slowly and think about what each part means.\n\nSo now that we're here, let's demystify the notation in that equation I dropped up above! Here it is again:\n\n$$P(\\mathbf Z = z) := \\mathbb P(\\{\\omega \\in \\Omega \\vert \\mathbf Z(w)=z\\})$$\n\nTo start, the $:=$ means \"the thing on the left is defined as the thing on the right\". So we're saying that when we write \"$P(\\mathbf Z = z)$\", we really mean whatever \"$\\mathbb P(\\{\\omega \\in \\Omega \\vert \\mathbf Z(\\omega)=z\\})$\" is. Ok, next up is [set-builder notation](https://www.mathsisfun.com/sets/set-builder-notation.html): you can read $\\{a\\in A | f(a) = 1\\}$ as \"the collection of all the elements $a$ in the set $A$ *such that* $f(a)=1$\". So $\\{\\omega \\in \\Omega \\vert \\mathbf Z(\\omega)=z\\}$ is the set of outcomes $\\omega$ that become $z$ when passed through the random variable $\\mathbf Z$. There may be many such outcomes, or just one, or none, so the set can be big, small, or nonexistent. We will write the name of that set a little more compactly using the notation $\\mathbf Z^{-1}(z) = \\{\\omega \\in \\Omega \\vert \\mathbf Z(w)=z\\}$ since usually $f^{-1}(y)$ denotes the element $x$ such that $f(x)=y$. We call this the **preimage** of $z$ under $\\mathbf Z$.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \n \nPreimages aren't just for random variables- you can define preimages for any function. If the function is $y=f(x)$, the preimage of a set $A$ (denoted $f^{-1}(A)$) is a set of all of the values $x$ that become one of the $y$ values in $A$ when shoved through $f$. The set $A$ is called the image of $f^{-1}(A)$ under $f$.\n \nFor example, if $f(x) = x^2$ and $A$ is the set of numbers between 0 and 4, then $f^{-1}(A)$ is the set of numbers between -2 and 2, since every number between -2 and 2, when squared, is between 0 and 4, and these are the only numbers for which that is the case. Another example: if $f(x) = \\cos(x)$ and $A=\\{1\\}$, then $f^{-1}(A) = \\{\\dots, -4\\pi, -2\\pi, 0, 2\\pi, 4\\pi, 6\\pi, \\dots\\}$. Plot or draw a picture of $\\cos(x)$ and mark the points where $\\cos(x) = 1$ to see why.\n \n</div>", "_____no_output_____" ], [ "Finally, we have $\\mathbb P()$, which is the [**probability measure**](https://en.wikipedia.org/wiki/Probability_measure). Think of it as a function that measures the proportion of all of the outcomes in $\\Omega$ that are contained in the subset $\\mathbf Z^{-1}(z)$. This is basically the volume of space that the nuggets in $\\mathbf Z^{-1}(z)$ take up in the silo $\\Omega$. By convention, we say that $\\Omega$ has volume 1 so that the volume of $\\mathbf Z^{-1}(z)$ is also the proportion of volume that $\\mathbf Z^{-1}(z)$ takes up in $\\Omega$. In the figure above, that's represented by the area of the shaded gray region.", "_____no_output_____" ], [ "\nIf you put all of that together, you'll see that it's exactly the same as the definition we put together using our factory analogy. 
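\n\nTo make this concrete, here is a small sketch (reusing the little `Z` factory and its silo `Z.Ω` coded up earlier; either version of `Z` works) that builds the preimage $\mathbf Z^{-1}(1)$ explicitly and then measures it with the counting measure:\n\n```\npreimage = {ω for ω in Z.Ω if Z(ω) == 1}   # the set Z^-1(1): every nugget destined to become a 1\nlen(preimage) / len(Z.Ω)                   # its share of the silo: 0.5, i.e. P(Z = 1)\n```\n\nThis is exactly what the `P()` function we wrote earlier does under the hood.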
", "_____no_output_____" ], [ "We can also talk about the probability of sets of realizations instead of just single realization $z$. For instance, what's the probability that $z$ is 0 *or* 1? We write that like $P(\\mathbf Z \\in A)$, where $A$ is the set of possible realizations, like $\\{0,1\\}$. That's more general than the probability of a single realization $z$: $P(\\mathbf Z = z)$. The definition is the same though: $P(\\mathbf Z \\in A) := \\mathbb P(\\mathbf Z^{-1}(A))$. All we need to do is count up the volume of all the nugets that produce any of the values that are in $A$, instead of just the nuggets that produce $z$.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>REMEMBER:</b> \n\nIf your eyes glaze over every time you see mathematical notation, don't worry. Remember, that's normal. Just slow down and read it again. Try and think about what it <i>means</i>.\n</div>", "_____no_output_____" ], [ "If you look at the code we wrote before, you'll notice it can already calculate probabilities for sets of realizations:", "_____no_output_____" ] ], [ [ "def P(rand_var, realization):\n A = set(ω for ω in rand_var.Ω if rand_var(ω) in realization) # what are all the nuggets that map to the value(s) in question?\n return len(A)/len(rand_var.Ω) # what is the \"volume\" of those nuggets relative to the volume of the silo Ω? (assuming each takes up the same amount of space)", "_____no_output_____" ], [ "P(Z, [0])", "_____no_output_____" ], [ "P(Z, [0,1])", "_____no_output_____" ] ], [ [ "### Properties of probability", "_____no_output_____" ], [ "Ok- I promised that it would be useful to define probability in a more rigorous way than \"$z$ happens $x$% of the time\". Now we're going to see why. \n\nTo start with, let's \"derive\" a relatively simple fact: for any subset of possible realizations $A$, \n\n$$P(\\mathbf Z \\in A) \\in [0,1]$$\n\nThis is a compact way of writing that for any subset of realizations, the volume of the subset of outcomes $\\mathbf Z^{-1}(A)$ that map to those realzations is a number between 0 and 1. Why? Well, if the volume of our silo $\\Omega$ is 1, the volume of any subset of that has to be less than or equal to 1. And there is no subset that can occupy negative space, so the volume has to be greater than or equal to 0.", "_____no_output_____" ], [ "Here's a trickier one: if two sets of realizations $A$ and $B$ have no realizations in common, then the probability of a realization from either of them is the sum of the probabilities of a realization from each of them. Mathematically:\n\n$$A \\cap B = 0 \\rightarrow P(\\mathbf Z \\in A \\cup B) = P(\\mathbf Z \\in A) + P(\\mathbf Z \\in B)$$\n\n$A \\cap B$ is read as \"the intersection of the sets $A$ and $B$\", which is the set of elements that are in both sets. It's the middle part of a Venn diagram. $A \\cup B$ is read as \"the union of $A$ and $B$\", which is all of the elements in either set- that's the entirety of the Venn diagram. \n\nThat also seems cryptic until you think about it in terms of quantities of ore nuggets that produce certain values when fed through the factory. If you take all the ore nuggets that end up becoming any of the values in $A$ (call that set of nuggets $\\mathbf Z^{-1}(A)$), and all the nuggets that end up becoming values in $B$ (call that $\\mathbf Z^{-1}(B)$), then the total volume that end up becoming values in either $A$ or $B$ is the sum of the volumes that become $A$ and those that become $B$. 
This is true as long as there are no nuggets that become both a realization in $A$ and a realization in $B$ because we would double-count these. But we've also ensured that these do not exist since each nugget is destined to become only a single value, and we made sure that there is no overlap between $A$ and $B$.\n\nIf there is overlap, the proposition doesn't hold. For instance, if $A= \\{0,1\\}$ and $B = \\{0\\}$, every element of $B$ is also an element of $A$, so the volume of $Z^{-1}(A \\cup B)$ is the volume of $Z^{-1}(A)$, which is not the volume of $Z^{-1}(A)$ plus the volume of $Z^{-1}(B)$.", "_____no_output_____" ], [ "We can even use our code from before to demonstrate this:", "_____no_output_____" ] ], [ [ "A = set([0])\nB = set([1])\nP(Z,A) + P(Z,B) == P(Z,A|B) # in python, set union ∪ is written | because an element is in A∪B if it is in A OR B (A|B)", "_____no_output_____" ], [ "A = set([0,1])\nB = set([0])\nP(Z,A) + P(Z,B) == P(Z,A|B)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nDraw a picture based on the figure above that helps explain why $A \\cap B = 0 \\rightarrow P(\\mathbf Z \\in A \\cup B) = P(\\mathbf Z \\in A) + P(\\mathbf Z \\in B)$\n\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nLet's say the sets $A$ and $B$ have some overlap. Can you come up with a formula to calculate $P(\\mathbf Z \\in A \\cup B)$ given $P(\\mathbf Z \\in A)$, $P(\\mathbf Z \\in B)$, and $P(\\mathbf Z \\in A \\cap B)$?\n\n</div>", "_____no_output_____" ], [ "The upshot of this is that the probability of a set of outcomes is the same as the sum of their probabilities:", "_____no_output_____" ], [ "$$\nP(\\mathbf Z \\in A) \n=\n\\mathbb P (\\mathbf Z^{-1}(A))\n=\n\\sum_{\\omega \\in Z^{-1}(A)} \\mathbb P(\\omega)\n= \n\\sum_{Z^{-1}(A)} \\mathbb P(\\omega)\n$$", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nIn all our code so far we've been using a finite sample set with $n$ outcomes and we've chosen to use $\\mathbb P(B)= |B|/n$ where $|B|$ denotes the number of elements in $B$. That's called the <b>counting measure</b> It helps your understanding, however, to know that it isn't the only probability measure we could use. We could instead say that some outcomes take up twice as much space in the silo, or that they all have totally different volumes. As long as whatever $\\mathbb P$ we come up with satisfies $\\mathbb P(\\Omega)=1$ and $\\mathbb P(\\bigcup B_i)= \\sum \\mathbb P(B_i)$ for non-overlapping sets $B_i$ (of outcomes), it's a legitimate choice.\n \nLet's go back to this version of $\\mathbf Z$:\n \n```\ndef Z(ω): # factory (random variable)\n if ω in set([1,4,5,8,9]): # these are the outcomes (nuggets) that map to the value 1 \n return 1\n if ω in set([0,2,3,6,7]): # these are the outcomes (nuggets) that map to the value 0 \n return 0\nZ.Ω = set([0,1,2,3,4,5,6,7,8,9]) # sample space (silo) of outcomes (ore nuggets) attached to Z\n```\n \n\nChange the code for `P(rand_var, realization)` so that $\\mathbb P(\\omega) = 0.25$ if $\\omega \\in \\{0,1,2,3\\}$ and 0 otherwise. The idea is that now nuggets 0, 1, 2, and 3 each take up a quarter of the space in the silo, while the other nuggets take up none. What is $P(Z=1)$ now?\n\n</div>", "_____no_output_____" ], [ "### Continuous sample spaces", "_____no_output_____" ], [ "So far, all the random variables we've talked about have produced outputs from a finite, discrete set (e.g. 
$\\{0,1\\}$ or $\\{-1,0,1,2\\}$). If we're imagining a factory that might produce the data we observe when flipping a coin, a binary output is all we need. Similarly, if we want to imagine the factory that assigns an \"apple\", \"orange\", or \"banana\" label to a photograph of a fruit, it just needs to output a discrete set of three values. But if we want to imagine the kind of factory that could produce the prices of different apartments in New York, we need something that can output a continuous range of values.", "_____no_output_____" ], [ "Let's think up a random variable (call it $\\bf Z$ again) that can take any value between 0 and 10. How many numbers are there between 0 and 10? Well, an infinite number: for any two numbers in that interval, you can find a number that's right between them. Since one nugget from the silo always prodcues the same realization when pushed through the factory, there need to be an infinite number of nuggets in the silo to be able to produce an infinite number of realizations. That means that our old code, where we manually enumerated all of the elements in $\\Omega$, is not going to work anymore. What we can do instead is imagine that $\\Omega$ is itself an interval, like all the numbers between 0 and 1. So, to pick a random nugget to throw into the factory, we just pick a random number between 0 and 1. Here's an example:", "_____no_output_____" ] ], [ [ "def Z(ω): \n return 10*(ω**2) # when ω goes into the factory, the factory makes ω^2\nZ.Ω = random.random # returns a single number between 0 and 1 when called\n\ndef realize_cont(rand_var): # run the assembly line!\n ω = Z.Ω() # returns a single number between 0 and 1\n return rand_var(ω) # push it through the factory", "_____no_output_____" ], [ "[realize_cont(Z) for i in range(5)]", "_____no_output_____" ] ], [ [ "So $\\mathbf Z$ is defined by $\\mathbf Z(\\omega) = 10\\omega^2$ with $\\omega \\in [0,1]$. Great. But now what does $P(\\mathbf Z = z)$ mean? We just apply the same old definition of probability: it's the proportion of nuggets in the silo that are destined to become the value $z$. In notation: $\\mathbb P(\\mathbf Z^{-1}(z))$. Same as before. ", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \n\nNotation like $[a,b]$ is often used to concisely write intervals- this just means \"all the numbers between $a$ and $b$, including those endpoints\". We use parentheses like $(a,b)$ to indicate that the endpoints should not be included. $(a,b]$ and $[a,b)$ have one of the two endpoints included, with the bracket indicating the endpoint that's included and the parenthesis indicating which isn't.\n</div>", "_____no_output_____" ], [ "The issue is that now we need a probability measure that works with continuous sets. For example, let's say we're looking for $P(\\mathbf Z = 2.5)$. As $\\mathbf Z(\\omega) = 10\\omega^2$ is defined in the code above, the only value of $\\omega$ that makes $z=2.5$ is $\\omega = \\sqrt{2.5/10} = 0.5$. Any other value of $\\omega$ would produce a different value of $z$. So $\\mathbf Z^{-1}(z) = 0.5$. What \"volume\" does the single number $0.5$ take up in the interval $[0,1]$? In other words, how are we going to define a probability measure to use here?", "_____no_output_____" ], [ "The most commonly used measure in this case is based on the \"length\" of the set relative to the length of $\\Omega$. In our case, the length of $\\Omega$ is 1, so the probability measure of any interval $(a,b)$ or $[a,b]$ is $b-a$. 
For sets more complicated than an interval, we have to find the smallest collection of intervals, in terms of total length, that contains the set in question. We say the length of that set is the total length of the collection of intervals that covers it. Using length as a notion of measure makes good sense because if two sets don't overlap, then the length of their union is the sum of their lengths. This measure is called the **Lebesgue measure**, but I only mention the name so you can recognize it elsewhere.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \n\nWhen you get down to the nitty gritty math, it turns out there actually are <a href=https://en.wikipedia.org/wiki/Vitali_set>some really messed up sets</a> where this notion of \"length\" breaks down, in that the \"length\" of the union of two disjoint sets might not be the sum of their lengths. These are not sets you would ever come across in any real-world context. The technical solution is to only allow random variables where the preimage of any interval is not one of these messed up sets. This really isn't something you should think or worry about. This note is only here to satisfy nosy probabilists or measure theorists who were offended by the above paragraph.\n\n</div>", "_____no_output_____" ], [ "Ok, back to our problem: what's $\\mathbb P(0.5)$? Well, $\\{0.5\\} = [0.5, 0.5]$, so its length is $0.5-0.5=0$! In fact, for any single element $\\omega$, $\\mathbb P(\\omega)= 0$ for the same reason. That's a problem if we want to use discrete sums to calculate probabilities over sets:", "_____no_output_____" ], [ "$$\nP(\\mathbf Z \\in A) \n\\overset{?}{=}\n\\sum_{Z^{-1}(A)} \\mathbb P(\\omega)\n= \n\\sum_{Z^{-1}(A)} 0\n=\n0\n$$", "_____no_output_____" ], [ "But if $Z^{-1}(A)$ is an interval with finite length, then the probability has to be the length of that interval, not 0!\n\nThe reason this doesn't make any sense is that we're trying to use a discrete sum to add up a continuous infinity of 0s. Basically, we're trying to break down $\\sum_{Z^{-1}(A)}$ into each of its component $\\omega$s and measuring each of those. Instead of doing that, though, we can *integrate* over infinitesimal units of \"$d \\omega$\":", "_____no_output_____" ], [ "$$\nP(\\mathbf Z \\in A) \n=\n\\int_{Z^{-1}(A)} \\mathbb P(d\\omega)\n$$", "_____no_output_____" ], [ "This thing is called a **Lebesgue integral**. What we're doing here is adding up all of the infinitesimal lengths $\\mathbb P(d\\omega)$ for all $\\omega$s in the set $\\mathbf Z^{-1}(A)$. We'll write this as $\\int d \\mathbb P$ for short. It has all the same rules as a standard integral (just write $d \\mathbb P$ instead of $dx$), so the integral of a sum is the sum of integrals, etc. And it always agrees with the integrals you're used to from calculus:\n\n$$\n\\int_{[a,b]} f(\\omega) d\\mathbb P = \\int_a^b f(x) dx\n$$\n\nThe neat thing is that it actually works no matter what $\\mathbb P$ is, as long as it satisfies all the properties of a measure. In fact, if $\\mathbb P$ is the discrete counting measure that we were using before, then", "_____no_output_____" ], [ "$$\n\\int_{Z^{-1}(A)} d \\mathbb P\n= \n\\sum_{Z^{-1}(A)} \\mathbb P(\\omega)\n$$", "_____no_output_____" ], [ "If you have no idea why any of this matters, don't worry, just keep going. We're not going to get into the theory of Lebesgue integration. 
I really went back and forth on whether to include this at all, but I did because having this unifying formalism in your back pocket makes it really easy to prove a lot of things later, even if you don't really understand the theoretical details. You'll be fine if you just think of a Lebesgue integral as a tool to find the volume of outcomes in arbitrary sets that happens to follow all the rules of a normal integral. In other words: no matter how you're measuring stuff, you can use the Lebesgue integral to figure out how much space different sets of outcomes take up.", "_____no_output_____" ], [ "## Probability distributions", "_____no_output_____" ], [ "The formal definition of a random variable as a function from a sample space to some set of numbers is really useful for proving useful relationships, but ultimately the sample space is totally imaginary: all we get to see are the realizations. So we're going to build some tools that will let us avoid talking about the sample space so much if we don't need to.", "_____no_output_____" ], [ "As perhaps you've noticed, neither the exact nature of what is in the sample space nor which of its elements map to which realizations change the observable behavior of a random variable as long as the total measure of all the outcomes mapping to each realization are the same. For example, we looked at two equivalent ways to implement our random variable $\\mathbf Z$:", "_____no_output_____" ] ], [ [ "def Z(ω): \n if ω in set([1,4,5,8,9]): \n return 1\n if ω in set([0,2,3,6,7]): \n return 0\nZ.Ω = set([0,1,2,3,4,5,6,7,8,9]) ", "_____no_output_____" ], [ "def Z(ω): \n if ω in set([-1234]): \n return 1\n if ω in set([980123]): \n return 0\nZ.Ω = set([980123, -1234]) ", "_____no_output_____" ] ], [ [ "These are technically two different random variables because they have different sample spaces and different mappings to the realizations, but they behave exactly the same. When this is the case, we say they have the same **probability distribution**. The probability distribution describes how the factory should *behave* from the perspective of someone who can only see its products $z_i$: half the time you get a 0, half the time you get a 1. There is no need to mention the silo of ore nuggets, give them names, and specify which nuggets are destined to be 0s and which are destined to be 1s. We know they're back there, and we know what total *measure* are destined to be 0s and 1s (since that's what the probability means), but we don't need the details of who is who and what goes where. In fact, unless you're a probability theorist, you will never need to think about the sample space to solve a problem. The only reason you need to know about it is so that you can understand useful identities, which we will continue to derive as we go along.", "_____no_output_____" ], [ "The discrete probability distribution is function of the factory product $z$. For each unique value of $z$, it tells us the total volume of the nuggets in the silo that map to that outcome. We can visualize that by sorting all the nuggets in the silo into piles according to which value they are destined to become. The relative heights of each pile are proportional to the volume of space (measure) that each group of nuggets take up in the silo. 
Let's demonstrate with a new random variable $\\bf V$:", "_____no_output_____" ] ], [ [ "def V(ω): \n if ω in set([1]): \n return 2\n if ω in set([2,3]): \n return 0\n if ω in set([4,5,6]): \n return -1\n if ω in set([7,8,9,0]):\n return 1\nV.Ω = set(range(10)) ", "_____no_output_____" ], [ "vs = [-1,0,1,2] # all the values v can take\nps = [P(V,[v]) for v in vs] # calculate the probability of each, assuming the counting measure\nimport altair as alt # for plotting\nimport pandas as pd # to make dataframes\n\ndistribution = pd.DataFrame({'v':vs, 'p':ps})\nalt.Chart(distribution, height=100, width=400).mark_bar().encode(x='v:O', y='p')", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-warning\">\n<b>Python Tip:</b> \n\n`altair` is a useful python package for visualization. It's optimized to work with dataframes from the `pandas` package. Feel free to browse the documentation for these packages, but you don't need to be an expert to continue on in this book.\n</div>", "_____no_output_____" ], [ "This is the graphical representation of the probability distribution\n\n$$\n\\phi(v) \n=\nP(V=v)\n=\n\\mathbb P(\\mathbf V^{-1}(v))\n=\n\\begin{cases}\n0.3 & \\text{for }v=-1 \\\\\n0.2 & \\text{for }v=0 \\\\\n0.4 & \\text{for }v=1 \\\\\n0.1 & \\text{for }v=2\n\\end{cases}\n$$", "_____no_output_____" ], [ "$\\phi(v)$ is called a **probability mass function**. If we have multiple random variables floating around and we want to distinguish their mass functions, we'll sometimes write $\\phi_{\\mathbf V}(v)$.", "_____no_output_____" ], [ "If we want to know the probability of a particular set of realizations, say, $P(\\mathbf V \\in \\{0,1\\})$, it's easy to get using the mass function:\n\n$$P(\\mathbf V \\in A) = \\sum_{v \\in A} \\phi(v)$$\n\nWe simply sum up the probabilities that $\\mathbf V$ is any of the realizations within the set $A$ of interest. Compare this to what we had before:\n\n$$P(\\mathbf V \\in A) = \\sum_{\\omega \\in \\mathbf V^{-1}(A)} \\mathbb P(\\omega)$$\n\nThe advantage is that we don't have to talk about outcomes or sample spaces anymore. All of the information we need to calculate any probabilities of $\\mathbf V$ is baked into the mass function $\\phi(v)$.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nLet's say $\\mathbf V$ is a random variable that maps outcomes from the interval $[0,1]$ to either 0, 1, or 2 in the following way:\n \n$$\n\\mathbf V(\\omega)\n=\n\\begin{cases}\n0 & \\text{if } \\omega \\in [0, 0.2) \\cup (0.8,1] \\\\\n1 & \\text{if } \\omega \\in [0.2, 0.3) \\cup (0.7, 0.8] \\\\\n2 & \\text{if } \\omega \\in [0.3, 0.7] \\\\\n\\end{cases}\n$$\n \nNote that $\\mathbf V$ is discrete, but with a continuous sample space.\n \nAssuming the Lebesgue measure, what is the mass function of $\\mathbf V$? In other words, for each value that $\\mathbf V$ can take, what's the total length of the set that produces each value?\n\nUse the mass function to calculate $P(\\mathbf V \\in \\{1,0\\})$. You should get 0.6.\n \n</div>", "_____no_output_____" ], [ "### Continuous random variables and densities", "_____no_output_____" ], [ "Let's say $\\mathbf Z$ is defined by $\\mathbf Z(\\omega) = 10\\omega^2$ with $\\omega \\in [0,1]$. How can we find some kind of function that we can manipulate to calculate probabilities without reference to the sample space or measure? \n\nFor starters, we do know how to calculate probabilities. 
For instance, if we want to know $P(\\mathbf Z \\in [0.625, 2.5])$, what we need to do is find $\\mathbb P(\\mathbf Z^{-1}([0.625, 2.5]))$, which is the \"length\" of the set $\\mathbf Z^{-1}([0.625, 2.5])$ if we're using the Lebesgue measure. So what is $\\mathbf Z^{-1}([0.625, 2.5])$? Well, $\\mathbf Z(\\omega) = 10\\omega^2 \\in [0.625, 2.5]$ is the same as saying $0.625 \\le 10\\omega^2 \\le 2.5$. Dividing by 10 and taking square roots, we're left with $0.25 \\le \\omega \\le 0.5$. So $\\mathbf Z^{-1}([0.625, 2.5]) = [0.25, 0.5]$. The length of that set is clearly 0.25, so that's the probability we're looking for.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nLet $\\mathbf Z$ be as it is above. Find a formula for $P(\\mathbf Z \\in [a,b])$ for any values $a \\le b$ and $a,b \\in [0,10]$.\n\n</div>", "_____no_output_____" ], [ "Mission accomplished? Not quite. We managed to calculate a probability given the sample space and random variable, but what we want is some kind of function that we can manipulate to calculate these probabilities without reference to the sample space at all. We don't want to have to think about what $\\Omega$ is or exactly how the different outcomes map to the different realizations.", "_____no_output_____" ], [ "So here's an idea: let's bin $z$ into 10 non-overlapping buckets, like $[0,1)$, $[1,2)$ ... $[9,10]$ and calculate the probability within each of those buckets. This is just like what we did in the discrete case. We're sorting all of the nuggets in the silo into different piles depending which set of values they are destined to become, and then measuring the volume of each pile. Here's what we get when we do that for the example random variable $\\bf z$ defined in the code above:", "_____no_output_____" ] ], [ [ "from math import sqrt\n\ndef Pz(a,b):\n return sqrt(b/10) - sqrt(a/10)\n\nzs = range(10)\nps = [Pz(z,z+1) for z in zs]\nzs_labels = [f'[{z},{z+1})' for z in zs]\n\ndistribution = pd.DataFrame({'z':zs_labels, 'p':ps})\nalt.Chart(distribution, height=100, width=400).mark_bar().encode(x='z:O', y='p')", "_____no_output_____" ] ], [ [ "And why stop at 10 buckets? Let's split it up into 100.", "_____no_output_____" ] ], [ [ "zs = np.arange(0,10,0.1)\nps = [Pz(z,z+0.1) for z in zs]\nzs_labels = [f'[{z},{z+0.1})' for z in zs]\n\ndistribution = pd.DataFrame({'z':zs_labels, 'p':ps})\nalt.Chart(distribution, height=100, width=400).mark_bar().encode(alt.X('z:O',axis=None), y='p')", "_____no_output_____" ] ], [ [ "More buckets gives us more information. If we want to know $P(\\mathbf Z \\in [0,0.5))$, for instance, we can sum up the probabilities for the buckets $[0,0.1)$, $[0.1,0.2)$, ... $[0.4,0.5)$. But we can't get *any* probability. The graph doesn't have enough information to let us calculate probabilities over intervals whose ends are between two cutpoints of the buckets. It only has resolution up to increments of $0.1$ in terms of $z$. It would be nice to have a graph that lets us read off arbitrary probabilities like $P(\\mathbf Z \\in [a,b])$ just by looking at how much \"stuff\" there is between $a$ and $b$. 
Something like this:", "_____no_output_____" ] ], [ [ "z = np.arange(0.1,10,0.1)\np = 1/(2*np.sqrt(10*z)) # magic, for now...\n\ndistribution = pd.DataFrame({'z':z, 'p':p})\nalt.Chart(distribution, height=100, width=400).mark_area().encode(x='z', y='p')", "_____no_output_____" ] ], [ [ "Before I explain how I managed to make this graph, which is called a **density plot**, I want to establish an intuition for what it means. We've gone from 10 buckets, to 100 buckets, to \"infinite\" buckets. I like to think of these pictures literally: all the outcomes $\\omega$ neatly piled up on top of the labels $z$ for the values they will become. So to get $P(\\mathbf Z \\in [a,b])$ from this picture, which is just the volume of outcomes that map to values between $a$ and $b$, all we need to do is see how much stuff there is piled up between $a$ and $b$ in the picture. ", "_____no_output_____" ], [ "![](https://media.giphy.com/media/xT0xeJpnrWC4XWblEk/giphy-facebook_s.jpg)", "_____no_output_____" ], [ "To do this, we turn to a useful tool from calculus: the integral. To make the picture above, we need a curve $\\phi(z)$ such that the area under $f$ between $a$ and $b$ is $P(\\mathbf Z \\in [a,b])$ for all values $a$ and $b$. In the previous exercise you should have figured out that $P(\\mathbf Z \\in [a,b]) = \\sqrt{\\frac{b}{10}} - \\sqrt{\\frac{a}{10}}$. So what we need is the curve $\\phi(z)$ that satisfies this equation:\n\n$$\\int_a^b \\phi(z) dz = P(\\mathbf Z \\in [a,b]) = \\sqrt{\\frac{b}{10}} - \\sqrt{\\frac{a}{10}}$$", "_____no_output_____" ], [ "Looking at the integral equation, it's clear that $\\Phi(z) = \\sqrt{\\frac{z}{10}}$ is the antiderivative of $\\phi(z)$, so all we need to do to get $\\phi$ is differentiate $\\Phi$:\n\n$$\\phi(z) = \\frac{d\\Phi(z)}{dz} = \\frac{d}{dz} \\sqrt{\\frac{z}{10}} = \\frac{1}{2\\sqrt{10z}}$$", "_____no_output_____" ], [ "That's why we have `ps = [1/(2*sqrt(10*z)) for z in zs]` in the code above.", "_____no_output_____" ], [ "The function $\\phi(z)$ is called a **probability density function** (PDF), which is the continuous equivalent of the probability mass function. Its integral $\\Phi(z) = \\int_{-\\infty}^z \\phi(t)dt = P(\\mathbf Z \\le z)$ is called a **cumulative density function** (CDF). Either of these functions tell you everything you need to know about probabilities of the random variable $\\mathbf Z$. The probability that $\\mathbf Z$ takes any of the values in an arbitrary set $A$ is \n\n$$P(\\mathbf Z \\in A) = \\int_{A} \\phi(z) dz$$", "_____no_output_____" ], [ "This works the same way as the probability mass function for a discrete random variable $\\mathbf V$:\n\n$$P(\\mathbf V \\in A) = \\sum_{v \\in A} \\phi(v)$$", "_____no_output_____" ] ], [ [ "A = (1<=z) & (z<=4)\ndistribution = pd.DataFrame({'z':z, 'p':p, 'A':A})\nalt.Chart(distribution, height=100, width=400).mark_area().encode(\n x='z',\n y='p'\n) + alt.Chart(distribution.query('A')).mark_area(color='orange').encode(\n x='z',\n y='p'\n)", "_____no_output_____" ] ], [ [ "For example, the probability that $\\mathbf Z$ is in the set $[1,4]$ is the area shaded in orange above.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \n\nThe notation $\\int_{A} \\phi(z) dz$ just means $\\int_{-\\infty}^\\infty I_A(z)\\phi(z) dz$ where the <b>indicator function</b> $I_A(z)$ is 1 if $z\\in A$ and 0 else. In othe words, all we're doing is summing up the $\\phi(x)dx$s where $x \\in A$. 
That's analogous to summing up the $\\phi(v)$s where $v \\in A$ in the discrete case.\n\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nFor our random variable $\\mathbf Z$ with density $\\phi(z) =\\frac{1}{2\\sqrt{10z}}$, what is $P(\\mathbf Z \\in [0, 10])$? Calculate the probability by integrating the density function. Does your answer line up with what you expect based on our original definition of $\\mathbf Z$? \n \nHere is another random variable that, like $\\mathbf Z$, maps outcomes in $\\Omega = [0,1]$ to values in $[0,10]$: $\\mathbf W(\\omega) = 10\\omega$. Calculate $P(\\mathbf W \\in [a,b])$ for some interval $[a,b]$. What is the probability density function for $\\mathbf W$? What is $P(\\mathbf W \\in [0, 10])$?\n \nFor <i>any</i> continuous random variable $\\mathbf X$, what is $\\int_{-\\infty}^{\\infty} \\phi(x) dx$ (<i>hint</i>: what probability does this represent)? What is $\\Phi(-\\infty)$? $\\Phi(\\infty)$? \n \nIs it possible to have a random variable $\\mathbf Q$ with $\\phi_{\\mathbf Q}(q) < 0$ for some $q$ that is a possible realization of $\\mathbf Q$? Why does this not make sense?\n \nFor two values $a < b$, is it possible that $\\Phi(a) > \\Phi(b)$? Why nor why not?\n \n</div>", "_____no_output_____" ], [ "At this point, talking about the outcomes $\\omega$ is kind of silly. If two random variables have the same probability mass function or the same probability density function, then, for all intents and purposes, they are the same random variable. It doens't matter exactly which outcomes map to which values, as long as the proportions are the same. We already demonstrated this in the discrete case.\n\nTo show the same concept for continuous random variables, here is a new random variable $\\mathbf Z'$ whose sample space $\\Omega$ is $[-100, 100]$ instead of $[0,1]$, but which has the same probability density function as our other random variable $\\mathbf Z$: \n\n$$\n\\mathbf Z'(\\omega) = 10\\left(\\frac{\\omega+100}{200}\\right)^2\n$$", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nProve to yourself that $\\mathbf Z'$ has the same probability density function as $\\mathbf Z$.\n \n</div>", "_____no_output_____" ], [ "For this we use the notation $\\mathbf Z \\sim \\mathbf Z'$. Technically they are not the same since the sample spaces are different, so we shouldn't write $\\mathbf Z = \\mathbf Z'$. But as far as an observer who is outside the curtain is concerned, there is no way to tell them apart. The *distribution* of the random variable is what really matters. ", "_____no_output_____" ], [ "I think about mass or density functions as convenient abstraction layers between me and the random variable. If I want to know a probability, I don't have to go to the random variable and count up the volume of something in the sample space, I just \"query\" the mass or density. The \"query engine\" happens to be an integral or sum, and the query itself is the region of space that I want to integrate over. In a nutshell:\n\n$$\n\\mathbb P(\\mathbf Z^{-1}(A)) = \\int_A \\phi_{\\mathbf Z}(z) dz\n\\quad\n\\text{or}\n\\quad\n\\mathbb P(\\mathbf Z^{-1}(A)) = \\sum_{z \\in A} \\phi_{\\mathbf Z}(z)\n$$", "_____no_output_____" ], [ "So if we have $\\phi_Z$, we don't need to worry about figuring out what $\\mathbf Z^{-1}(A)$ is or how to do the measurement of that set using $\\mathbb P$. Finding preimages and measuring them is hard. 
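\n\nTo see the \"query engine\" idea in action, here is a small sketch: the dictionary `phi_V` below just transcribes the mass function of the discrete $\mathbf V$ from earlier, and the square roots come from the CDF $\Phi(z)=\sqrt{z/10}$ we derived for $\mathbf Z$; no sample space is needed for either query.\n\n```\nfrom math import sqrt\n\nphi_V = {-1: 0.3, 0: 0.2, 1: 0.4, 2: 0.1}   # mass function of V, copied from above\nsum(phi_V[v] for v in {0, 1})               # P(V ∈ {0,1}) = 0.6\n\nsqrt(4 / 10) - sqrt(1 / 10)                 # P(Z ∈ [1,4]) = Φ(4) - Φ(1) ≈ 0.32\n```\n\n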
Integrating or summing distribution functions is easier.\n", "_____no_output_____" ], [ "### Histograms vs. mass and density functions", "_____no_output_____" ], [ "Many of you are probably already familiar with histograms. Histograms are a way of visualizing observed data. Each observed value is stacked up on top of its approximate label (e.g. any $z$ between 0.5 and 1.5 is labeled \"1\") and the counts are plotted:", "_____no_output_____" ] ], [ [ "def Z(ω): \n return 10*(ω**2) # when ω goes into the factory, the factory makes ω^2\nZ.Ω = random.random # returns a single number between 0 and 1 when called\n\ndef realize_cont(rand_var): # run the assembly line!\n ω = Z.Ω() # returns a single number between 0 and 1\n return rand_var(ω) # push it through the factory", "_____no_output_____" ], [ "z = [realize_cont(Z) for i in range(1000)] # 1000 draws from Z\n\nplot_df = pd.DataFrame({'z':z})\nalt.Chart(plot_df, height=100, width=400).mark_bar().encode(\n alt.X('z', bin=alt.Bin(maxbins=100)),\n y='count()'\n)", "_____no_output_____" ] ], [ [ "That looks suspicously like our bucketed density plot:", "_____no_output_____" ] ], [ [ "zs = np.arange(0,10,0.1)\nps = [Pz(z,z+0.1) for z in zs]\nzs_labels = [f'[{z},{z+0.1})' for z in zs]\n\ndistribution = pd.DataFrame({'z':zs_labels, 'p':ps})\nalt.Chart(distribution, height=100, width=400).mark_bar().encode(alt.X('z:O',axis=None), y='p')", "_____no_output_____" ] ], [ [ "So what's the difference? Think about what it is we're \"stacking up\" in the bars. In the histogram, we're sorting and stacking up a *finite number* $n$ of *observed values* $z_i$ according to what they are. In the density plot, we're sorting and stacking up *all* of the *outcomes* $\\omega$ in the silo according to the values they are destined to become, and we're measuring their relative volume, not absolute counts.\n\nIn a nutshell, the histogram is what we can actually observe, given outputs from the factory. But the density descibes the inner workings of the factory itself, which we can never actually observe.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nWhat do you expect to happen to the shape of the histogram above as the number of observations is increased from $1000$ to larger and larger numbers? Can you provide an intuitive explanation for why this happens?\n \n</div>", "_____no_output_____" ], [ "### Common Distributions", "_____no_output_____" ], [ "We've seen that, for all practical purposes, a random variable is determined by its probability distribution (mass or density function). In reality, the distribution of any particular measurement (e.g. blood pressure) is unknown- it depends on a complex web of causal factors. The true density function is almost certainly so complex it's not even something that we could write down. But, for the purposes of *modeling* that measurement, we *pretend* that the density is something we can write down. \n\nOver the centuries, people have come up with a lot of distributions that are useful as models across various scenarios. Here are a few of them:", "_____no_output_____" ], [ "#### Bernoulli distribution", "_____no_output_____" ], [ "Let's say we're interested in modeling the result of a coin flip. 
The actual value (heads/tails, which we code as 1/0) of the coin flip is determined by some insanely complicated physics, but we're going to pretend that the value comes out of a little factory called $\mathbf Z$ that has the following probability mass function:\n\n\n$$\nP(\mathbf Z=z) =\n\begin{cases}\n1/2 & \text{for }z=0 \\ \n1/2 & \text{for }z=1\n\end{cases}\n$$", "_____no_output_____" ], [ "If we want to model a biased coin that comes up heads $(p\times100)$% of the time, we can use a mass function like:", "_____no_output_____" ], [ "$$\nP(\mathbf Z=z) =\n\begin{cases}\n1-p & \text{for }z=0 \\ \np & \text{for }z=1\n\end{cases}\n$$", "_____no_output_____" ], [ "This is often written as $\mathbf Z \sim \text{Bernoulli}(p)$ (read: \"$\mathbf Z$ is Bernoulli-distributed\"). The number $p$ is said to be a **parameter** of the Bernoulli distribution. It would be more accurate to say that a random variable is distributed as a **member** of the Bernoulli **family** of distributions, since, technically, every different value of $p$ encodes a different distribution, or factory, for making data.\n\nAnother way to think about it is that there's one data factory, but it has a control panel with a knob labeled \"$p$\". If $p$ is set to 0.7, we expect about 70% of the outputs to be 1. If $p$ is set to $0.1$, 10%, and so on. It's a matter of semantics whether or not you want to say that factory is representing two different factories, or merely one factory under two different operating conditions. Both perspectives are useful.", "_____no_output_____" ], [ "#### Normal Distribution\n\nLet's say we want to model the heights of everyone on Earth. We have an intuition that people are typically a bit shorter than two meters, and taller and shorter people are more and more rare the taller and shorter they get. We can pretend that height measurements come from a **normal** distribution (also called **Gaussian** distribution):", "_____no_output_____" ], [ "$$\n\phi(z) = \n\frac{1}{\sigma\sqrt{2\pi}} \ne^{-\frac{(z-\mu)^2}{2\sigma^2}}\n$$", "_____no_output_____" ], [ "Most often you'll see this written as $\mathbf Z \sim \mathcal N(\mu, \sigma)$ (read: \"$\mathbf Z$ is normally distributed\"). The numbers $\mu$ and $\sigma$ are the parameters (control knobs) of the normal distribution. \n\n![](https://upload.wikimedia.org/wikipedia/commons/7/74/Normal_Distribution_PDF.svg)", "_____no_output_____" ], [ "As you can see in the picture, $\mu$ controls where the \"bell curve\" is centered and $\sigma$ controls how wide or narrow it is.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<b>Note:</b> \n \nEvery distribution is defined by its mass or density function $\phi$. The mass or density is often a complicated function, so instead of saying something like \"$\phi(z) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{(z-\mu)^2}{2\sigma^2}}$\" every time we want a normally-distributed variable, we'll abbreviate that to \"$\mathbf Z \sim \mathcal N (\mu, \sigma)$\". But they mean the same thing. \n \nEvery time you see something like $\mathbf Z \sim \mathcal D(\theta_1, \theta_2, \dots)$, just know there is some mass or density function that is associated with the name $\mathcal D$ and which has parameters $\theta_1, \theta_2, \dots$. 
You can always look it up if you need to know exactly what it is.\n \n</div>", "_____no_output_____" ], [ "#### Others ", "_____no_output_____" ], [ "There are [hundreds](https://en.wikipedia.org/wiki/List_of_probability_distributions) of well-studied distributions available to choose from when modeling. The most important thing to know about a distribution is what values it can generate. This is sometimes called the **support** of the distribution, since if you were to make a density or mass plot, the support would be the region of the x-axis that has positive density or mass, so it's the region that appears to be \"supporting\" the curve or mass. \n\nFor example, variables that are normally-, Cauchy-, or Laplace-distributed are supported on all of $(-\infty, \infty)$. The $\chi^2$ distribution has support on $[0,\infty)$. The beta and standard uniform distributions have support on $[0,1]$. The Poisson distribution has support on the counting numbers 0, 1, 2..., and the K-categorical distribution has support on a finite number of integers 0, 1, 2, ... K. \n\nIt's also totally possible to invent your own distribution by defining your own support set $S$ and mass/density function $\phi$, as long as $\phi(s) \ge 0$ for all $s \in S$ and $\int_S \phi(s) ds = 1$ or $\sum_{s \in S} \phi(s) = 1$. These properties have to be satisfied to have a valid density or mass (see exercise in previous section).\n\nThe point of this diversity is that it is possible to model different kinds of data. Apartment rents are always positive numbers, but theoretically unbounded above (a scary thought), so perhaps $\chi^2$ is a good choice. The number of cars that pass through an intersection in a given day is always an integer, so Poisson is a reasonable choice for that. You don't have to remember any of these specific distributions or examples; just know there are many preconstructed pretend data factories out there to play with. Also know that the real data-generating process is pretty much *never* actually one of these distributions, although, sometimes, it might be well-approximated by one.
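\n\nIf you just want to see realizations from a few of these named families without wiring up a factory by hand, Python's standard `random` module already implements several of them. A small sketch (the parameter values here are made up purely for illustration):\n\n```\nimport random\n\nheights = [random.gauss(1.7, 0.1) for _ in range(5)]    # normal: pretend heights in meters\nwaits   = [random.expovariate(2.0) for _ in range(5)]   # exponential: always positive\nflips   = [random.random() < 0.3 for _ in range(5)]     # a coin that comes up True 30% of the time\n```\n\nLibraries like `scipy.stats` go further and bundle the density, CDF, and sampling routines for a long list of families.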
", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>Exercise:</b> \n\nDefine your own density function that has support on $[0,1]$. Make it so that the probability of getting a bigger number is bigger than that of getting a smaller number. Be sure that your function integrates to $1$ and is nonnegative over its support, otherwise it's not a valid density.\n \n</div>", "_____no_output_____" ], [ "## Chapter summary", "_____no_output_____" ], [ "Data in the real world is generated by complex processes that we can't ever hope to replicate. But if we want to uncover relationships between measurements, we at least need a framework for imagining what kinds of processes might be generating our data. Random variables and probability theory do that for us. \n\nRandom variables are like factories that generate data. We don't observe them directly, but we see the data they output and we can imagine different kinds of random variables that make different kinds of data. We defined a notion of probability that posits that the probability of observing a particular realization is actually just the volume of material in the factory's silo (sample space) that is destined to become that realization. This is a pure abstraction, but it turns out to capture relationships between probabilities that we would intuitively expect to hold.\n\nIt's easier to work with the probability distribution of a random variable than it is to constantly talk about the sample space and the mapping between that space and realizations. The probability distribution is a function that, when integrated over a region of the space of realizations, gives us the volume of outcomes in the sample space that map to realizations in that region. In other words: the probability that the random variable gives a realization in that region. Random variables can be continuous or discrete, but all have a distribution function that can be integrated or summed to yield probabilities.\n\nRandom variables are most often talked about in terms of their probability distributions. Defining a new variable is as easy as choosing a support and a mass or density function over that support. Some distributions are so commonly used that they have their own names and notations so that we don't have to write out their mass or density functions over and over again to refer to them.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cbc56db0052cc45c5f13903a7a89889eb3ca0492
40,004
ipynb
Jupyter Notebook
Real_Estate.ipynb
SidStark29/Real_Estate
5673769c7fc2f18179425aeb422c29f8b7a448dd
[ "MIT" ]
null
null
null
Real_Estate.ipynb
SidStark29/Real_Estate
5673769c7fc2f18179425aeb422c29f8b7a448dd
[ "MIT" ]
null
null
null
Real_Estate.ipynb
SidStark29/Real_Estate
5673769c7fc2f18179425aeb422c29f8b7a448dd
[ "MIT" ]
null
null
null
40,004
40,004
0.559944
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "datasets= pd.read_csv('/content/sample_data/Real estate.csv')", "_____no_output_____" ], [ "datasets", "_____no_output_____" ], [ "X= datasets.iloc[:,1:7].values\nX", "_____no_output_____" ], [ "y=datasets.iloc[:,-1].values\ny", "_____no_output_____" ], [ "X=X.reshape(414,6)\nX", "_____no_output_____" ], [ "y=y.reshape(414,)\ny", "_____no_output_____" ], [ "#from sklearn.compose import ColumnTransformer\n#from sklearn.preprocessing import OneHotEncoder", "_____no_output_____" ], [ "#encoder = OneHotEncoder()\n\n#ct = ColumnTransformer(transformers=[('encoder',encoder, [3])], remainder='passthrough')", "_____no_output_____" ], [ "#X = ct.fit_transform(X)", "_____no_output_____" ], [ "#X", "_____no_output_____" ] ], [ [ "Splitting the dataset", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state= 25)", "_____no_output_____" ], [ "print(X_train)", " (0, 48)\t1.0\n (0, 232)\t1.0\n (0, 234)\t1.0\n (0, 238)\t1.0\n (0, 245)\t2013.5\n (0, 246)\t25.3\n (0, 247)\t1583.722\n (0, 248)\t24.96622\n (1, 181)\t1.0\n (1, 232)\t1.0\n (1, 234)\t1.0\n (1, 243)\t1.0\n (1, 245)\t2013.5\n (1, 246)\t6.5\n (1, 247)\t90.45606\n (1, 248)\t24.97433\n (2, 12)\t1.0\n (2, 232)\t1.0\n (2, 234)\t1.0\n (2, 236)\t1.0\n (2, 245)\t2012.75\n (2, 246)\t16.1\n (2, 247)\t4066.587\n (2, 248)\t24.94297\n (3, 99)\t1.0\n :\t:\n (368, 248)\t24.97795\n (369, 108)\t1.0\n (369, 232)\t1.0\n (369, 234)\t1.0\n (369, 240)\t1.0\n (369, 245)\t2013.5\n (369, 246)\t13.6\n (369, 247)\t492.2313\n (369, 248)\t24.96515\n (370, 112)\t1.0\n (370, 232)\t1.0\n (370, 234)\t1.0\n (370, 244)\t1.0\n (370, 245)\t2013.333\n (370, 246)\t7.1\n (370, 247)\t379.5575\n (370, 248)\t24.98343\n (371, 123)\t1.0\n (371, 232)\t1.0\n (371, 234)\t1.0\n (371, 240)\t1.0\n (371, 245)\t2013.167\n (371, 246)\t26.6\n (371, 247)\t482.7581\n (371, 248)\t24.97433\n" ], [ "print(y_train)", "[ 30.6 63.9 12.9 40.3 30.5 35.1 48.2 59.5 46.6 39.7 46.1 46.6\n 42.5 30.5 23.5 32.4 20. 37.7 54.4 29.3 36.3 37.4 31.9 78.\n 38.6 31.1 11.6 56.3 38.3 33.1 48. 36.5 52.5 45.1 38.8 25.6\n 42.7 59.6 13.4 46.1 44.3 34.6 48.2 31.7 47.7 25.5 47.3 42.1\n 39.6 25.7 29.5 67.7 28.5 39.3 62.9 41.9 30.7 15.6 25.3 25.6\n 41.4 37.2 51.8 28.5 46.4 48.5 54.4 26.5 39.7 55. 41.6 17.4\n 40.6 40.6 27.7 42.2 50.4 38.1 7.6 42.3 22.1 49.7 43.1 39.\n 34.1 38.5 27.3 21.4 29.3 21.8 40.9 51.4 41.5 38.1 15.4 25.\n 58. 61.9 27. 36.6 52.2 31.3 36.7 11.2 22.6 25.7 56.2 29.8\n 31.3 34.1 37.3 20.5 42.3 37.4 42.8 12.8 23.7 41. 17.7 53.3\n 23.8 43.2 35.6 43.1 15.9 24.7 28.1 28.6 15.5 45.5 63.3 53.5\n 50.7 57.4 55.3 29.3 51.6 39.5 39.7 71. 36.8 53.3 42.6 42.2\n 52.7 33.1 33.4 70.1 36.5 52.2 42. 39.6 40. 21.3 55. 59.\n 31.3 51. 22.1 44.3 18.3 54.8 40.8 36.3 42. 31.6 46. 29.7\n 32.9 46.7 40.6 26.2 39.4 23.6 44. 31.3 38.4 37.9 53. 44.\n 58.1 37.5 54.4 16.1 34.2 30. 20.7 53. 25.6 51.8 24.7 34.4\n 55.1 41.4 41.2 58.1 50.8 40.1 20.8 34. 30.9 34.2 60.7 44.9\n 21.7 43.8 30.8 45.7 15.6 24.8 14.4 51. 47. 42.3 18.2 13.2\n 39.1 26.6 26.9 32.5 22.8 51.7 48.5 22.8 41.2 15. 48.1 34.7\n 23.6 23.2 21.5 42.5 42.4 28.9 55.3 20.9 27. 25.3 12.8 48.1\n 46.6 37. 18.8 53.7 34.1 51.7 40.3 23.8 33.4 63.3 49.3 50.5\n 62.2 32.9 29.3 60.7 78.3 25.3 29.5 35.6 22.3 27. 44.8 19.1\n 40.3 22.3 40.2 40.5 40.8 31.5 13.7 44.5 33.6 52.2 40.8 28.9\n 57.1 20.9 37.4 18.8 30.6 42.5 43.2 51.6 45.1 32.1 40.3 26.5\n 22. 
38.1 13.8 37.8 36.9 53.9 48. 56.8 27.7 38.9 42. 23.\n 19. 55.9 45.2 32.2 21.8 36.8 45.3 26.5 42.3 36.7 117.5 20.7\n 50.2 69.7 34.3 45.5 43.5 31.1 50. 45.4 58.8 47.3 41.1 24.6\n 53.5 24.4 37.5 12.2 73.6 28.8 39.3 47.1 30.1 24.5 21.8 40.2\n 35.3 47.1 38.4 42. 43.5 57.8 47.4 22.9 44.2 28.4 28.4 45.9\n 61.5 29.4 43.9 30.7 35.7 42.9 23. 48. 28.8 43.4 41. 40.5\n 37.9 27.3 18.3 46.8 25.9 44.7 17.4 49.5 37.5 40.1 49.8 37.5]\n" ], [ "print(X_test)", " (0, 219)\t1.0\n (0, 232)\t1.0\n (0, 234)\t1.0\n (0, 237)\t1.0\n (0, 245)\t2013.0\n (0, 246)\t13.0\n (0, 247)\t750.0704\n (0, 248)\t24.97371\n (1, 152)\t1.0\n (1, 232)\t1.0\n (1, 235)\t1.0\n (1, 245)\t2013.25\n (1, 246)\t1.1\n (1, 247)\t193.5845\n (1, 248)\t24.96571\n (2, 201)\t1.0\n (2, 232)\t1.0\n (2, 234)\t1.0\n (2, 241)\t1.0\n (2, 245)\t2013.083\n (2, 246)\t10.1\n (2, 247)\t279.1726\n (2, 248)\t24.97528\n (3, 137)\t1.0\n (3, 232)\t1.0\n :\t:\n (38, 248)\t24.94155\n (39, 121)\t1.0\n (39, 232)\t1.0\n (39, 234)\t1.0\n (39, 239)\t1.0\n (39, 245)\t2012.917\n (39, 246)\t21.2\n (39, 247)\t512.5487\n (39, 248)\t24.974\n (40, 110)\t1.0\n (40, 232)\t1.0\n (40, 234)\t1.0\n (40, 242)\t1.0\n (40, 245)\t2013.333\n (40, 246)\t3.9\n (40, 247)\t49.66105\n (40, 248)\t24.95836\n (41, 27)\t1.0\n (41, 232)\t1.0\n (41, 234)\t1.0\n (41, 238)\t1.0\n (41, 245)\t2013.083\n (41, 246)\t7.6\n (41, 247)\t2175.03\n (41, 248)\t24.96305\n" ], [ "print(len(X_test))\nprint(len(X_train))", "_____no_output_____" ], [ "print(y_test)", "[37. 49. 47.9 40.6 41. 24.7 43.7 45.4 46.2 48.6 19.2 34.6 42.2 55.\n 47. 47.7 40.9 13. 39.4 23.1 23.9 38.2 55.2 37.4 62.1 63.2 36.2 23.1\n 14.7 55.5 19.2 23.5 26.6 27.3 35.5 49.3 24.7 18.6 16.7 42.5 56.8 27.7]\n" ], [ "print(len(y_test))\nprint(len(y_train))\nX_train.shape\n", "42\n372\n" ] ], [ [ "Model Setup", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "model = LinearRegression()", "_____no_output_____" ] ], [ [ "Training", "_____no_output_____" ] ], [ [ "model.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "Predicting", "_____no_output_____" ] ], [ [ "y_pred = model.predict(X_test)", "_____no_output_____" ], [ "[y_test, y_pred]", "_____no_output_____" ] ], [ [ "Evaluating Model", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score", "_____no_output_____" ], [ "r2_score(y_test, y_pred)", "_____no_output_____" ], [ "X_test[0]", "_____no_output_____" ] ], [ [ "Predicting on single value", "_____no_output_____" ] ], [ [ "x1= ([[2012.917,32, 84.87, 10, 24.98, 121.54]])\ny1 = model.predict(x1)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cbc57a9dbc34e68aeaffb8663ff2487e4fc662f0
3,546
ipynb
Jupyter Notebook
real-time-mask-detction.ipynb
satyarishi/face-mask-alert-system
2358fac469aa1353587a10693a92b8283ea87c3b
[ "MIT" ]
1
2021-08-20T08:52:07.000Z
2021-08-20T08:52:07.000Z
real-time-mask-detction.ipynb
satyarishi/face-mask-alert-system
2358fac469aa1353587a10693a92b8283ea87c3b
[ "MIT" ]
null
null
null
real-time-mask-detction.ipynb
satyarishi/face-mask-alert-system
2358fac469aa1353587a10693a92b8283ea87c3b
[ "MIT" ]
1
2021-07-22T09:42:48.000Z
2021-07-22T09:42:48.000Z
27.488372
109
0.525099
[ [ [ "from keras.models import load_model\nimport cv2\nimport numpy as np", "_____no_output_____" ], [ "# Load trained deep learning model\nmodel = load_model('face-mask-image-classification-with-keras.h5')", "_____no_output_____" ], [ "# Classifier to detect face-mask\nface_det_classifier=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')", "_____no_output_____" ], [ "# Capture video for face scanning\nsource=cv2.VideoCapture(0)", "_____no_output_____" ], [ "# Dictionaries caontaing details of wearing mask and color of rectangle arund face.\n# Green for wearing and Red for not wearing face mask\nlabels_dict={0:'MASK',1:'NO MASK'}\ncolor_dict={0:(0,255,0),1:(0,0,255)}", "_____no_output_____" ], [ "while(True):\n\n ret, img=source.read()\n grayscale_img =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = face_det_classifier.detectMultiScale(grayscale_img,1.3,5) \n\n for x,y,w,h in faces:\n \n face_img=grayscale_img[y:y+w,x:x+w]\n resized = cv2.resize(face_img,(100,100))\n normalized=resized/255.0\n reshaped=np.reshape(normalized,(1,100,100,1))\n result=model.predict(reshaped)\n\n label=np.argmax(result,axis=1)[0]\n \n cv2.rectangle(img,(x,y),(x+w,y+h),color_dict[label],2)\n cv2.rectangle(img,(x,y-40),(x+w,y),color_dict[label],-1)\n cv2.putText(img, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)\n #If level = 1 then it means wearing No mask and 0 means wearing mask\n if (label == 1):\n img = cv2.imwrite('nomask.png',img)\n %run mail.py nomask.png\n #messagebox.showwarning(\"Access Denied\")\n #%run mail.py\n #%run alert_notification.py\n #%run alert_email.py\n \n \n else:\n pass\n break\n cv2.imshow('Live face scanning',img)\n key=cv2.waitKey(1)\n \n if(key==27):\n break\n \ncv2.destroyAllWindows()\nsource.release() ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cbc59e915068911a47942b30a5ce0ea211832a92
26,630
ipynb
Jupyter Notebook
notebooks/awkwardbasic.ipynb
yihui-lai/CoffeaTutorial
aa104e54700f847c58ed77ff99faa7173d38a584
[ "MIT" ]
null
null
null
notebooks/awkwardbasic.ipynb
yihui-lai/CoffeaTutorial
aa104e54700f847c58ed77ff99faa7173d38a584
[ "MIT" ]
null
null
null
notebooks/awkwardbasic.ipynb
yihui-lai/CoffeaTutorial
aa104e54700f847c58ed77ff99faa7173d38a584
[ "MIT" ]
null
null
null
41.937008
993
0.613819
[ [ [ "# The basics of awkward arrays\n\nAt the front and formost of coffea is a completely new syntax for expressing analysis computations: `awkward arrays` and it's index based notation. For people coming from a\nmore traditional loop-based programming syntax, the syntax will take some getting use \nto, but this tutorial can hopefully help you understand how to understand the syntax \nand how to understand the various method.\n\nLet use begin by first understanding what you need to explore the contents of a typical\nntuple file using coffea related tools. First you can download the dummy ntuples file and\n the corresponding schema files from the main repository to your working directory:\n\n```sh \ncd <WORKINGDIRECTORY>\nwget https://raw.githubusercontent.com/UMDCMS/CoffeaTutorial/main/samples/dummy_nanoevents.root \nwget https://raw.githubusercontent.com/UMDCMS/CoffeaTutorial/main/samples/dummyschema.py\n```\n\nWe can use the usual ROOT tools to look at the contents of the `dummy_nanoevent.root` \nfile. But let us focus on using coffea tools alone. \n\nFirst import the relevent coffea objects: \n\n", "_____no_output_____" ] ], [ [ "from coffea.nanoevents import NanoEventsFactory\nfrom dummyschema import DummySchema \nimport numpy as np \nimport awkward1 as ak", "_____no_output_____" ] ], [ [ "Now we can create the event list as an awkward array using coffea tools like:\n", "_____no_output_____" ] ], [ [ "events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root', # The file, notice the prefix `file:` for local file operation\n 'Events', # Name of the tree object to open \n entry_stop=50, # Limit the number of events to process, nice for small scale debugging\n schemaclass=DummySchema\n).events() ", "_____no_output_____" ] ], [ [ "The last `schemaclass` argument will be let unexplained for now, see the schema tutorials to \nlearn more about what this is. Here we have created the events as a awkward array. To see that \nis stored in the array we can use:\n", "_____no_output_____" ] ], [ [ "print(events.fields)", "['Electron', 'Muon', 'Jet']\n" ] ], [ [ "Indicating the collections that are stored in the awkward array. To see how many events exists in in our file\nwe can use the typical python method:\n", "_____no_output_____" ] ], [ [ "print(len(events))", "50\n" ] ], [ [ "The 50 here in corresponds correctly to the `entry_stop` used in to open the file. Next we can, of course, start to explore the contents of the various object collections. One can acess the fields of the event as if it was a regular data memeber ", "_____no_output_____" ] ], [ [ "print(events.Electron.fields)\nprint(events.Jet.fields)", "['pt', 'eta', 'phi', 'mass', 'charge', 'flag', 'dxy', 'dxyErr', 'dz', 'dzErr']\n['pt', 'eta', 'phi', 'energy', 'ID', 'SubjetsCounts', 'Subjets']\n" ] ], [ [ "Ah ha! We we are starting to see numbers we can play around with. Notice that coffea was written with High energy physics analysis in mind, so even if the electron energy doesn't look like it is stored from the output of the fields, we can still access methods that we will typically associate with 4-vectors. In particular, notice that we can call the `energy` field of the electron collection, even though the energy field isn't explicitly defined. Coffea is designed with 4 vectors in mind, so the energy collection is calculated on the fly. ", "_____no_output_____" ] ], [ [ "print(events.Electron.pt)\nprint(events.Electron.energy)", "[[49.3, 38.1], [48.9, 43.9, 50.8, 43.9], ... 
[48.9, 42.9, 41.7, 56.3, 60.3, 35.5]]\n[[52.6, 41.8], [49.5, 45.1, 56.6, 44.1], ... [56.8, 44.9, 55.5, 56.7, 82.5, 35.6]]\n" ] ], [ [ "Now, looking at the output, we can begin to get a grasp of what awkward arrays are: the variable `events.Electron.pt` variable represents a N events times A objects array of floating point, the `events.Electon` variable represents and N events times A objects time K fields of *collection* of floating point arrays, and the `events` variable reprents ths N times a certain set of collections (in this case three collections: `['Electron', 'Muon', and 'Jet']`) is recorded. \n\nThe \"awkward\" part of the array refers to two parts, first the value of `A` is differnt for each event and for each collection. In this demonstration, our first event has 2 electrons, the second event has 4 electron and so one. The second part of each collection can have a different number of fields. In a sense, the `events`, `Electron` and `pt` variables are just a easy way for represting the final `NxA` array that we might be intested in for the analysis. In our case the `N` number of events is whatis called as the outer most **dimension** or axis of the various objects, `A` is the one inner dimesion of the of array. `K` is not a true dimesion in the sense it can be though of a book keeping object used to keep track of how many awkward arrays are present, so in this sense, we can say the the `events.Electron` is a `NxA` object/collection array, as opposed to the `events.Electron.pt` being a `NxA` data array.\n\n\n\nWe can use the usual index notation to look at a particular object of interest. For instance if we want to look at the 0-th electron of the 1-st event in our event list, we can write:", "_____no_output_____" ] ], [ [ "print(events[1].Electron[0])", "... dzErr: 0.0284, eta: 0.149, flag: 3, mass: 0.00051, phi: 0.997, pt: 48.9}\n" ] ], [ [ "But the real power of using awkward arrays is for using awkward arrays comes in when you don't explicily use a concrete index, and instead call calculation all in an abstract form", "_____no_output_____" ], [ "## Basic object and event selection \n\nLet us start with the most basic example of event selection. Say we want to select event with electrons that have $p_T > 50$ GeV and $|\\eta| < 0.5$. The awkward array allows us to write something like:", "_____no_output_____" ] ], [ [ "mask_pt = events.Electron.pt > 50\nmask_eta = np.abs(events.Electron.eta) < 0.5\nele_mask = mask_pt & mask_eta\nprint(mask_pt)\nprint(mask_eta)\nprint(ele_mask)", "[[False, False], [False, False, True, ... [False, False, False, True, True, False]]\n[[True, True], [True, True, True, True, ... [False, True, False, True, False, True]]\n[[False, False], [False, False, True, ... [False, False, False, True, False, False]]\n" ] ], [ [ "We can see that the usual logic comparision operators generate a `NxA` boolean array telling use which electron (or more specifically which electron.pt and electron etas) pass this particular selection criteia. This particular boolean array generated from a logic operation on usual arrays is typically call a `mask`. We can use the typical boolean operation `&` operation to get the intersect of multiple masks, or maybe the `|` operator for the union. Now the problem is where can we use this mask? 
The answer is any array that has a `NxA` structure and recieve these masks to create a reduced array!", "_____no_output_____" ] ], [ [ "print(events.Electron.pt[ele_mask])\nprint(events.Electron.eta[ele_mask])\nselectedElectrons = events.Electron[ele_mask]\nprint(selectedElectrons.pt)", "[[], [50.8], [68.5], [], [61], [54.7, ... 54.6, 58.9], [54.2], [], [54.5], [56.3]]\n[[], [0.472], [0.0729], [], [0.0954], ... 0.136], [0.155], [], [0.411], [0.125]]\n[[], [50.8], [68.5], [], [61], [54.7, ... 54.6, 58.9], [54.2], [], [54.5], [56.3]]\n" ] ], [ [ "Probably the most important place to put the mask is the directly in the `events.Electron` index, this generates a new collection of electrons that preserves the `NxA` structure, but have verious collection instances filterd out. If you are familiar with `numpy`, this sort if index-based array filtering look familiar. The difference is that because awkward arrays accecpt arrays of varying inner dimensions, it can truely preserve the structure of such selection, rather than having everything be flattend out. ", "_____no_output_____" ] ], [ [ "x = np.array([1,2,3,4,5,6,7,8,1,1,1,2])\nprint( x[x% 2 == 0])\ny = np.array([[1,2,3,4],[5,6,7,8],[1,1,1,2]])\nprint( y[y%2==0])\nz = ak.Array([[1,2,3,4],[5,6,7,8],[1,1,1,2]])\nprint(z[z%2==0])", "[2 4 6 8 2]\n[2 4 6 8 2]\n[[2, 4], [6, 8], [2]]\n" ] ], [ [ "Now suppose we only want events that have at least 1 electron selected event. What we need are a set of functions that can reduces this `NxA'` array to something of just dimesion `N`. Formally this is called **reduction** operations, and the awkward package has a large set of functions that can reduce the dimension of arrays. In our case, what we want is:", "_____no_output_____" ] ], [ [ "electron_count = ak.count(selectedElectrons.pt, axis=-1)\nevent_mask = electron_count >= 1\nprint(event_mask.__repr__)", "<bound method Array.__repr__ of <Array [False, True, True, ... True, True] type='50 * bool'>>\n" ] ], [ [ "To break this down, `ak.count`, as the method name suggests \"counts\" the number of elements along a certain axis, in our case, what we are intersted is the inner most dimension/axis, hence the typical python notation of `axis=-1`. Using this we can run the event selection using the usual masking notation:", "_____no_output_____" ] ], [ [ "selectedEvents = events[event_mask]\nprint(event_mask)\nprint(events.Electron.pt)\nprint(selectedEvents.Electron.pt)\nprint(len(selectedEvents))", "[False, True, True, False, True, True, ... False, True, True, False, True, True]\n[[49.3, 38.1], [48.9, 43.9, 50.8, 43.9], ... [48.9, 42.9, 41.7, 56.3, 60.3, 35.5]]\n[[48.9, 43.9, 50.8, 43.9], [46.8, 56.4, ... [48.9, 42.9, 41.7, 56.3, 60.3, 35.5]]\n30\n" ] ], [ [ "Here we can confirm that the first event to pass the event selection is the 1-st event in the event list, and the 0-th instance in the `selectedEvents.Electron.pt` result of the selectedEvents indeed corresponds to the values stored in the 1-st event of the orignal event list. ", "_____no_output_____" ], [ "## Object storce and collection creation\n\nHaving completed the selection, we might be rather annoyed that we didn't just store the selected Electron, since these are the objects that we are likely going to use for further calculation. Following from the code above, what we can do is add the additional selection to the `selectedElectrons` collections. 
This is valid since the `N` dimesional event mask \"makes sense\" performed on the `NxA'` dimesional selectedElectrons object.\n", "_____no_output_____" ] ], [ [ "our_selectedElectrons = selectedElectrons[event_mask]\nprint(our_selectedElectrons.pt)\nprint(len(our_selectedElectrons))", "[[50.8], [68.5], [61], [54.7], [59, ... [50.9, 54.6, 58.9], [54.2], [54.5], [56.3]]\n30\n" ] ], [ [ "However, this is rather undesirable, since now we have some a whole bunch of detected collections, and event lists that we need to take care of: `selectedElectrons`, `selectedEvents`, `out_selectedEvents`. And this is with just one toy object selection. One can imagine if there isn't some sort of way to store collections into events, the analysis code will get out of hands very quick. This also ties into the topic that there might be certain physics quantities that are specific to a certain analysis that would might be used for the analysis object selection and would be nice to add to the electron collection if it isn't a standard variable that is maintained by the NanoAOD development team. Here we are going to add a very artificial example of calculating the inverse of the electron pt, then selecting on the inverse pt. This very simple example will demonstrate the typical syntax used for storing variables as well as exposing one of the parculiar quirks of awkward arrays:", "_____no_output_____" ] ], [ [ "print('First attempt at adding extended variables to events')\nevents.Electron['invpt'] = 1/events.Electron.pt\nevents['selectedElectron_1'] = events.Electron[events.Electron.pt > 50]\n\nprint(events.fields)\nprint(events.Electron.fields)\nprint(events.selectedElectron_1.fields)\n\nprint('\\n\\nSecond attemp at adding extended variables to events')\nevents['myElectron'] = events.Electron[:]\nevents.myElectron['invpt'] = 1/events.myElectron.pt\nevents['selectedElectron_2'] = events.myElectron[events.myElectron.pt > 50]\n\nprint(events.fields)\nprint(events.myElectron.fields)\nprint(events.selectedElectron_2.fields)\n\nprint('\\n\\nThird attemp at adding extended variables to events')\nmyElectron = events.Electron[:]\nmyElectron['invpt'] = 1/myElectron.pt\nevents['selectedElectron_3'] = myElectron[myElectron.pt > 50]\n\nprint(events.fields)\nprint(myElectron.fields)\nprint(events.selectedElectron_3.fields)\n", "First attempt at adding extended variables to events\n['Electron', 'Muon', 'Jet', 'selectedElectron_1']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt']\n\n\nSecond attemp at adding extended variables to events\n['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt']\n\n\nThird attemp at adding extended variables to events\n['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2', 'selectedElectron_3']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n" ] ], [ [ "\nLets get the straightforward part of the code clear up. 
The addition of collections looks very straight forward, one can think of the `events` as something that looks like a \"dictionary of collection with a common outer dimension\", so the addition of the two electron collections to the event has a very distionary-esque notation. What is strange is the persistence of the extended collection for the electrons. Logically, the operation looks identical, but the first attempt to add the new variable `invpt` directly to `events.Electron` fails to persist, and thus all direct extensions of `events.Electron` doesn't include the new `invpt` field. \n\nThe reason for this is rather technical regarding the mutability of objects in python and awkward. The rule-of-thumb is that collections that are directly generated from the file, (a.k.a. the collections directly obtained listed the `events.fields` immediate after opening a file) can **never** be altered, and therefore cannot have extended variables added to them. To create an extended variable to some collection, we will need to make some sort of copy of the original either by some trivial kinematic selection (ex. `myElectrons = events.Electrons[events.Electrons.pt > 0]`) or some trivial splicing (`myElectrons = events.Electrons[:]`). Another feature of mutability is that once the collection is added to the event collection, it becomes immutable. That is why the third attempt is the one that adds the both the electron extended variable and the extednded electron collection to the event.\n\nBecause of these quirks, it would typically be worth it to wrap the object selection into a function if the object selection is typical within an analysis, and it also helps with code readability", "_____no_output_____" ] ], [ [ "def SelectElectrons(electron):\n electron = electron[electron.pt > 50]\n electron['invpt'] = 1.0 / electron.pt\n return electron\n\nevents['selectedElectron_f'] = SelectElectrons(events.Electron)\nprint(events.fields)\nprint(events.selectedElectron_f.fields)", "['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2', 'selectedElectron_3', 'selectedElectron_f']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n" ] ], [ [ "Once the new object collection has been added to the event collection, they will persist to arbitrary levels of event selection:\n", "_____no_output_____" ] ], [ [ "myevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 0 ]\n\nprint(myevents.fields)\nprint(myevents.selectedElectron_f.fields)\n\nmyevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 1 ]\n\nprint(myevents.fields)\nprint(myevents.selectedElectron_f.fields)\nmyevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 2 ]\n\nprint(myevents.fields)\nprint(myevents.selectedElectron_f.fields)\n", "['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2', 'selectedElectron_3', 'selectedElectron_f']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2', 'selectedElectron_3', 'selectedElectron_f']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n['Electron', 'Muon', 'Jet', 'selectedElectron_1', 'myElectron', 'selectedElectron_2', 'selectedElectron_3', 'selectedElectron_f']\n['charge', 'dxy', 'dxyErr', 'dz', 'dzErr', 'eta', 'flag', 'mass', 'phi', 'pt', 'invpt']\n" ] ], [ [ "## Summary of basics\n\nSo to put this together into a single 
code block, suppose our analysis consisten of selecting events that have at least 2 electron with $p_{T} > 50GeV$, $|\\eta| < 0.5$, and we want to calculate the average of all such electron's iverserse $p_{T}$ within the selected events. Our awkward array code would look something like:\n", "_____no_output_____" ] ], [ [ "events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root',\n 'Events', \n entry_stop=50, \n schemaclass=DummySchema).events() \n\n## Object selection \nselectedElectron = events.Electron[ (events.Electron.pt > 50) & \n (np.abs(events.Electron.eta)<0.5) ]\nselectedElectron['invpt'] = 1/selectedElectron.pt\nevents['selectedElectron'] = selectedElectron\n\n# Event selection \nevents = events[ak.count(events.selectedElectron.pt,axis=-1) >= 2]\n\n# Calculating the total average \nprint(ak.sum(events.selectedElectron.invpt)/ak.count(events.selectedElectron.invpt))\n\n", "0.01744755860921499\n" ] ], [ [ "On total this is 4 statements (not counting the file reading step) used to make this analysis. Compare that with the loop based notation:", "_____no_output_____" ] ], [ [ "events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root',\n 'Events', \n entry_stop=50, \n schemaclass=DummySchema).events() \n\ncount = 0 \nsuminv = 0 \nfor i in range(len(events)):\n is_good = [] \n for j in range(len(events[i].Electron)):\n if events[i].Electron[j].pt > 50 and np.abs(events[i].Electron[j].eta) < 0.5:\n is_good.append(j)\n if len(is_good) >= 2:\n for j in is_good:\n count = count +1 \n suminv += 1.0/ events[i].Electron[j].pt\n\nprint(suminv/count)", "0.017447559494651994\n" ] ], [ [ "Notice the results are only difference because the 32bit to 64 bit float conversion is happening at different places. For awkward arrays, this is happening only after the sum has been performed. For the loop based approach this happening everytime the `+=` operator is called.\n\nFor the loop based analysis, notice for such a simple analysis, many many lines of code are dedicated to just book keeping stuff: number of electrons passing criteria, adding a counter variable and sum variable... etc, instead of actualy analysis computation. The array based notation for expressing the analysis is much cleaner, if rather more unfamiliar to typical users. \n\nOf course, this isn't the end. Physics analysis are typically more involved that just basic selection and counting. In the next session, we will talk about how to perform more involed calculations with awkward arrays that involves multiple collections within an event collection.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cbc5ad0d237768f6fd14903c9bbefe73a4921130
13,614
ipynb
Jupyter Notebook
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
bmanikan/deep-learning-udacity
5030ac6f078337a4f7ab3ee42844fe3457a65d35
[ "MIT" ]
null
null
null
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
bmanikan/deep-learning-udacity
5030ac6f078337a4f7ab3ee42844fe3457a65d35
[ "MIT" ]
7
2019-12-16T22:05:29.000Z
2022-02-10T00:15:49.000Z
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
bmanikan/deep-learning-udacity
5030ac6f078337a4f7ab3ee42844fe3457a65d35
[ "MIT" ]
null
null
null
34.465823
349
0.572572
[ [ [ "# Multi-Layer Perceptron, MNIST\n---\nIn this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.\n\nThe process will be broken down into the following steps:\n>1. Load and visualize the data\n2. Define a neural network\n3. Train the model\n4. Evaluate the performance of our trained model on a test dataset!\n\nBefore we begin, we have to import the necessary libraries for working with data and PyTorch.", "_____no_output_____" ] ], [ [ "# import libraries\nimport torch\nimport numpy as np", "_____no_output_____" ] ], [ [ "---\n## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.\n\nThis cell will create DataLoaders for each of our datasets.", "_____no_output_____" ] ], [ [ "from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)", "_____no_output_____" ] ], [ [ "### Visualize a Batch of Training Data\n\nThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n ax.set_title(str(labels[idx].item()))", "_____no_output_____" ] ], [ [ "### View an Image in More Detail", "_____no_output_____" ] ], [ [ "img = np.squeeze(images[1])\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\nwidth, height = img.shape\nthresh = img.max()/2.5\nfor x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')", "_____no_output_____" ] ], [ [ "---\n## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)\n\nThe architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. 
This particular example uses two hidden layers and dropout to avoid overfitting.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\n## TODO: Define the NN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # linear layer (784 -> 1 hidden node)\n self.fc1 = nn.Linear(28 * 28, 1)\n\n def forward(self, x):\n # flatten image input\n x = x.view(-1, 28 * 28)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n return x\n\n# initialize the NN\nmodel = Net()\nprint(model)", "_____no_output_____" ] ], [ [ "### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)\n\nIt's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax funtion to the output layer *and* then calculates the log loss.", "_____no_output_____" ] ], [ [ "## TODO: Specify loss and optimization functions\n\n# specify loss function\ncriterion = None\n\n# specify optimizer\noptimizer = None", "_____no_output_____" ] ], [ [ "---\n## Train the Network\n\nThe steps for training/learning from a batch of data are described in the comments below:\n1. Clear the gradients of all optimized variables\n2. Forward pass: compute predicted outputs by passing inputs to the model\n3. Calculate the loss\n4. Backward pass: compute gradient of the loss with respect to model parameters\n5. Perform a single optimization step (parameter update)\n6. Update average training loss\n\nThe following loop trains for 30 epochs; feel free to change this number. For now, we suggest somewhere between 20-50 epochs. As you train, take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data. ", "_____no_output_____" ] ], [ [ "# number of epochs to train the model\nn_epochs = 30 # suggest training between 20-50 epochs\n\nmodel.train() # prep model for training\n\nfor epoch in range(n_epochs):\n # monitor training loss\n train_loss = 0.0\n \n ###################\n # train the model #\n ###################\n for data, target in train_loader:\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update running training loss\n train_loss += loss.item()*data.size(0)\n \n # print training statistics \n # calculate average loss over an epoch\n train_loss = train_loss/len(train_loader.sampler)\n\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(\n epoch+1, \n train_loss\n ))", "_____no_output_____" ] ], [ [ "---\n## Test the Trained Network\n\nFinally, we test our best model on previously unseen **test data** and evaluate it's performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.\n\n#### `model.eval()`\n\n`model.eval(`) will set all the layers in your model to evaluation mode. 
This affects layers like dropout layers that turn \"off\" nodes during training with some probability, but should allow every node to be \"on\" for evaluation!", "_____no_output_____" ] ], [ [ "# initialize lists to monitor test loss and accuracy\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\n\nmodel.eval() # prep model for *evaluation*\n\nfor data, target in test_loader:\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update test loss \n test_loss += loss.item()*data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1)\n # compare predictions to true label\n correct = np.squeeze(pred.eq(target.data.view_as(pred)))\n # calculate test accuracy for each object class\n for i in range(len(target)):\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# calculate and print avg test loss\ntest_loss = test_loss/len(test_loader.sampler)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n str(i), 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))", "_____no_output_____" ] ], [ [ "### Visualize Sample Test Results\n\nThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.", "_____no_output_____" ] ], [ [ "# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# get sample outputs\noutput = model(images)\n# convert output probabilities to predicted class\n_, preds = torch.max(output, 1)\n# prep images for display\nimages = images.numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(\"{} ({})\".format(str(preds[idx].item()), str(labels[idx].item())),\n color=(\"green\" if preds[idx]==labels[idx] else \"red\"))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc5ae229488f66b5a12d30643d761e847189639
40,476
ipynb
Jupyter Notebook
Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb
gordicaleksa/get-started-with-JAX
036f5227a57f902861031f95001df368a5572ef7
[ "MIT" ]
234
2021-10-31T15:12:13.000Z
2022-03-30T21:43:33.000Z
Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb
gordicaleksa/get-started-with-JAX
036f5227a57f902861031f95001df368a5572ef7
[ "MIT" ]
null
null
null
Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb
gordicaleksa/get-started-with-JAX
036f5227a57f902861031f95001df368a5572ef7
[ "MIT" ]
28
2021-10-31T16:35:24.000Z
2022-03-20T23:02:56.000Z
81.440644
9,434
0.76806
[ [ [ "<a href=\"https://colab.research.google.com/github/gordicaleksa/get-started-with-JAX/blob/main/Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# MLP training on MNIST", "_____no_output_____" ] ], [ [ "import numpy as np\nimport jax.numpy as jnp\nfrom jax.scipy.special import logsumexp\nimport jax\nfrom jax import jit, vmap, pmap, grad, value_and_grad\n\nfrom torchvision.datasets import MNIST\nfrom torch.utils.data import DataLoader", "_____no_output_____" ], [ "seed = 0\nmnist_img_size = (28, 28)\n\ndef init_MLP(layer_widths, parent_key, scale=0.01):\n\n params = []\n keys = jax.random.split(parent_key, num=len(layer_widths)-1)\n\n for in_width, out_width, key in zip(layer_widths[:-1], layer_widths[1:], keys):\n weight_key, bias_key = jax.random.split(key)\n params.append([\n scale*jax.random.normal(weight_key, shape=(out_width, in_width)),\n scale*jax.random.normal(bias_key, shape=(out_width,))\n ]\n )\n\n return params\n\n# test\nkey = jax.random.PRNGKey(seed)\nMLP_params = init_MLP([784, 512, 256, 10], key)\nprint(jax.tree_map(lambda x: x.shape, MLP_params))", "[[(512, 784), (512,)], [(256, 512), (256,)], [(10, 256), (10,)]]\n" ], [ "def MLP_predict(params, x):\n hidden_layers = params[:-1]\n\n activation = x\n for w, b in hidden_layers:\n activation = jax.nn.relu(jnp.dot(w, activation) + b)\n\n w_last, b_last = params[-1]\n logits = jnp.dot(w_last, activation) + b_last\n\n # log(exp(o1)) - log(sum(exp(o1), exp(o2), ..., exp(o10)))\n # log( exp(o1) / sum(...) )\n return logits - logsumexp(logits)\n\n# tests\n\n# test single example\n\ndummy_img_flat = np.random.randn(np.prod(mnist_img_size))\nprint(dummy_img_flat.shape)\n\nprediction = MLP_predict(MLP_params, dummy_img_flat)\nprint(prediction.shape)\n\n# test batched function\nbatched_MLP_predict = vmap(MLP_predict, in_axes=(None, 0))\n\ndummy_imgs_flat = np.random.randn(16, np.prod(mnist_img_size))\nprint(dummy_imgs_flat.shape)\npredictions = batched_MLP_predict(MLP_params, dummy_imgs_flat)\nprint(predictions.shape)", "(784,)\n(10,)\n(16, 784)\n(16, 10)\n" ], [ "def custom_transform(x):\n return np.ravel(np.array(x, dtype=np.float32))\n\ndef custom_collate_fn(batch):\n transposed_data = list(zip(*batch))\n\n labels = np.array(transposed_data[1])\n imgs = np.stack(transposed_data[0])\n\n return imgs, labels\n\nbatch_size = 128\ntrain_dataset = MNIST(root='train_mnist', train=True, download=True, transform=custom_transform)\ntest_dataset = MNIST(root='test_mnist', train=False, download=True, transform=custom_transform)\n\ntrain_loader = DataLoader(train_dataset, batch_size, shuffle=True, collate_fn=custom_collate_fn, drop_last=True)\ntest_loader = DataLoader(test_dataset, batch_size, shuffle=False, collate_fn=custom_collate_fn, drop_last=True)\n\n# test\nbatch_data = next(iter(train_loader))\nimgs = batch_data[0]\nlbls = batch_data[1]\nprint(imgs.shape, imgs[0].dtype, lbls.shape, lbls[0].dtype)\n\n# optimization - loading the whole dataset into memory\ntrain_images = jnp.array(train_dataset.data).reshape(len(train_dataset), -1)\ntrain_lbls = jnp.array(train_dataset.targets)\n\ntest_images = jnp.array(test_dataset.data).reshape(len(test_dataset), -1)\ntest_lbls = jnp.array(test_dataset.targets)", "(128, 784) float32 (128,) int64\n" ], [ "num_epochs = 5\n\ndef loss_fn(params, imgs, gt_lbls):\n predictions = batched_MLP_predict(params, imgs)\n\n return -jnp.mean(predictions * 
gt_lbls)\n\ndef accuracy(params, dataset_imgs, dataset_lbls):\n pred_classes = jnp.argmax(batched_MLP_predict(params, dataset_imgs), axis=1)\n return jnp.mean(dataset_lbls == pred_classes)\n\n@jit\ndef update(params, imgs, gt_lbls, lr=0.01):\n loss, grads = value_and_grad(loss_fn)(params, imgs, gt_lbls)\n\n return loss, jax.tree_multimap(lambda p, g: p - lr*g, params, grads)\n\n# Create a MLP\nMLP_params = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)\n\nfor epoch in range(num_epochs):\n\n for cnt, (imgs, lbls) in enumerate(train_loader):\n\n gt_labels = jax.nn.one_hot(lbls, len(MNIST.classes))\n \n loss, MLP_params = update(MLP_params, imgs, gt_labels)\n \n if cnt % 50 == 0:\n print(loss)\n\n print(f'Epoch {epoch}, train acc = {accuracy(MLP_params, train_images, train_lbls)} test acc = {accuracy(MLP_params, test_images, test_lbls)}')\n", "_____no_output_____" ], [ "imgs, lbls = next(iter(test_loader))\nimg = imgs[0].reshape(mnist_img_size)\ngt_lbl = lbls[0]\nprint(img.shape)\n\nimport matplotlib.pyplot as plt\n\npred = jnp.argmax(MLP_predict(MLP_params, np.ravel(img)))\nprint('pred', pred)\nprint('gt', gt_lbl)\n\nplt.imshow(img); plt.show()", "(28, 28)\npred 7\ngt 7\n" ] ], [ [ "# Visualizations", "_____no_output_____" ] ], [ [ "w = MLP_params[0][0]\nprint(w.shape)\n\nw_single = w[500, :].reshape(mnist_img_size)\nprint(w_single.shape)\nplt.imshow(w_single); plt.show()", "(512, 784)\n(28, 28)\n" ], [ "# todo: visualize embeddings using t-SNE\n\nfrom sklearn.manifold import TSNE\n\ndef fetch_activations(params, x):\n hidden_layers = params[:-1]\n\n activation = x\n for w, b in hidden_layers:\n activation = jax.nn.relu(jnp.dot(w, activation) + b)\n\n return activation\n\nbatched_fetch_activations = vmap(fetch_activations, in_axes=(None, 0))\nimgs, lbls = next(iter(test_loader))\n\nbatch_activations = batched_fetch_activations(MLP_params, imgs)\nprint(batch_activations.shape) # (128, 2)\n\nt_sne_embeddings = TSNE(n_components=2, perplexity=30,).fit_transform(batch_activations)\ncora_label_to_color_map = {0: \"red\", 1: \"blue\", 2: \"green\", 3: \"orange\", 4: \"yellow\", 5: \"pink\", 6: \"gray\"}\n\nfor class_id in range(10):\n plt.scatter(t_sne_embeddings[lbls == class_id, 0], t_sne_embeddings[lbls == class_id, 1], s=20, color=cora_label_to_color_map[class_id])\nplt.show()", "(128, 256)\n" ], [ "# todo: dead neurons\n\ndef fetch_activations2(params, x):\n hidden_layers = params[:-1]\n collector = []\n\n activation = x\n for w, b in hidden_layers:\n activation = jax.nn.relu(jnp.dot(w, activation) + b)\n collector.append(activation)\n\n return collector\n\nbatched_fetch_activations2 = vmap(fetch_activations2, in_axes=(None, 0))\n\nimgs, lbls = next(iter(test_loader))\n\nMLP_params2 = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)\n\nbatch_activations = batched_fetch_activations2(MLP_params2, imgs)\nprint(batch_activations[1].shape) # (128, 512/256)\n\ndead_neurons = [np.ones(act.shape[1:]) for act in batch_activations]\n\nfor layer_id, activations in enumerate(batch_activations):\n dead_neurons[layer_id] = np.logical_and(dead_neurons[layer_id], (activations == 0).all(axis=0))\n\nfor layers in dead_neurons:\n print(np.sum(layers))", "(128, 256)\n0\n7\n" ] ], [ [ "# Parallelization", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cbc5b9ec9334e6926dec0d6307d21993cace0e04
510,598
ipynb
Jupyter Notebook
pyflightdata examples.ipynb
MauriceM999/pyflightdata
a47ad69c83ff15f6c0a4c25f1d6c8dc5d663b8bc
[ "MIT" ]
60
2015-02-12T10:14:51.000Z
2021-12-01T07:04:20.000Z
pyflightdata examples.ipynb
MauriceM999/pyflightdata
a47ad69c83ff15f6c0a4c25f1d6c8dc5d663b8bc
[ "MIT" ]
39
2017-01-16T22:08:55.000Z
2022-02-17T08:07:02.000Z
pyflightdata examples.ipynb
MauriceM999/pyflightdata
a47ad69c83ff15f6c0a4c25f1d6c8dc5d663b8bc
[ "MIT" ]
33
2015-03-12T02:56:36.000Z
2021-12-05T14:48:39.000Z
50.93755
187
0.414277
[ [ [ "# MIT License\n#\n# Copyright (c) 2020 Hari Allamraju\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n", "_____no_output_____" ] ], [ [ "# pyflightdata examples\n\nThis document lists a few examples to show the basic usage of pyflightdata. This does not show all the potential uses of the data we get from the API.\n\nPlease note that this is not an official API and we do not endorse or recommend any commercial usage of this API to make large scale mass requests.\n\nAlso the API may break from time to time as the pages and their structure change at the underlying websites. For now this is only flightradar24.com but we might add more sites soon.", "_____no_output_____" ] ], [ [ "from pyflightdata import FlightData", "_____no_output_____" ], [ "api=FlightData()", "_____no_output_____" ], [ "api.get_countries()[:5]", "_____no_output_____" ], [ "api.get_airlines()[:5]", "_____no_output_____" ], [ "api.get_airports('India')[10:15]", "_____no_output_____" ], [ "#pass the airline-code from get_airlines\napi.get_fleet('emirates-ek-uae')", "_____no_output_____" ], [ "#pass airline-code from get_airlines to see all current live flights\napi.get_flights('AI1')[:10]", "_____no_output_____" ], [ "api.get_history_by_flight_number('AI101')[-5:]", "_____no_output_____" ], [ "api.get_history_by_tail_number('9V-SMA')[-5:]", "_____no_output_____" ], [ "api.get_info_by_tail_number('9V-SMA')", "_____no_output_____" ], [ "api.get_airport_arrivals('sin')", "_____no_output_____" ], [ "api.get_airport_departures('sin')", "_____no_output_____" ], [ "api.get_airport_details('sin')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc5bba0b5227570f2631f0b185a988379a38613
8,907
ipynb
Jupyter Notebook
docs/user-guide/notebooks/Plots.ipynb
nsmith-/hist
d16d6684c8261e1a921afb44b14e9d5782596d7f
[ "BSD-3-Clause" ]
84
2020-02-12T02:02:58.000Z
2022-03-23T10:50:03.000Z
docs/user-guide/notebooks/Plots.ipynb
nsmith-/hist
d16d6684c8261e1a921afb44b14e9d5782596d7f
[ "BSD-3-Clause" ]
213
2020-03-09T02:38:25.000Z
2022-03-16T19:22:31.000Z
docs/user-guide/notebooks/Plots.ipynb
nsmith-/hist
d16d6684c8261e1a921afb44b14e9d5782596d7f
[ "BSD-3-Clause" ]
15
2020-03-14T12:05:18.000Z
2021-11-12T14:25:07.000Z
25.96793
419
0.522286
[ [ [ "# Plots", "_____no_output_____" ], [ "One of the most amazing feature of hist is it's powerful plotting family. Here you can see how to plot Hist.", "_____no_output_____" ] ], [ [ "from hist import Hist\nimport hist", "_____no_output_____" ], [ "h = Hist(\n hist.axis.Regular(50, -5, 5, name=\"S\", label=\"s [units]\", flow=False),\n hist.axis.Regular(50, -5, 5, name=\"W\", label=\"w [units]\", flow=False),\n)", "_____no_output_____" ], [ "import numpy as np\n\ns_data = np.random.normal(size=100_000) + np.ones(100_000)\nw_data = np.random.normal(size=100_000)\n\n# normal fill\nh.fill(s_data, w_data)", "_____no_output_____" ] ], [ [ "## Via Matplotlib\n\nhist allows you to plot via [Matplotlib](https://matplotlib.org/) like this:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(8, 5))\nw, x, y = h.to_numpy()\nmesh = ax.pcolormesh(x, y, w.T, cmap=\"RdYlBu\")\nax.set_xlabel(\"s\")\nax.set_ylabel(\"w\")\nfig.colorbar(mesh)\nplt.show()", "_____no_output_____" ] ], [ [ "## Via Mplhep\n\n[mplhep](https://github.com/scikit-hep/mplhep) is an important visualization tools in Scikit-Hep ecosystem. hist has integrate with mplhep and you can also plot using it. If you want more info about mplhep please visit the official repo to see it.", "_____no_output_____" ] ], [ [ "import mplhep\n\nfig, axs = plt.subplots(1, 2, figsize=(9, 4))\nmplhep.histplot(h.project(\"S\"), ax=axs[0])\n\nmplhep.hist2dplot(h, ax=axs[1])\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Via Plot\n\nHist has plotting methods for 1-D and 2-D histograms, `.plot1d()` and `.plot2d()` respectively. It also provides `.plot()` for plotting according to the its dimension. Moreover, to show the projection of each axis, you can use `.plot2d_full()`. If you have a Hist with higher dimension, you can use `.project()` to extract two dimensions to see it with our plotting suite.\n\nOur plotting methods are all based on Matplotlib, so you can pass Matplotlib's `ax` into it, and hist will draw on it. We will create it for you if you do not pass them in.", "_____no_output_____" ] ], [ [ "# plot1d\nfig, ax = plt.subplots(figsize=(6, 4))\n\n\nh.project(\"S\").plot1d(ax=ax, ls=\"--\", color=\"teal\", lw=3)\nplt.show()", "_____no_output_____" ], [ "# plot2d\nfig, ax = plt.subplots(figsize=(6, 6))\n\nh.plot2d(ax=ax, cmap=\"plasma\")\nplt.show()", "_____no_output_____" ], [ "# plot2d_full\nplt.figure(figsize=(8, 8))\n\nh.plot2d_full(\n main_cmap=\"coolwarm\",\n top_ls=\"--\",\n top_color=\"orange\",\n top_lw=2,\n side_ls=\":\",\n side_lw=2,\n side_color=\"steelblue\",\n)\n\nplt.show()", "_____no_output_____" ], [ "# auto-plot\nfig, axs = plt.subplots(1, 2, figsize=(9, 4), gridspec_kw={\"width_ratios\": [5, 4]})\n\n\nh.project(\"W\").plot(ax=axs[0], color=\"darkviolet\", lw=2, ls=\"-.\")\nh.project(\"W\", \"S\").plot(ax=axs[1], cmap=\"cividis\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Via Plot Pull\n\nPull plots are commonly used in HEP studies, and we provide a method for them with `.plot_pull()`, which accepts a `Callable` object, like the below `pdf` function, which is then fit to the histogram and the fit and pulls are shown on the plot. 
As Normal distributions are the generally desired function to fit the histogram data, the `str` aliases `\"normal\"`, `\"gauss\"`, and `\"gaus\"` are supported as well.", "_____no_output_____" ] ], [ [ "def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):\n return a * np.exp(-((x - x0) ** 2) / (2 * sigma ** 2)) + offset", "_____no_output_____" ], [ "np.random.seed(0)\n\nhist_1 = hist.Hist(\n hist.axis.Regular(\n 50, -5, 5, name=\"X\", label=\"x [units]\", underflow=False, overflow=False\n )\n).fill(np.random.normal(size=1000))\n\nfig = plt.figure(figsize=(10, 8))\nmain_ax_artists, sublot_ax_arists = hist_1.plot_pull(\n \"normal\",\n eb_ecolor=\"steelblue\",\n eb_mfc=\"steelblue\",\n eb_mec=\"steelblue\",\n eb_fmt=\"o\",\n eb_ms=6,\n eb_capsize=1,\n eb_capthick=2,\n eb_alpha=0.8,\n fp_c=\"hotpink\",\n fp_ls=\"-\",\n fp_lw=2,\n fp_alpha=0.8,\n bar_fc=\"royalblue\",\n pp_num=3,\n pp_fc=\"royalblue\",\n pp_alpha=0.618,\n pp_ec=None,\n ub_alpha=0.2,\n)", "_____no_output_____" ] ], [ [ "## Via Plot Ratio\n\nYou can also make an arbitrary ratio plot using the `.plot_ratio` API:", "_____no_output_____" ] ], [ [ "hist_2 = hist.Hist(\n hist.axis.Regular(\n 50, -5, 5, name=\"X\", label=\"x [units]\", underflow=False, overflow=False\n )\n).fill(np.random.normal(size=1700))\n\nfig = plt.figure(figsize=(10, 8))\nmain_ax_artists, sublot_ax_arists = hist_1.plot_ratio(\n hist_2,\n rp_ylabel=r\"Ratio\",\n rp_num_label=\"hist1\",\n rp_denom_label=\"hist2\",\n rp_uncert_draw_type=\"bar\", # line or bar\n)", "_____no_output_____" ] ], [ [ "Ratios between the histogram and a callable, or `str` alias, are supported as well", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10, 8))\nmain_ax_artists, sublot_ax_arists = hist_1.plot_ratio(pdf)", "_____no_output_____" ] ], [ [ "Using the `.plot_ratio` API you can also make efficiency plots (where the numerator is a strict subset of the denominator)", "_____no_output_____" ] ], [ [ "hist_3 = hist_2.copy() * 0.7\nhist_2.fill(np.random.uniform(-5, 5, 600))\nhist_3.fill(np.random.uniform(-5, 5, 200))\n\nfig = plt.figure(figsize=(10, 8))\nmain_ax_artists, sublot_ax_arists = hist_3.plot_ratio(\n hist_2,\n rp_num_label=\"hist3\",\n rp_denom_label=\"hist2\",\n rp_uncert_draw_type=\"line\",\n rp_uncertainty_type=\"efficiency\",\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc5bc3f8499662e511ce929cadbf08e2dd1b0db
2,076
ipynb
Jupyter Notebook
Bisection Search.ipynb
Merlin1908/asrt119_session_7
220e84de71bef370281e32d3ff1766d7dafef7c8
[ "MIT" ]
null
null
null
Bisection Search.ipynb
Merlin1908/asrt119_session_7
220e84de71bef370281e32d3ff1766d7dafef7c8
[ "MIT" ]
null
null
null
Bisection Search.ipynb
Merlin1908/asrt119_session_7
220e84de71bef370281e32d3ff1766d7dafef7c8
[ "MIT" ]
null
null
null
22.085106
77
0.474952
[ [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Define a function for which we'd like to find the roots", "_____no_output_____" ] ], [ [ "def function_for_roots(x):\n a = 1.01\n b = -3.04\n c= 2.07\n return a*x**2 + b*x + c #get the roots of ax^2 +bx + c", "_____no_output_____" ], [ "def check_initial_values(f,x_min,x_max,tol):\n \n y_min = f(x_min)\n y_max = f(x_max)\n \n if(y_min*y_max>=0.0):\n print(\"No zero crossing found in the range =\",x_min,x_max)\n s = \"f(%f) = %f, f(%f) = %f\" % (x_min,y_min,x_max,y_max)\n print(s)\n return 0\n \n # if x_min is a root, then return flag == 1\n if(np.fabs(y_min)<tol):\n return 1\n \n #if x_max is a root, then return flag == 2\n if(np.fabs(y_max)<tol):\n return 2\n \n #if we reach this point, the bracket is valid\n #and we will return 3\n return 3", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cbc5d7601e1130fdc2cae958153fe9fe49d3efac
58,215
ipynb
Jupyter Notebook
notebooks/image-classification.ipynb
kaust-vislab/tensorflow-federated-data-science-project
5bfc5efe38f3007fed57b48a488c13fa7e46a265
[ "BSD-3-Clause" ]
5
2020-02-25T14:01:08.000Z
2021-03-17T11:31:11.000Z
notebooks/image-classification.ipynb
kaust-vislab/tensorflow-federated-data-science-project
5bfc5efe38f3007fed57b48a488c13fa7e46a265
[ "BSD-3-Clause" ]
1
2020-02-23T08:29:27.000Z
2020-02-23T08:29:27.000Z
notebooks/image-classification.ipynb
kaust-vislab/tensorflow-federated-data-science-project
5bfc5efe38f3007fed57b48a488c13fa7e46a265
[ "BSD-3-Clause" ]
1
2020-03-10T08:34:15.000Z
2020-03-10T08:34:15.000Z
43.902715
563
0.577721
[ [ [ "# Hands-on Federated Learning: Image Classification\n\nIn their recent (and exteremly thorough!) review of the federated learning literature [*Kairouz, et al (2019)*](https://arxiv.org/pdf/1912.04977.pdf) define federated learning as a machine learning setting where multiple entities (clients) collaborate in solving a machine learning problem, under the coordination of a central server or service provider. Each client’s raw data is stored locally and not exchanged or transferred; instead, focused updates intended for immediate aggregation are used to achieve the learning objective.\n\nIn this tutorial we will use a federated version of the classic MNIST dataset to introduce the Federated Learning (FL) API layer of TensorFlow Federated (TFF), [`tff.learning`](https://www.tensorflow.org/federated/api_docs/python/tff/learning) - a set of high-level interfaces that can be used to perform common types of federated learning tasks, such as federated training, against user-supplied models implemented in TensorFlow or Keras.", "_____no_output_____" ], [ "# Preliminaries", "_____no_output_____" ] ], [ [ "import collections\nimport os\nimport typing\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow_federated as tff", "_____no_output_____" ], [ "# required to run TFF inside Jupyter notebooks\nimport nest_asyncio\nnest_asyncio.apply()", "_____no_output_____" ], [ "tff.federated_computation(lambda: 'Hello, World!')()", "_____no_output_____" ] ], [ [ "# Preparing the data\n\nIn the IID setting the local data on each \"client\" is assumed to be a representative sample of the global data distribution. This is typically the case by construction when performing data parallel training of deep learning models across multiple CPU/GPU \"clients\".\n\nThe non-IID case is significantly more complicated as there are many ways in which data can be non-IID and different degress of \"non-IIDness\". Consider a supervised task with features $X$ and labels $y$. A statistical model of federated learning involves two levels of sampling:\n\n1. Sampling a client $i$ from the distribution over available clients $Q$\n2. Sampling an example $(X,y)$ from that client’s local data distribution $P_i(X,y)$.\n\nNon-IID data in federated learning typically refers to differences between $P_i$ and $P_j$ for different clients $i$ and $j$. However, it is worth remembering that both the distribution of available clients, $Q$, and the distribution of local data for client $i$, $P_i$, may change over time which introduces another dimension of “non-IIDness”. Finally, if the local data on a client's device is insufficiently randomized, perhaps ordered by time, then independence is violated locally as well. \n\nIn order to facilitate experimentation TFF includes federated versions of several popular datasets that exhibit different forms and degrees of non-IIDness. ", "_____no_output_____" ] ], [ [ "# What datasets are available?\ntff.simulation.datasets.", "_____no_output_____" ] ], [ [ "This tutorial uses a version of MNIST that contains a version of the original NIST dataset that has been re-processed using [LEAF](https://leaf.cmu.edu/) so that the data is keyed by the original writer of the digits. \n\nThe federated MNIST dataset displays a particular type of non-IIDness: feature distribution skew (covariate shift). Whith feature distribution skew the marginal distributions $P_i(X)$ vary across clients, even though $P(y|X)$ is shared. 
In the federated MNIST dataset users are writing the same numbers but each user has a different writing style characterized but different stroke width, slant, etc.", "_____no_output_____" ] ], [ [ "tff.simulation.datasets.emnist.load_data?", "_____no_output_____" ], [ "emnist_train, emnist_test = (tff.simulation\n .datasets\n .emnist\n .load_data(only_digits=True, cache_dir=\"../data\"))\n", "_____no_output_____" ], [ "NUMBER_CLIENTS = len(emnist_train.client_ids)\nNUMBER_CLIENTS", "_____no_output_____" ], [ "def sample_client_ids(client_ids: typing.List[str],\n sample_size: typing.Union[float, int],\n random_state: np.random.RandomState) -> typing.List[str]:\n \"\"\"Randomly selects a subset of clients ids.\"\"\"\n number_clients = len(client_ids)\n error_msg = \"'client_ids' must be non-emtpy.\"\n assert number_clients > 0, error_msg\n if isinstance(sample_size, float):\n error_msg = \"Sample size must be between 0 and 1.\"\n assert 0 <= sample_size <= 1, error_msg\n size = int(sample_size * number_clients)\n elif isinstance(sample_size, int):\n error_msg = f\"Sample size must be between 0 and {number_clients}.\"\n assert 0 <= sample_size <= number_clients, error_msg\n size = sample_size\n else:\n error_msg = \"Type of 'sample_size' must be 'float' or 'int'.\"\n raise TypeError(error_msg)\n random_idxs = random_state.randint(number_clients, size=size)\n return [client_ids[i] for i in random_idxs]\n", "_____no_output_____" ], [ "# these are what the client ids look like\n_random_state = np.random.RandomState(42)\nsample_client_ids(emnist_train.client_ids, 10, _random_state)", "_____no_output_____" ], [ "def create_tf_datasets(source: tff.simulation.ClientData,\n client_ids: typing.Union[None, typing.List[str]]) -> typing.Dict[str, tf.data.Dataset]:\n \"\"\"Create tf.data.Dataset instances for clients using their client_id.\"\"\"\n if client_ids is None:\n client_ids = source.client_ids\n datasets = {client_id: source.create_tf_dataset_for_client(client_id) for client_id in client_ids}\n return datasets\n\n\ndef sample_client_datasets(source: tff.simulation.ClientData,\n sample_size: typing.Union[float, int],\n random_state: np.random.RandomState) -> typing.Dict[str, tf.data.Dataset]:\n \"\"\"Randomly selects a subset of client datasets.\"\"\"\n client_ids = sample_client_ids(source.client_ids, sample_size, random_state)\n client_datasets = create_tf_datasets(source, client_ids)\n return client_datasets\n", "_____no_output_____" ], [ "_random_state = np.random.RandomState()\nclient_datasets = sample_client_datasets(emnist_train, sample_size=1, random_state=_random_state)\n(client_id, client_dataset), *_ = client_datasets.items()\n\nfig, axes = plt.subplots(1, 5, figsize=(12,6), sharex=True, sharey=True)\nfor i, example in enumerate(client_dataset.take(5)):\n axes[i].imshow(example[\"pixels\"].numpy(), cmap=\"gray\")\n axes[i].set_title(example[\"label\"].numpy())\n_ = fig.suptitle(x= 0.5, y=0.75, t=f\"Training examples for a client {client_id}\", fontsize=15) \n", "_____no_output_____" ] ], [ [ "## Data preprocessing\n\nSince each client dataset is already a [`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset), preprocessing can be accomplished using Dataset transformations. Another option would be to use preprocessing operations from [`sklearn.preprocessing`](https://scikit-learn.org/stable/modules/preprocessing.html).\n\nPreprocessing consists of the following steps:\n\n1. `map` a function that flattens the 28 x 28 images into 784-element tensors\n2. 
`map` a function that rename the features from pixels and label to X and y for use with Keras\n3. `shuffle` the individual examples\n4. `batch` the into training batches\n\nWe also throw in a `repeat` over the data set to run several epochs on each client device before sending parameters to the server for averaging.", "_____no_output_____" ] ], [ [ "AUTOTUNE = (tf.data\n .experimental\n .AUTOTUNE)\nSHUFFLE_BUFFER_SIZE = 1000\nNUMBER_TRAINING_EPOCHS = 5 # number of local updates!\nTRAINING_BATCH_SIZE = 32\nTESTING_BATCH_SIZE = 32\n\nNUMBER_FEATURES = 28 * 28\nNUMBER_TARGETS = 10", "_____no_output_____" ], [ "def _reshape(training_batch):\n \"\"\"Extracts and reshapes data from a training sample \"\"\"\n pixels = training_batch[\"pixels\"]\n label = training_batch[\"label\"]\n X = tf.reshape(pixels, shape=[-1]) # flattens 2D pixels to 1D\n y = tf.reshape(label, shape=[1])\n return X, y\n\n\ndef create_training_dataset(client_dataset: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"Create a training dataset for a client from a raw client dataset.\"\"\"\n training_dataset = (client_dataset.map(_reshape, num_parallel_calls=AUTOTUNE)\n .shuffle(SHUFFLE_BUFFER_SIZE, seed=None, reshuffle_each_iteration=True)\n .repeat(NUMBER_TRAINING_EPOCHS)\n .batch(TRAINING_BATCH_SIZE)\n .prefetch(buffer_size=AUTOTUNE))\n return training_dataset\n\n\ndef create_testing_dataset(client_dataset: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"Create a testing dataset for a client from a raw client dataset.\"\"\"\n testing_dataset = (client_dataset.map(_reshape, num_parallel_calls=AUTOTUNE)\n .batch(TESTING_BATCH_SIZE))\n return testing_dataset\n", "_____no_output_____" ] ], [ [ "## How to choose the clients included in each training round\n\nIn a typical federated training scenario there will be a very large population of user devices however only a fraction of these devices are likely to be available for training at a given point in time. For example, if the client devices are mobile phones then they might only participate in training when plugged into a power source, off a metered network, and otherwise idle.\n\nIn a simulated environment, where all data is locally available, an approach is to simply sample a random subset of the clients to be involved in each round of training so that the subset of clients involved will vary from round to round.\n\n### How many clients to include in each round?\n\nUpdating and averaging a larger number of client models per training round yields better convergence and in a simulated training environment probably makes sense to include as many clients as is computationally feasible. 
However in real-world training scenario while averaging a larger number of clients improve convergence, it also makes training vulnerable to slowdown due to unpredictable tail delays in computation/communication at/with the clients.", "_____no_output_____" ] ], [ [ "def create_federated_data(training_source: tff.simulation.ClientData,\n testing_source: tff.simulation.ClientData,\n sample_size: typing.Union[float, int],\n random_state: np.random.RandomState) -> typing.Dict[str, typing.Tuple[tf.data.Dataset, tf.data.Dataset]]:\n \n # sample clients ids from the training dataset\n client_ids = sample_client_ids(training_source.client_ids, sample_size, random_state)\n \n federated_data = {}\n for client_id in client_ids:\n # create training dataset for the client\n _tf_dataset = training_source.create_tf_dataset_for_client(client_id)\n training_dataset = create_training_dataset(_tf_dataset)\n \n # create the testing dataset for the client\n _tf_dataset = testing_source.create_tf_dataset_for_client(client_id)\n testing_dataset = create_testing_dataset(_tf_dataset)\n \n federated_data[client_id] = (training_dataset, testing_dataset)\n \n return federated_data", "_____no_output_____" ], [ "_random_state = np.random.RandomState(42)\nfederated_data = create_federated_data(emnist_train,\n emnist_test,\n sample_size=0.01,\n random_state=_random_state)", "_____no_output_____" ], [ "# keys are client ids, values are (training_dataset, testing_dataset) pairs\nlen(federated_data)", "_____no_output_____" ] ], [ [ "# Creating a model with Keras\n\nIf you are using Keras, you likely already have code that constructs a Keras model. Since the model will need to be replicated on each of the client devices we wrap the model in a no-argument Python function, a representation of which, will eventually be invoked on each client to create the model on that client.", "_____no_output_____" ] ], [ [ "def create_keras_model_fn() -> keras.Model:\n model_fn = keras.models.Sequential([\n keras.layers.Input(shape=(NUMBER_FEATURES,)),\n keras.layers.Dense(units=NUMBER_TARGETS),\n keras.layers.Softmax(),\n ])\n return model_fn\n", "_____no_output_____" ] ], [ [ "In order to use any model with TFF, it needs to be wrapped in an instance of the [`tff.learning.Model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) interface, which exposes methods to stamp the model's forward pass, metadata properties, etc, and also introduces additional elements such as ways to control the process of computing federated metrics. 
\n\nOnce you have a Keras model like the one we've just defined above, you can have TFF wrap it for you by invoking [`tff.learning.from_keras_model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/from_keras_model), passing the model and a sample data batch as arguments, as shown below.", "_____no_output_____" ] ], [ [ "tff.learning.from_keras_model?", "_____no_output_____" ], [ "def create_tff_model_fn() -> tff.learning.Model:\n keras_model = create_keras_model_fn()\n dummy_batch = (tf.constant(0.0, shape=(TRAINING_BATCH_SIZE, NUMBER_FEATURES), dtype=tf.float32),\n tf.constant(0, shape=(TRAINING_BATCH_SIZE, 1), dtype=tf.int32))\n loss_fn = (keras.losses\n .SparseCategoricalCrossentropy())\n metrics = [\n keras.metrics.SparseCategoricalAccuracy()\n ]\n tff_model_fn = (tff.learning\n .from_keras_model(keras_model, dummy_batch, loss_fn, None, metrics))\n return tff_model_fn\n", "_____no_output_____" ] ], [ [ "Again, since our model will need to be replicated on each of the client devices we wrap the model in a no-argument Python function, a representation of which, will eventually be invoked on each client to create the model on that client.", "_____no_output_____" ], [ "# Training the model on federated data\n\nNow that we have a model wrapped as `tff.learning.Model` for use with TFF, we can let TFF construct a Federated Averaging algorithm by invoking the helper function `tff.learning.build_federated_averaging_process` as follows.\n\nKeep in mind that the argument needs to be a constructor (such as `create_tff_model_fn` above), not an already-constructed instance, so that the construction of your model can happen in a context controlled by TFF.\n\nOne critical note on the Federated Averaging algorithm below, there are 2 optimizers: a \n\n1. `client_optimizer_fn` which is only used to compute local model updates on each client. \n2. `server_optimizer_fn` applies the averaged update to the global model on the server. \n\nN.B. the choice of optimizer and learning rate may need to be different than those you would use to train the model on a standard i.i.d. dataset. Start with stochastic gradient descent with a smaller (than normal) learning rate.", "_____no_output_____" ] ], [ [ "tff.learning.build_federated_averaging_process?", "_____no_output_____" ], [ "CLIENT_LEARNING_RATE = 1e-2\nSERVER_LEARNING_RATE = 1e0\n\n\ndef create_client_optimizer(learning_rate: float = CLIENT_LEARNING_RATE,\n momentum: float = 0.0,\n nesterov: bool = False) -> keras.optimizers.Optimizer:\n client_optimizer = (keras.optimizers\n .SGD(learning_rate, momentum, nesterov))\n return client_optimizer\n\n\ndef create_server_optimizer(learning_rate: float = SERVER_LEARNING_RATE,\n momentum: float = 0.0,\n nesterov: bool = False) -> keras.optimizers.Optimizer:\n server_optimizer = (keras.optimizers\n .SGD(learning_rate, momentum, nesterov))\n return server_optimizer\n\n\nfederated_averaging_process = (tff.learning\n .build_federated_averaging_process(create_tff_model_fn, \n create_client_optimizer,\n create_server_optimizer,\n client_weight_fn=None,\n stateful_delta_aggregate_fn=None,\n stateful_model_broadcast_fn=None))\n", "_____no_output_____" ] ], [ [ "What just happened? 
TFF has constructed a pair of *federated computations* (i.e., programs in TFF's internal glue language) and packaged them into a [`tff.utils.IterativeProcess`](https://www.tensorflow.org/federated/api_docs/python/tff/utils/IterativeProcess) in which these computations are available as a pair of properties `initialize` and `next`.\n\nIt is a goal of TFF to define computations in a way that they could be executed in real federated learning settings, but currently only local execution simulation runtime is implemented. To execute a computation in a simulator, you simply invoke it like a Python function. This default interpreted environment is not designed for high performance, but it will suffice for this tutorial.", "_____no_output_____" ], [ "\n## `initialize`\n\nA function that takes no arguments and returns the state of the federated averaging process on the server. This function is only called to initialize a federated averaging process after it has been created.", "_____no_output_____" ] ], [ [ "# () -> SERVER_STATE\nprint(federated_averaging_process.initialize.type_signature)", "_____no_output_____" ], [ "state = federated_averaging_process.initialize()", "_____no_output_____" ] ], [ [ "## `next`\n\nA function that takes current server state and federated data as arguments and returns the updated server state as well as any training metrics. Calling `next` performs a single round of federated averaging consisting of the following steps.\n\n1. pushing the server state (including the model parameters) to the clients\n2. on-device training on their local data\n3. collecting and averaging model updates\n4. producing a new updated model at the server.", "_____no_output_____" ] ], [ [ "# extract the training datasets from the federated data\nfederated_training_data = [training_dataset for _, (training_dataset, _) in federated_data.items()]\n\n# SERVER_STATE, FEDERATED_DATA -> SERVER_STATE, TRAINING_METRICS\nstate, metrics = federated_averaging_process.next(state, federated_training_data)\nprint(f\"round: 0, metrics: {metrics}\")", "_____no_output_____" ] ], [ [ "Let's run a few more rounds on the same training data (which will over-fit to a particular set of clients but will converge faster).", "_____no_output_____" ] ], [ [ "number_training_rounds = 15\nfor n in range(1, number_training_rounds):\n state, metrics = federated_averaging_process.next(state, federated_training_data)\n print(f\"round:{n}, metrics:{metrics}\")\n", "_____no_output_____" ] ], [ [ "# First attempt at simulating federated averaging\n\nA proper federated averaging simulation would randomly sample new clients for each training round, allow for evaluation of training progress on training and testing data, and log training and testing metrics to TensorBoard for reference.\n\nHere we define a function that randomly sample new clients prior to each training round and logs training metrics TensorBoard. 
We defer handling testing data until we discuss federated evaluation towards the end of the tutorial.", "_____no_output_____" ] ], [ [ "def simulate_federated_averaging(federated_averaging_process: tff.utils.IterativeProcess,\n training_source: tff.simulation.ClientData,\n testing_source: tff.simulation.ClientData,\n sample_size: typing.Union[float, int],\n random_state: np.random.RandomState,\n number_rounds: int,\n initial_state: None = None,\n tensorboard_logging_dir: str = None):\n \n state = federated_averaging_process.initialize() if initial_state is None else initial_state\n \n if tensorboard_logging_dir is not None:\n \n if not os.path.isdir(tensorboard_logging_dir):\n os.makedirs(tensorboard_logging_dir)\n\n summary_writer = (tf.summary\n .create_file_writer(tensorboard_logging_dir))\n\n with summary_writer.as_default():\n for n in range(number_rounds):\n federated_data = create_federated_data(training_source,\n testing_source,\n sample_size,\n random_state)\n anonymized_training_data = [dataset for _, (dataset, _) in federated_data.items()]\n state, metrics = federated_averaging_process.next(state, anonymized_training_data)\n print(f\"Round: {n}, Training metrics: {metrics}\")\n\n for name, value in metrics._asdict().items():\n tf.summary.scalar(name, value, step=n) \n else:\n for n in range(number_rounds):\n federated_data = create_federated_data(training_source,\n testing_source,\n sample_size,\n random_state)\n anonymized_training_data = [dataset for _, (dataset, _) in federated_data.items()]\n state, metrics = federated_averaging_process.next(state, anonymized_training_data)\n print(f\"Round: {n}, Training metrics: {metrics}\")\n \n return state, metrics", "_____no_output_____" ], [ "federated_averaging_process = (tff.learning\n .build_federated_averaging_process(create_tff_model_fn, \n create_client_optimizer,\n create_server_optimizer,\n client_weight_fn=None,\n stateful_delta_aggregate_fn=None,\n stateful_model_broadcast_fn=None))\n_random_state = np.random.RandomState(42)\n_tensorboard_logging_dir = \"../results/logs/tensorboard\"\nupdated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,\n training_source=emnist_train,\n testing_source=emnist_test,\n sample_size=0.01,\n random_state=_random_state,\n number_rounds=5,\n tensorboard_logging_dir=_tensorboard_logging_dir)", "_____no_output_____" ], [ "updated_state", "_____no_output_____" ], [ "current_metrics", "_____no_output_____" ] ], [ [ "# Customizing the model implementation\n\nKeras is the recommended high-level model API for TensorFlow and you should be using Keras models and creating TFF models using [`tff.learning.from_keras_model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/from_keras_model) whenever possible.\n\nHowever, [`tff.learning`](https://www.tensorflow.org/federated/api_docs/python/tff/learning) provides a lower-level model interface, [`tff.learning.Model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model), that exposes the minimal functionality necessary for using a model for federated learning. Directly implementing this interface (possibly still using building blocks from [`keras`](https://www.tensorflow.org/guide/keras)) allows for maximum customization without modifying the internals of the federated learning algorithms.\n\nNow we are going to repeat the above from scratch!", "_____no_output_____" ], [ "## Defining model variables\n\nWe start by defining a new Python class that inherits from `tff.learning.Model`. 
In the class constructor (i.e., the `__init__` method) we will initialize all relevant variables using TF primatives as well as define the our \"input spec\" which defines the shape and types of the tensors that will hold input data. ", "_____no_output_____" ] ], [ [ "class MNISTModel(tff.learning.Model):\n\n def __init__(self):\n \n # initialize some trainable variables\n self._weights = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),\n name=\"weights\",\n trainable=True\n )\n self._bias = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),\n name=\"bias\",\n trainable=True\n )\n \n # initialize some variables used in computing metrics\n self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)\n self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)\n self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)\n \n # define the input spec\n self._input_spec = collections.OrderedDict([\n ('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),\n ('y', tf.TensorSpec([None, 1], tf.int32))\n ])\n\n @property\n def input_spec(self):\n return self._input_spec\n \n @property\n def local_variables(self):\n return [self._number_examples, self._total_loss, self._number_true_positives]\n\n @property\n def non_trainable_variables(self):\n return []\n \n @property\n def trainable_variables(self):\n return [self._weights, self._bias]\n\n ", "_____no_output_____" ] ], [ [ "## Defining the forward pass\n\nWith the variables for model parameters and cumulative statistics in place we can now define the `forward_pass` method that computes loss, makes predictions, and updates the cumulative statistics for a single batch of input data.", "_____no_output_____" ] ], [ [ "class MNISTModel(tff.learning.Model):\n\n def __init__(self):\n \n # initialize some trainable variables\n self._weights = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),\n name=\"weights\",\n trainable=True\n )\n self._bias = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),\n name=\"bias\",\n trainable=True\n )\n \n # initialize some variables used in computing metrics\n self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)\n self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)\n self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)\n \n # define the input spec\n self._input_spec = collections.OrderedDict([\n ('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),\n ('y', tf.TensorSpec([None, 1], tf.int32))\n ])\n\n @property\n def input_spec(self):\n return self._input_spec\n \n @property\n def local_variables(self):\n return [self._number_examples, self._total_loss, self._number_true_positives]\n\n @property\n def non_trainable_variables(self):\n return []\n \n @property\n def trainable_variables(self):\n return [self._weights, self._bias]\n\n @tf.function\n def _count_true_positives(self, y_true, y_pred):\n return tf.reduce_sum(tf.cast(tf.equal(y_true, y_pred), tf.float32))\n\n @tf.function\n def _linear_transformation(self, batch):\n X = batch['X']\n W, b = self.trainable_variables\n Z = tf.matmul(X, W) + b\n return Z\n \n @tf.function\n def _loss_fn(self, y_true, probabilities):\n return -tf.reduce_mean(tf.reduce_sum(tf.one_hot(y_true, NUMBER_TARGETS) * tf.math.log(probabilities), 
axis=1))\n \n @tf.function\n def _model_fn(self, batch):\n Z = self._linear_transformation(batch)\n probabilities = tf.nn.softmax(Z)\n return probabilities\n \n @tf.function\n def forward_pass(self, batch, training=True):\n probabilities = self._model_fn(batch)\n y_pred = tf.argmax(probabilities, axis=1, output_type=tf.int32)\n y_true = tf.reshape(batch['y'], shape=[-1])\n\n # compute local variables\n loss = self._loss_fn(y_true, probabilities)\n true_positives = self._count_true_positives(y_true, y_pred)\n number_examples = tf.size(y_true, out_type=tf.float32)\n \n # update local variables\n self._total_loss.assign_add(loss)\n self._number_true_positives.assign_add(true_positives)\n self._number_examples.assign_add(number_examples)\n\n batch_output = tff.learning.BatchOutput(\n loss=loss,\n predictions=y_pred,\n num_examples=tf.cast(number_examples, tf.int32)\n )\n return batch_output\n", "_____no_output_____" ] ], [ [ "## Defining the local metrics\n\nNext, we define a method `report_local_outputs` that returns a set of local metrics. These are the values, in addition to model updates (which are handled automatically), that are eligible to be aggregated to the server in a federated learning or evaluation process.\n\nFinally, we need to determine how to aggregate the local metrics emitted by each device by defining `federated_output_computation`. This is the only part of the code that isn't written in TensorFlow - it's a federated computation expressed in TFF.", "_____no_output_____" ] ], [ [ "class MNISTModel(tff.learning.Model):\n\n def __init__(self):\n \n # initialize some trainable variables\n self._weights = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),\n name=\"weights\",\n trainable=True\n )\n self._bias = tf.Variable(\n initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),\n name=\"bias\",\n trainable=True\n )\n \n # initialize some variables used in computing metrics\n self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)\n self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)\n self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)\n \n # define the input spec\n self._input_spec = collections.OrderedDict([\n ('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),\n ('y', tf.TensorSpec([None, 1], tf.int32))\n ])\n\n @property\n def federated_output_computation(self):\n return self._aggregate_metrics_across_clients\n \n @property\n def input_spec(self):\n return self._input_spec\n \n @property\n def local_variables(self):\n return [self._number_examples, self._total_loss, self._number_true_positives]\n\n @property\n def non_trainable_variables(self):\n return []\n \n @property\n def trainable_variables(self):\n return [self._weights, self._bias]\n \n @tff.federated_computation\n def _aggregate_metrics_across_clients(metrics):\n aggregated_metrics = {\n 'number_examples': tff.federated_sum(metrics.number_examples),\n 'average_loss': tff.federated_mean(metrics.average_loss, metrics.number_examples),\n 'accuracy': tff.federated_mean(metrics.accuracy, metrics.number_examples)\n }\n return aggregated_metrics\n\n @tf.function\n def _count_true_positives(self, y_true, y_pred):\n return tf.reduce_sum(tf.cast(tf.equal(y_true, y_pred), tf.float32))\n\n @tf.function\n def _linear_transformation(self, batch):\n X = batch['X']\n W, b = self.trainable_variables\n Z = tf.matmul(X, W) + b\n return Z\n \n @tf.function\n def 
_loss_fn(self, y_true, probabilities):\n return -tf.reduce_mean(tf.reduce_sum(tf.one_hot(y_true, NUMBER_TARGETS) * tf.math.log(probabilities), axis=1))\n \n @tf.function\n def _model_fn(self, batch):\n Z = self._linear_transformation(batch)\n probabilities = tf.nn.softmax(Z)\n return probabilities\n \n @tf.function\n def forward_pass(self, batch, training=True):\n probabilities = self._model_fn(batch)\n y_pred = tf.argmax(probabilities, axis=1, output_type=tf.int32)\n y_true = tf.reshape(batch['y'], shape=[-1])\n\n # compute local variables\n loss = self._loss_fn(y_true, probabilities)\n true_positives = self._count_true_positives(y_true, y_pred)\n number_examples = tf.cast(tf.size(y_true), tf.float32)\n \n # update local variables\n self._total_loss.assign_add(loss)\n self._number_true_positives.assign_add(true_positives)\n self._number_examples.assign_add(number_examples)\n\n batch_output = tff.learning.BatchOutput(\n loss=loss,\n predictions=y_pred,\n num_examples=tf.cast(number_examples, tf.int32)\n )\n return batch_output\n\n @tf.function\n def report_local_outputs(self):\n local_metrics = collections.OrderedDict([\n ('number_examples', self._number_examples),\n ('average_loss', self._total_loss / self._number_examples),\n ('accuracy', self._number_true_positives / self._number_examples)\n ])\n return local_metrics\n", "_____no_output_____" ] ], [ [ "Here are a few points worth highlighting:\n\n* All state that your model will use must be captured as TensorFlow variables, as TFF does not use Python at runtime (remember your code should be written such that it can be deployed to mobile devices).\n* Your model should describe what form of data it accepts (input_spec), as in general, TFF is a strongly-typed environment and wants to determine type signatures for all components. Declaring the format of your model's input is an essential part of it.\n* Although technically not required, we recommend wrapping all TensorFlow logic (forward pass, metric calculations, etc.) as tf.functions, as this helps ensure the TensorFlow can be serialized, and removes the need for explicit control dependencies.\n\nThe above is sufficient for evaluation and algorithms like Federated SGD. 
However, for Federated Averaging, we need to specify how the model should train locally on each batch.", "_____no_output_____" ] ], [ [ "class MNISTrainableModel(MNISTModel, tff.learning.TrainableModel):\n \n def __init__(self, optimizer):\n super().__init__()\n self._optimizer = optimizer\n\n @tf.function\n def train_on_batch(self, batch):\n with tf.GradientTape() as tape:\n output = self.forward_pass(batch)\n gradients = tape.gradient(output.loss, self.trainable_variables)\n self._optimizer.apply_gradients(zip(tf.nest.flatten(gradients), tf.nest.flatten(self.trainable_variables)))\n return output\n", "_____no_output_____" ] ], [ [ "# Simulating federated training with the new model\n\nWith all the above in place, the remainder of the process looks like what we've seen already - just replace the model constructor with the constructor of our new model class, and use the two federated computations in the iterative process you created to cycle through training rounds.", "_____no_output_____" ] ], [ [ "def create_custom_tff_model_fn():\n optimizer = keras.optimizers.SGD(learning_rate=0.02)\n return MNISTrainableModel(optimizer)\n \nfederated_averaging_process = (tff.learning\n .build_federated_averaging_process(create_custom_tff_model_fn))\n\n_random_state = np.random.RandomState(42)\nupdated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,\n training_source=emnist_train,\n testing_source=emnist_test,\n sample_size=0.01,\n random_state=_random_state,\n number_rounds=10)", "_____no_output_____" ], [ "updated_state", "_____no_output_____" ], [ "current_metrics", "_____no_output_____" ] ], [ [ "# Evaluation\n\nAll of our experiments so far presented only federated training metrics - the average metrics over all batches of data trained across all clients in the round. Should we be concerened about overfitting? Yes! In federated averaging algorithms there are two different ways to over-fit. \n\n1. Overfitting the shared model (especially if we use the same set of clients on each round).\n2. Over-ftting local models on the clients.\n", "_____no_output_____" ], [ "## Federated evaluation\n\nTo perform evaluation on federated data, you can construct another federated computation designed for just this purpose, using the [`tff.learning.build_federated_evaluation`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_evaluation) function, and passing in your model constructor as an argument. Note that evaluation doesn't perform gradient descent and there's no need to construct optimizers.\n", "_____no_output_____" ] ], [ [ "tff.learning.build_federated_evaluation?", "_____no_output_____" ], [ "federated_evaluation = (tff.learning\n .build_federated_evaluation(create_custom_tff_model_fn))", "_____no_output_____" ], [ "# function type signature: SERVER_MODEL, FEDERATED_DATA -> METRICS\nprint(federate_evaluation.type_signature)", "_____no_output_____" ] ], [ [ "The `federated_evaluation` function is similar to `tff.utils.IterativeProcess.next` but with two important differences. \n\n1. Function does not return the server state; since evaluation doesn't modify the model or any other aspect of state - you can think of it as stateless.\n2. 
Function only needs the model and doesn't require any other part of server state that might be associated with training, such as optimizer variables.", "_____no_output_____" ] ], [ [ "training_metrics = federated_evaluation(updated_state.model, federated_training_data)", "_____no_output_____" ], [ "training_metrics", "_____no_output_____" ] ], [ [ "Note the numbers may look marginally better than what was reported by the last round of training. By convention, the training metrics reported by the iterative training process generally reflect the performance of the model at the beginning of the training round, so the evaluation metrics will always be one step ahead.", "_____no_output_____" ], [ "## Evaluating on client data not used in training\n\nSince we are training a shared model for digit classication we might also want to evaluate the performance of the model on client test datasets where the corresponding training dataset was not used in training.", "_____no_output_____" ] ], [ [ "_random_state = np.random.RandomState(42)\nclient_datasets = sample_client_datasets(emnist_test, sample_size=0.01, random_state=_random_state)\nfederated_testing_data = [create_testing_dataset(client_dataset) for _, client_dataset in client_datasets.items()]", "_____no_output_____" ], [ "testing_metrics = federated_evaluation(updated_state.model, federated_testing_data)", "_____no_output_____" ], [ "testing_metrics", "_____no_output_____" ] ], [ [ "# Adding evaluation to our federated averaging simulation", "_____no_output_____" ] ], [ [ "def simulate_federated_averaging(federated_averaging_process: tff.utils.IterativeProcess,\n federated_evaluation,\n training_source: tff.simulation.ClientData,\n testing_source: tff.simulation.ClientData,\n sample_size: typing.Union[float, int],\n random_state: np.random.RandomState,\n number_rounds: int,\n tensorboard_logging_dir: str = None):\n \n state = federated_averaging_process.initialize()\n \n if tensorboard_logging_dir is not None:\n \n if not os.path.isdir(tensorboard_logging_dir):\n os.makedirs(tensorboard_logging_dir)\n\n summary_writer = (tf.summary\n .create_file_writer(tensorboard_logging_dir))\n\n with summary_writer.as_default():\n for n in range(number_rounds):\n federated_data = create_federated_data(training_source,\n testing_source,\n sample_size,\n random_state)\n \n # extract the training and testing datasets\n anonymized_training_data = []\n anonymized_testing_data = []\n for training_dataset, testing_dataset in federated_data.values():\n anonymized_training_data.append(training_dataset)\n anonymized_testing_data.append(testing_dataset)\n \n state, _ = federated_averaging_process.next(state, anonymized_training_data)\n training_metrics = federated_evaluation(state.model, anonymized_training_data)\n testing_metrics = federated_evaluation(state.model, anonymized_testing_data)\n print(f\"Round: {n}, Training metrics: {training_metrics}, Testing metrics: {testing_metrics}\")\n\n # tensorboard logging\n for name, value in training_metrics._asdict().items():\n tf.summary.scalar(name, value, step=n)\n \n for name, value in testing_metrics._asdict().items():\n tf.summary.scalar(name, value, step=n)\n else:\n for n in range(number_rounds):\n federated_data = create_federated_data(training_source,\n testing_source,\n sample_size,\n random_state)\n \n # extract the training and testing datasets\n anonymized_training_data = []\n anonymized_testing_data = []\n for training_dataset, testing_dataset in federated_data.values():\n 
anonymized_training_data.append(training_dataset)\n anonymized_testing_data.append(testing_dataset)\n\n state, _ = federated_averaging_process.next(state, anonymized_training_data)\n training_metrics = federated_evaluation(state.model, anonymized_training_data)\n testing_metrics = federated_evaluation(state.model, anonymized_testing_data)\n print(f\"Round: {n}, Training metrics: {training_metrics}, Testing metrics: {testing_metrics}\")\n \n return state, (training_metrics, testing_metrics)", "_____no_output_____" ], [ "federated_averaging_process = (tff.learning\n .build_federated_averaging_process(create_tff_model_fn, \n create_client_optimizer,\n create_server_optimizer,\n client_weight_fn=None,\n stateful_delta_aggregate_fn=None,\n stateful_model_broadcast_fn=None))\n\nfederated_evaluation = (tff.learning\n .build_federated_evaluation(create_tff_model_fn))\n\n_random_state = np.random.RandomState(42)\nupdated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,\n federated_evaluation,\n training_source=emnist_train,\n testing_source=emnist_test,\n sample_size=0.01,\n random_state=_random_state,\n number_rounds=15)", "_____no_output_____" ] ], [ [ "# Wrapping up", "_____no_output_____" ], [ "## Interesting resources\n\n[PySyft](https://github.com/OpenMined/PySyft) is a Python library for secure and private Deep Learning created by [OpenMined](https://www.openmined.org/). PySyft decouples private data from model training, using\n[Federated Learning](https://ai.googleblog.com/2017/04/federated-learning-collaborative.html),\n[Differential Privacy](https://en.wikipedia.org/wiki/Differential_privacy),\nand [Multi-Party Computation (MPC)](https://en.wikipedia.org/wiki/Secure_multi-party_computation) within the main Deep Learning frameworks like PyTorch and TensorFlow.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
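As an illustrative sketch alongside the federated-learning notebook stored in the record above (this code is not part of that notebook), the server-side step of Federated Averaging can be written in plain NumPy. The notebook builds this step through `tff.learning.build_federated_averaging_process`; the sketch assumes the common choice of weighting each client's updated parameters by its number of local examples, and the client arrays and example counts below are made up.

```python
# Minimal NumPy sketch of the server-side step in Federated Averaging:
# each client sends back updated weights plus an example count, and the
# server replaces the global parameters with their example-weighted mean.
import numpy as np

def federated_average(client_weights, client_num_examples):
    """Example-weighted average of per-client parameter arrays."""
    stacked = np.stack(client_weights)                  # shape: (num_clients, ...)
    shares = np.asarray(client_num_examples, dtype=float)
    shares /= shares.sum()                              # per-client mixing weights
    # Contract the client axis: result has the same shape as one client's parameters.
    return np.tensordot(shares, stacked, axes=1)

# Three hypothetical clients with different amounts of local data.
client_weights = [np.full((784, 10), 0.1),
                  np.full((784, 10), 0.2),
                  np.full((784, 10), 0.4)]
client_num_examples = [100, 300, 600]

new_global = federated_average(client_weights, client_num_examples)
print(new_global[0, 0])   # 0.1*0.1 + 0.3*0.2 + 0.6*0.4 = approx. 0.31
```

In the notebook's terms, each client runs its own optimizer locally, and the server optimizer then applies this kind of averaged update to the shared model, which is the two-optimizer structure the notebook calls out.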
cbc5e400ef016e9d165623d4ac661c457a12755b
11,630
ipynb
Jupyter Notebook
4.1_review_texts/review_filmes.ipynb
iterasolucoes/ciencia-de-dados
6076f670257bb95269003e637485f8df7a56fff5
[ "Apache-2.0" ]
1
2019-09-25T22:17:52.000Z
2019-09-25T22:17:52.000Z
4.1_review_texts/review_filmes.ipynb
iterasolucoes/ciencia-de-dados
6076f670257bb95269003e637485f8df7a56fff5
[ "Apache-2.0" ]
null
null
null
4.1_review_texts/review_filmes.ipynb
iterasolucoes/ciencia-de-dados
6076f670257bb95269003e637485f8df7a56fff5
[ "Apache-2.0" ]
null
null
null
23.213573
169
0.489252
[ [ [ "import pandas as pd\nfrom unidecode import unidecode\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nnltk.download('stopwords')", "[nltk_data] Downloading package stopwords to /home/zeus/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "df = pd.read_csv('../base/review.csv',encoding='latin-1')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "import string\nfrom nltk.stem.snowball import SnowballStemmer\nimport swifter\nimport nltk\n\nstemmer = SnowballStemmer(\"english\")\nstop = set(stopwords.words('english'))\ndef lower(texto):\n return texto.lower()\n\ndef normalize(texto):\n return unidecode(texto)\n\ndef remove_ponctuation(texto):\n for punc in string.punctuation:\n texto = texto.replace(punc,\" \")\n return texto\n\ndef remove_stopwords(texto):\n ret = []\n for palavra in texto.split():\n if palavra not in stop:\n ret.append(palavra)\n return ' '.join(ret)\n\ndef stem(texto):\n\n ret = []\n for palavra in texto.split():\n ret.append(stemmer.stem(palavra))\n return ' '.join(ret)\n\n\ndef remove_number(texto):\n result = ''.join([i for i in texto if not i.isdigit()])\n return result\n\ndef pipeline(texto):\n texto = normalize(texto)\n texto = lower(texto)\n texto = remove_ponctuation(texto)\n texto = remove_stopwords(texto)\n texto = remove_number(texto)\n texto = stem(texto)\n return texto", "_____no_output_____" ], [ "df['SentimentText'].apply(lower).head()", "_____no_output_____" ], [ "remove_ponctuation(\"é, ué!\")", "_____no_output_____" ], [ "len(df)", "_____no_output_____" ], [ "df['preproc'] = df['SentimentText'].swifter.apply(pipeline)", "_____no_output_____" ], [ "# vectorizer = CountVectorizer()\n# X = vectorizer.fit_transform(df['preproc'])\n# len(vectorizer.get_feature_names())", "_____no_output_____" ], [ "vectorizer_tfidf = TfidfVectorizer()\nX = vectorizer_tfidf.fit_transform(df['preproc'])\nlen(vectorizer_tfidf.get_feature_names())", "_____no_output_____" ], [ "y = df['Sentiment']", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\n\nclf = LogisticRegression(solver='liblinear')\nnp.mean(cross_val_score(clf,X, y, cv=10,scoring='balanced_accuracy'))", "_____no_output_____" ], [ "from sklearn.naive_bayes import MultinomialNB\nclf = MultinomialNB()\nnp.mean(cross_val_score(clf,X, y, cv=10,scoring='balanced_accuracy'))", "_____no_output_____" ], [ "clf.fit(X,y)", "_____no_output_____" ], [ "import pickle\nfilename = 'clf.pickle'\noutfile = open(filename,'wb')\npickle.dump(clf,outfile)\noutfile.close()", "_____no_output_____" ], [ "filename = 'vectorizer.pickle'\noutfile = open(filename,'wb')\npickle.dump(vectorizer_tfidf,outfile)\noutfile.close()", "_____no_output_____" ], [ "#I just love this movie. Specially the climax, seriously one of the best climax I have ever seen.", "_____no_output_____" ], [ "#I just want to say how amazing this film is from start to finish. This will take you on a emotional ride.You will not he disappointed", "_____no_output_____" ], [ "#LITERALLY , one of the best movies i have seen in my entire life , filled with a tone of action and emotions . you will love avenger endgame . ' i love you 3000 '", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
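As an illustrative sketch alongside the sentiment-analysis notebook stored in the record above (not code from that notebook), the inference step the notebook leaves implicit can be exercised end to end: that notebook pickles `vectorizer.pickle` and `clf.pickle` and only lists sample reviews as comments, never predicting on them. The toy corpus and labels below are made up; loading the pickled artifacts with `pickle.load` would slot into the same place as the in-memory objects here.

```python
# Self-contained sketch of the notebook's TF-IDF + Multinomial Naive Bayes
# pattern, ending with the prediction step on unseen reviews.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

texts = ["i love this movie", "best film ever", "terrible plot", "i hated it"]
labels = [1, 1, 0, 0]                        # 1 = positive, 0 = negative (toy labels)

vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(texts)          # fit vocabulary and weights on training text

clf = MultinomialNB()
clf.fit(X, labels)

new_reviews = ["love the film", "terrible, i hated the plot"]
X_new = vectorizer.transform(new_reviews)    # reuse the fitted vectorizer, never refit
print(clf.predict(X_new))                    # expected on this toy data: [1 0]
print(clf.predict_proba(X_new))              # per-class probabilities
```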
cbc5f6f861ffe1cf6e9890ecb442fd7b7b39d337
82,974
ipynb
Jupyter Notebook
big-o-notation.ipynb
brandonjbryant/numpy-pandas-visualization-exercises
9137ddf7abd2288fbfaad057de787c712833bffc
[ "Apache-2.0" ]
1
2021-01-28T17:59:31.000Z
2021-01-28T17:59:31.000Z
big-o-notation.ipynb
brandonjbryant/numpy-pandas-visualization-exercises
9137ddf7abd2288fbfaad057de787c712833bffc
[ "Apache-2.0" ]
null
null
null
big-o-notation.ipynb
brandonjbryant/numpy-pandas-visualization-exercises
9137ddf7abd2288fbfaad057de787c712833bffc
[ "Apache-2.0" ]
null
null
null
663.792
80,188
0.949225
[ [ [ "import math\nimport matplotlib.pyplot as plt\n\n# x values\n\nx = range(1, 20)\n\n# O(1)\ny1 = [1 for n in x]\n\n# O(log n)\ny2 = [math.log(n) for n in x]\n\n# O(n)\ny3 = x\n\n# O(n log n)\ny4 = [n * math.log(n) for n in x]\n\n# O(n^2)\ny5 = [n**2 for n in x]\n\n# O(2^n)\ny6 = [2**n for n in x]\n\n# O(n!)\ny7 = [math.factorial(n) for n in x]\n\n# O(n^n)\ny8 = [n**n for n in x]\n\n\nplt.figure(figsize = (18, 8))\n\nplt.title('Big O Notation', fontsize = 15)\n\nplt.xlabel('Elements', fontsize=12)\nplt.ylabel('Operations', fontsize=12)\n\n# O(1)\nplt.plot(x, y1, label='$\\mathcal{O}(1)$', color='navy')\n\n# lightcoral, orange, mediumseagreen, cadetblue, dodgerblue, slateblue, violet, crimson\n\n# O(log n)\nplt.plot(x, y2, label='$\\mathcal{O}(logn)$', color='crimson')\n\n# O(n)\nplt.plot(x, y3, label='$O(n)$', color='darkgreen')\n\n# O(n log n)\nplt.plot(x, y4, label='$\\mathcal{O}(nlogn)$', color='cadetblue')\n\n# O(n^{2})\nplt.plot(x, y5, label='$\\mathcal{O}(n^{2})$', color='slateblue')\n\n# O(2^{n})\nplt.plot(x, y6, label='$\\mathcal{O}(2^{n})$', color='slategray')\n\n# O(n!)\nplt.plot(x, y7, label='$\\mathcal{O}(n!)$', color='goldenrod')\n\n# O(n^{n})\nplt.plot(x, y8, label='$\\mathcal{O}(n^{n})$', color='rebeccapurple')\n\nplt.xlim(1, 11)\nplt.ylim(0, 50)\n\nplt.legend(loc='upper right', fontsize=14)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cbc5ff5f09f95ca5eeb3ff87cdac039206993e17
160,895
ipynb
Jupyter Notebook
MLDL_homework/HW3/51194506093.ipynb
im0qianqian/ML_demo
1ce0f655ba616a2ed4c2e6446bac379bd45c0352
[ "MIT" ]
2
2020-03-12T07:57:54.000Z
2022-03-20T13:58:25.000Z
MLDL_homework/HW3/51194506093.ipynb
im0qianqian/ML_demo
1ce0f655ba616a2ed4c2e6446bac379bd45c0352
[ "MIT" ]
null
null
null
MLDL_homework/HW3/51194506093.ipynb
im0qianqian/ML_demo
1ce0f655ba616a2ed4c2e6446bac379bd45c0352
[ "MIT" ]
1
2022-03-20T13:58:29.000Z
2022-03-20T13:58:29.000Z
564.54386
19,416
0.944162
[ [ [ "## 最小二乘法", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import leastsq\n\nXi = np.array(\n [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345, 348,\n 358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493], dtype=np.float)\nYi = np.array(\n [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305, 308,\n 324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458], dtype=np.float)\n\n\ndef func(p, x):\n k, b = p\n return k * x + b\n\n\ndef error(p, x, y):\n return func(p, x) - y\n\n\n# k,b的初始值,可以任意设定,经过几次试验,发现p0的值会影响cost的值:Para[1]\np0 = [1, 20]\n\n# 把error函数中除了p0以外的参数打包到args中(使用要求)\nPara = leastsq(error, p0, args=(Xi, Yi))\n\n# 读取结果\nk, b = Para[0]\n\n# 画样本点\nplt.figure(figsize=(8, 6)) ##指定图像比例: 8:6\nplt.scatter(Xi, Yi, color=\"green\", linewidth=2)\n\n# 画拟合直线\n# x = np.linspace(0, 12, 100) ##在0-15直接画100个连续点\n# x = np.linspace(0, 500, int(500/12)*100) ##在0-15直接画100个连续点\n\n# y = k * x + b ##函数式\nplt.plot(Xi, k * Xi + b, color=\"red\", linewidth=2)\nplt.legend(loc='lower right') # 绘制图例\nplt.show()", "No handles with labels found to put in legend.\n" ] ], [ [ "## 梯度下降法", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "x = np.array(\n [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345, 348,\n 358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493], dtype=np.float)\ny = np.array(\n [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305, 308,\n 324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458], dtype=np.float)", "_____no_output_____" ], [ "def GD(x, y, learning_rate, iteration_num=10000):\n theta = np.random.rand(2, 1) # 初始化参数\n x = np.hstack((np.ones((len(x), 1)), x.reshape(len(x), 1)))\n y = y.reshape(len(y), 1)\n\n for i in range(iteration_num):\n # 计算梯度\n grad = np.dot(x.T, (np.dot(x, theta) - y)) / x.shape[0]\n # 更新参数\n theta -= learning_rate * grad\n # 计算 MSE\n # loss = np.linalg.norm(np.dot(x, theta) - y)\n\n plt.figure()\n plt.title('Learning rate: {}, iteration_num: {}'.format(learning_rate, iteration_num))\n plt.scatter(x[:, 1], y.reshape(len(y)))\n plt.plot(x[:, 1], np.dot(x, theta), color='red', linewidth=3)", "_____no_output_____" ], [ "GD(x, y, learning_rate=0.00001, iteration_num=1)\nGD(x, y, learning_rate=0.00001, iteration_num=3)\nGD(x, y, learning_rate=0.00001, iteration_num=10)\nGD(x, y, learning_rate=0.00001, iteration_num=100)\nGD(x, y, learning_rate=0.000001, iteration_num=1)\nGD(x, y, learning_rate=0.000001, iteration_num=3)\nGD(x, y, learning_rate=0.000001, iteration_num=10)\nGD(x, y, learning_rate=0.000001, iteration_num=100)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
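As a hedged cross-check alongside the curve-fitting notebook stored in the record above (not code from that notebook), the same straight-line fit has a closed-form least-squares solution via the normal equations, which `np.linalg.lstsq` solves directly. For brevity the sketch uses only the first six (Xi, Yi) pairs from that notebook; in practice the full arrays would be used, and the slope and intercept should agree closely with the `scipy.optimize.leastsq` and gradient-descent fits.

```python
# Closed-form least-squares line fit as a sanity check on the iterative fits.
import numpy as np

x = np.array([157.0, 162.0, 169.0, 176.0, 188.0, 200.0])   # first six Xi values
y = np.array([143.0, 146.0, 153.0, 160.0, 169.0, 180.0])   # first six Yi values

A = np.column_stack([x, np.ones_like(x)])        # design matrix [x, 1] for y = k*x + b
(k, b), *_ = np.linalg.lstsq(A, y, rcond=None)   # minimizes ||A @ [k, b] - y||
print(f"k = {k:.4f}, b = {b:.4f}")
```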
cbc600ee83d465726db1e1b9b1b858041a73edbe
22,282
ipynb
Jupyter Notebook
site/ja/guide/intro_to_graphs.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
2
2021-03-12T18:02:29.000Z
2021-06-18T19:32:41.000Z
site/ja/guide/intro_to_graphs.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
null
null
null
site/ja/guide/intro_to_graphs.ipynb
NarimaneHennouni/docs-l10n
39a48e0d5aa34950e29efd5c1f111c120185e9d9
[ "Apache-2.0" ]
null
null
null
32.767647
251
0.544879
[ [ [ "##### Copyright 2020 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# グラフと関数の基礎", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://www.tensorflow.org/guide/intro_to_graphs\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\"> TensorFlow.orgで表示</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/intro_to_graphs.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Google Colab で実行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/intro_to_graphs.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">GitHub でソースを表示{</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/intro_to_graphs.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\">ノートブックをダウンロード/a0}</a></td>\n</table>", "_____no_output_____" ], [ "# グラフと `tf.function` の基礎\n\nこのガイドは、TensorFlow の仕組みを説明するために、TensorFlow と Keras 基礎を説明します。今すぐ Keras に取り組みたい方は、[Keras のガイド一覧](keras/)を参照してください。\n\nこのガイドでは、グラフ取得のための単純なコード変更、格納と表現、およびモデルの高速化とエクスポートを行うための使用方法について、TensorFlow の中核的な仕組みを説明します。\n\n注意: TensorFlow 1.x のみの知識をお持ちの場合は、このガイドでは、非常に異なるグラフビューが紹介されています。\n\nこれは、基礎を概説したガイドです。これらの概念の徹底ガイドについては、[`tf.function` ガイド](function)を参照してください。\n", "_____no_output_____" ], [ "## グラフとは?\n\n前回の 3 つのガイドでは、TensorFlow の **Eager** execution について説明しました。これは、TensorFlow 演算が演算ごとにPythonによって実行され、結果を Python に返すことを意味します。Eager TensorFlow は GPU を活用し、変数、テンソル、さらには演算を GPU と TPU に配置することができます。また、デバックも簡単に行えます。\n\n一部のユーザーは、Python から移動する必要はありません。\n\nただし、TensorFlow を Python で演算ごとに実行すると、ほかの方法では得られない多数の高速化機能が利用できなくなります。Python からテンソルの計算を抽出できる場合は、*グラフ* にすることができます。\n\n**グラフとは、計算のユニットを表す一連の `tf.Operation` オブジェクトと、演算間を流れるデータのユニットを表す `tf.Tensor` オブジェクトを含むデータ構造です。** `tf.Graph` コンテキストで定義されます。これらのグラフはデータ構造であるため、元の Python コードがなくても、保存、実行、および復元することができます。\n\n次は、TensorBoard で視覚化された単純な二層グラフです。\n", "_____no_output_____" ], [ "![a two-layer tensorflow graph](https://storage.cloud.google.com/tensorflow.org/images/two-layer-network.png)", "_____no_output_____" ], [ "## グラフのメリット\n\nグラフを使用すると、柔軟性が大幅に向上し、モバイルアプリケーション。組み込みデバイス、バックエンドサーバーといった Python インタプリタのない環境でも TensorFlow グラフを使用できます。TensorFlow は、Python からエクスポートされた場合に、保存されるモデルの形式としてグラフを使用します。\n\nまた、グラフは最適化を簡単に行えるため、コンパイラは次のような変換を行えます。\n\n- 計算に定数ノードを畳み込むで、テンソルの値を統計的に推論します*(「定数畳み込み」)*。\n- 独立した計算のサブパートを分離し、スレッドまたはデバイスに分割します。\n- 共通部分式を取り除き、算術演算を単純化します。\n", "_____no_output_____" ], [ "これやほかの高速化を実行する [Grappler](./graph_optimization.ipynb) という総合的な最適化システムがあります。\n\nまとめると、グラフは非常に便利なもので、**複数のデバイス**で、TensorFlow の**高速化**、**並列化**、および効率化を期待することができます。\n\nただし、便宜上、Python で機械学習モデル(またはその他の計算)を定義した後、必要となったときに自動的にグラフを作成することをお勧めします。", "_____no_output_____" ], [ "# グラフのトレース\n\nTensorFlow でグラフを作成する方法は、直接呼出しまたはデコレータのいずれかとして `tf.function` を使用することです。", 
"_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport timeit\nfrom datetime import datetime", "_____no_output_____" ], [ "# Define a Python function\ndef function_to_get_faster(x, y, b):\n x = tf.matmul(x, y)\n x = x + b\n return x\n\n# Create a `Function` object that contains a graph\na_function_that_uses_a_graph = tf.function(function_to_get_faster)\n\n# Make some tensors\nx1 = tf.constant([[1.0, 2.0]])\ny1 = tf.constant([[2.0], [3.0]])\nb1 = tf.constant(4.0)\n\n# It just works!\na_function_that_uses_a_graph(x1, y1, b1).numpy()", "_____no_output_____" ] ], [ [ "`tf.function` 化された関数は、[Python コーラブル]()で、Python 相当と同じように機能します。特定のクラス(`python.eager.def_function.Function`)を使用しますが、ユーザーにとっては、トレースできないものと同じように動作します。\n\n`tf.function` は、それが呼び出す Python 関数を再帰的にトレースします。", "_____no_output_____" ] ], [ [ "def inner_function(x, y, b):\n x = tf.matmul(x, y)\n x = x + b\n return x\n\n# Use the decorator\[email protected]\ndef outer_function(x):\n y = tf.constant([[2.0], [3.0]])\n b = tf.constant(4.0)\n\n return inner_function(x, y, b)\n\n# Note that the callable will create a graph that\n# includes inner_function() as well as outer_function()\nouter_function(tf.constant([[1.0, 2.0]])).numpy()", "_____no_output_____" ] ], [ [ "TensorFlow 1.x を使用したことがある場合は、`Placeholder` または `tf.Sesssion` をまったく定義する必要がないことに気づくでしょう。", "_____no_output_____" ], [ "## フローの制御と副次的影響\n\nフロー制御とループは、デフォルトで `tf.autograph` によって TensorFlow に変換されます。Autograph は、ループコンストラクトの標準化、アンロール、および [AST](https://docs.python.org/3/library/ast.html) マニピュレーションなどのメソッドを組み合わせて使用します。\n", "_____no_output_____" ] ], [ [ "def my_function(x):\n if tf.reduce_sum(x) <= 1:\n return x * x\n else:\n return x-1\n\na_function = tf.function(my_function)\n\nprint(\"First branch, with graph:\", a_function(tf.constant(1.0)).numpy())\nprint(\"Second branch, with graph:\", a_function(tf.constant([5.0, 5.0])).numpy())", "_____no_output_____" ] ], [ [ "Autograph 変換を直接呼び出して、Python が TensorFlow 演算に変換される様子を確認することができます。これはほとんど解読不能ですが、変換を確認することができます。", "_____no_output_____" ] ], [ [ "# Don't read the output too carefully.\nprint(tf.autograph.to_code(my_function))", "_____no_output_____" ] ], [ [ "Autograph は、`if-then` 句、ループ、 `break`、`return`、`continue` などを自動的に変換します。\n\nほとんどの場合、Autograph の動作に特別な考慮はいりませんが、いくつかの注意事項があり、これについては [tf.function ガイド](./function.ipynb)のほか、[Autograph 完全リファレンス](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md)が役立ちます。", "_____no_output_____" ], [ "## 高速化の確認\n\ntensor-using 関数を `tf.function` でラッピングするだけでは、コードは高速化しません。単一のマシンで数回呼び出された小さな関数では、グラフまたはグラフの一部の呼び出しにかかるオーバーヘッドによってランタイムが占有されてしまうことがあります。また、GPU 大きな負荷をかける畳み込みのスタックなど、計算のほとんどがすでにアクセラレータで発生している場合は、グラフの高速化をあまり確認できません。\n\n複雑な計算については、グラフによって大幅な高速化を得ることができます。これは、グラフが Python からデバイスへの通信や一部の高速化の実装を減らすためです。\n\n次のコードは、小さな密のレイヤーでの数回の実行にかかる時間を計測します。", "_____no_output_____" ] ], [ [ "# Create an oveerride model to classify pictures\nclass SequentialModel(tf.keras.Model):\n def __init__(self, **kwargs):\n super(SequentialModel, self).__init__(**kwargs)\n self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))\n self.dense_1 = tf.keras.layers.Dense(128, activation=\"relu\")\n self.dropout = tf.keras.layers.Dropout(0.2)\n self.dense_2 = tf.keras.layers.Dense(10)\n\n def call(self, x):\n x = self.flatten(x)\n x = self.dense_1(x)\n x = self.dropout(x)\n x = self.dense_2(x)\n return x\n\ninput_data = tf.random.uniform([60, 28, 28])\n\neager_model = SequentialModel()\ngraph_model = tf.function(eager_model)\n\nprint(\"Eager time:\", timeit.timeit(lambda: 
eager_model(input_data), number=10000))\nprint(\"Graph time:\", timeit.timeit(lambda: graph_model(input_data), number=10000))\n", "_____no_output_____" ] ], [ [ "### 多層型関数\n\n関数をトレースする場合、**多層型**の `Function` オブジェクトを作成します。多層型関数は Pythonコーラブルで、1つの API の背後にあるいくつかの具象関数グラフをカプセル化します。\n\nこの `Function` は、あらゆる `dtypes` と形状に使用できます。新しい引数シグネチャでそれを呼び出すたびに、元の関数が新しい引数で再トレースされます。`Function` は、そのトレースに対応する `tf.Graph` を `concrete_function` に格納します。関数がすでにそのような引数でトレースされている場合は、トレース済みのグラフが取得されます。\n\n概念的に、次のようになります。\n\n- **`tf.Graph`** は計算を説明する未加工のポータブルなデータ構造である\n- **`Function`** は、ConcreteFunctions のキャッシュ、トレース、およびディスパッチャーである\n- **`ConcreteFunction`** は、Python からグラフを実行できるグラフの Eager 対応ラッパーである\n\n### 多層型関数の検査\n\n`a_function` を検査できます。これはPython 関数 `my_function` に対して `tf.function` を呼び出した結果です。この例では、3 つの引数で `a_function` を呼び出すことで、3 つの具象関数を得られています。\n", "_____no_output_____" ] ], [ [ "print(a_function)\n\nprint(\"Calling a `Function`:\")\nprint(\"Int:\", a_function(tf.constant(2)))\nprint(\"Float:\", a_function(tf.constant(2.0)))\nprint(\"Rank-1 tensor of floats\", a_function(tf.constant([2.0, 2.0, 2.0])))", "_____no_output_____" ], [ "# Get the concrete function that works on floats\nprint(\"Inspecting concrete functions\")\nprint(\"Concrete function for float:\")\nprint(a_function.get_concrete_function(tf.TensorSpec(shape=[], dtype=tf.float32)))\nprint(\"Concrete function for tensor of floats:\")\nprint(a_function.get_concrete_function(tf.constant([2.0, 2.0, 2.0])))\n", "_____no_output_____" ], [ "# Concrete functions are callable\n# Note: You won't normally do this, but instead just call the containing `Function`\ncf = a_function.get_concrete_function(tf.constant(2))\nprint(\"Directly calling a concrete function:\", cf(tf.constant(2)))", "_____no_output_____" ] ], [ [ "この例では、スタックの非常に奥を調べています。具体的にトレースを管理していない限り、通常は、ここに示されるように具象関数を呼び出す必要はありません。", "_____no_output_____" ], [ "# Eager execution でのデバッグ\n\nスタックトレースが長い場合、特に `tf.Graph` または `with tf.Graph().as_default()` の参照が含まれる場合、グラフコンテキストで実行している可能性があります。TensorFlow のコア関数は Keras の `model.fit()` などのグラフコンテキストを使用します。\n\nEager execution をデバッグする方がはるかに簡単であることがよくあります。スタックトレースは比較的に短く、理解しやすいからです。\n\nグラフのデバックが困難な場合は、Eager execution に戻ってデバックすることができます。\n\nEager で実行していることを確認するには、次を行います。\n\n- メソッドとレイヤーを直接コーラブルとして呼び出す\n\n- Keras compile/fit を使用している場合、コンパイル時に **`model.compile(run_eagerly=True)`** を使用する\n\n- **`tf.config.experimental_run_functions_eagerly(True)`** でグローバル実行モードを設定する\n", "_____no_output_____" ], [ "### `run_eagerly=True` を使用する", "_____no_output_____" ] ], [ [ "# Define an identity layer with an eager side effect\nclass EagerLayer(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(EagerLayer, self).__init__(**kwargs)\n # Do some kind of initialization here\n\n def call(self, inputs):\n print(\"\\nCurrently running eagerly\", str(datetime.now()))\n return inputs", "_____no_output_____" ], [ "# Create an override model to classify pictures, adding the custom layer\nclass SequentialModel(tf.keras.Model):\n def __init__(self):\n super(SequentialModel, self).__init__()\n self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))\n self.dense_1 = tf.keras.layers.Dense(128, activation=\"relu\")\n self.dropout = tf.keras.layers.Dropout(0.2)\n self.dense_2 = tf.keras.layers.Dense(10)\n self.eager = EagerLayer()\n\n def call(self, x):\n x = self.flatten(x)\n x = self.dense_1(x)\n x = self.dropout(x)\n x = self.dense_2(x)\n return self.eager(x)\n\n# Create an instance of this model\nmodel = SequentialModel()\n\n# Generate some nonsense pictures and labels\ninput_data = 
tf.random.uniform([60, 28, 28])\nlabels = tf.random.uniform([60])\n\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)", "_____no_output_____" ] ], [ [ "まず、Eager を使用せずにモデルをコンパイルします。モデルはトレースされません。名前にも関わらず、`compile` は、損失関数、最適化、およびトレーニングパラメータのセットアップしか行いません。", "_____no_output_____" ] ], [ [ "model.compile(run_eagerly=False, loss=loss_fn)", "_____no_output_____" ] ], [ [ "ここで、`fit` を呼び出し、関数がトレース(2 回)されると Eager 効果が実行しなくなるのを確認します。", "_____no_output_____" ] ], [ [ "model.fit(input_data, labels, epochs=3)", "_____no_output_____" ] ], [ [ "ただし、エポックを 1 つでも Eager で実行すると、Eager の副次的作用が 2 回現れます。", "_____no_output_____" ] ], [ [ "print(\"Running eagerly\")\n# When compiling the model, set it to run eagerly\nmodel.compile(run_eagerly=True, loss=loss_fn)\n\nmodel.fit(input_data, labels, epochs=1)\n", "_____no_output_____" ] ], [ [ "### `experimental_run_functions_eagerly` を使用する\n\nまた、すべてを Eager で実行するよにグローバルに設定することができます。これは、トレースし直した場合にのみ機能することに注意してください。トレースされた関数は、トレースされたままとなり、グラフとして実行します。", "_____no_output_____" ] ], [ [ "# Now, globally set everything to run eagerly\ntf.config.experimental_run_functions_eagerly(True)\nprint(\"Run all functions eagerly.\")\n\n# First, trace the model, triggering the side effect\npolymorphic_function = tf.function(model)\n\n# It was traced...\nprint(polymorphic_function.get_concrete_function(input_data))\n\n# But when you run the function again, the side effect happens (both times).\nresult = polymorphic_function(input_data)\nresult = polymorphic_function(input_data)", "_____no_output_____" ], [ "# Don't forget to set it back when you are done\ntf.config.experimental_run_functions_eagerly(False)", "_____no_output_____" ] ], [ [ "# トレースとパフォーマンス\n\nトレースにはある程度のオーバーヘッドがかかります。小さな関数のトレースは素早く行えますが、大規模なモデルであればかなりの時間がかかる場合があります。パフォーマンスが上昇するとこの部分の時間は迅速に取り戻されますが、大規模なモデルのトレーニングの最初の数エポックでは、トレースによって遅延が発生する可能性があることに注意しておくことが重要です。\n\nモデルの規模に関係なく、頻繁にトレースするのは避けたほうがよいでしょう。[tf.function ガイドのこのセクション](function.ipynb#when_to_retrace)では、入力仕様を設定し、テンソル引数を使用して再トレースを回避する方法について説明しています。フォーマンスが異常に低下している場合は、誤って再トレースしていないかどうかを確認することをお勧めします。\n\neager-only の副次的効果(Python 引数の出力など)を追加して、関数がいつトレースされているかを確認できます。ここでは、新しい Python 引数が常に再トレースをトリガするため、余分な再トレースが発生していることを確認できます。", "_____no_output_____" ] ], [ [ "# Use @tf.function decorator\[email protected]\ndef a_function_with_python_side_effect(x):\n print(\"Tracing!\") # This eager\n return x * x + tf.constant(2)\n\n# This is traced the first time\nprint(a_function_with_python_side_effect(tf.constant(2)))\n# The second time through, you won't see the side effect\nprint(a_function_with_python_side_effect(tf.constant(3)))\n\n# This retraces each time the Python argument chances\n# as a Python argument could be an epoch count or other\n# hyperparameter\nprint(a_function_with_python_side_effect(2))\nprint(a_function_with_python_side_effect(3))\n", "_____no_output_____" ] ], [ [ "# 次のステップ\n\nより詳しい説明については、`tf.function` API リファレンスページと[ガイド](./function.ipynb)を参照してください。", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cbc612c303867936cfaca2239617b3bdeb64bfb3
243,574
ipynb
Jupyter Notebook
text/Chapter5.ipynb
aoi7671/tutorial_python
ca8dd870d81861381c2634fd480b256c2d9f668d
[ "MIT" ]
1
2020-10-07T04:25:47.000Z
2020-10-07T04:25:47.000Z
text/Chapter5.ipynb
whale8/tutorial_python2
2e893989d2ff84506fbb4a63f07d9580db1076bd
[ "MIT" ]
null
null
null
text/Chapter5.ipynb
whale8/tutorial_python2
2e893989d2ff84506fbb4a63f07d9580db1076bd
[ "MIT" ]
null
null
null
192.09306
38,808
0.899571
[ [ [ "import warnings\nwarnings.filterwarnings('ignore') # 実行に影響のない warninig を非表示にします. 非推奨.", "_____no_output_____" ] ], [ [ "# Chapter 5: 機械学習 回帰問題", "_____no_output_____" ], [ "## 5-1. 回帰問題を Pythonで解いてみよう\n\n1. データセットの用意\n2. モデル構築", "_____no_output_____" ], [ "### 5-1-1. データセットの用意\n今回はwine-quality datasetを用いる. \nwine-quality dataset はワインのアルコール濃度や品質などの12要素の数値データ. \n赤ワインと白ワイン両方あります。赤ワインの含まれるデータ数は1600ほど. \nまずはデータセットをダウンロードする. \nproxy下ではjupyter notebookに設定をしないと以下は動作しない. ", "_____no_output_____" ] ], [ [ "! wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv ./data/winequality-red.csv", "--2019-10-17 14:59:12-- https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv\narchive.ics.uci.edu (archive.ics.uci.edu) をDNSに問いあわせています... 128.195.10.252\narchive.ics.uci.edu (archive.ics.uci.edu)|128.195.10.252|:443 に接続しています... ^C\n" ] ], [ [ "jupyter notebook の設定が面倒な人へ. \nproxyの設定をしたshell、もしくはブラウザなどで以下のURIからダウンロードしてください. \nhttps://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/", "_____no_output_____" ] ], [ [ "import pandas as pd\nwine = pd.read_csv(\"./data/winequality-red.csv\", sep=\";\") # sepは区切り文字の指定\ndisplay(wine.head(5))", "_____no_output_____" ] ], [ [ "まずは説明変数1つで回帰を行ってみよう. 今回はalcoholを目的変数 $t$ に, densityを説明変数 $x$ にする.", "_____no_output_____" ] ], [ [ "X = wine[[\"density\"]].values\nT = wine[\"alcohol\"].values", "_____no_output_____" ] ], [ [ "#### 前処理\nデータを扱いやすいように中心化する.", "_____no_output_____" ] ], [ [ "X = X - X.mean()\nT = T - T.mean()", "_____no_output_____" ] ], [ [ "trainとtestに分割する.", "_____no_output_____" ] ], [ [ "X_train = X[:1000, :]\nT_train = T[:1000]\nX_test = X[1000:, :]\nT_test = T[1000:]", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nfig, axes = plt.subplots(ncols=2, figsize=(12, 4))\n\naxes[0].scatter(X_train, T_train, marker=\".\")\naxes[0].set_title(\"train\")\naxes[1].scatter(X_test, T_test, marker=\".\")\naxes[1].set_title(\"test\")\nfig.show()", "_____no_output_____" ] ], [ [ "train と test の分布がかなり違う. \n予め shuffle して train と test に分割する必要があるようだ. \nXとTの対応関係を崩さず shuffle する方法は多々あるが、その1つが以下.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nnp.random.seed(0) # random の挙動を固定\n\np = np.random.permutation(len(X)) # random な index のリスト\nX = X[p]\nT = T[p]", "_____no_output_____" ], [ "X_train = X[:1000, :]\nT_train = T[:1000]\nX_test = X[1000:, :]\nT_test = T[1000:]", "_____no_output_____" ], [ "fig, axes = plt.subplots(ncols=2, figsize=(12, 4))\n\naxes[0].scatter(X_train, T_train, marker=\".\")\naxes[0].set_title(\"train\")\naxes[1].scatter(X_test, T_test, marker=\".\")\naxes[1].set_title(\"test\")\nfig.show()", "_____no_output_____" ] ], [ [ "### 5-1-2. モデルの構築", "_____no_output_____" ], [ "**今回は**, 目的変数 $t$ を以下の回帰関数で予測する.\n$$y=ax+b$$\nこの時、損失が最小になるように, パラメータ$a,b$を定める必要がある. 
ここでは二乗損失関数を用いる.\n$$\\mathrm{L}\\left(a, b\\right)\n=\\sum^{N}_{n=1}\\left(t_n - y_n\\right)^2\n=\\sum^{N}_{n=1}\\left(t_n - ax_x-b\\right)^2$$\n\n<span style=\"color: gray; \">※これは, 目的変数 $t$ が上記の回帰関数 $y$ を中心としたガウス分布に従うという仮定を置いて最尤推定することと等価.</span> ", "_____no_output_____" ] ], [ [ "class MyLinearRegression(object):\n def __init__(self):\n \"\"\"\n Initialize a coefficient and an intercept.\n \"\"\"\n self.a = \n self.b = \n \n def fit(self, X, y):\n \"\"\"\n X: data, array-like, shape (n_samples, n_features)\n y: array, shape (n_samples,)\n Estimate a coefficient and an intercept from data.\n \"\"\"\n return self\n \n def predict(self, X):\n \"\"\"\n Calc y from X\n \"\"\"\n return y", "_____no_output_____" ] ], [ [ "上記の単回帰のクラスを完成させ, 以下の実行によって図の回帰直線が得られるはずだ.", "_____no_output_____" ] ], [ [ "clf = MyLinearRegression()\nclf.fit(X_train, T_train)\n# 回帰係数\nprint(\"係数: \", clf.a)\n# 切片\nprint(\"切片: \", clf.b)\n\nfig, axes = plt.subplots(ncols=2, figsize=(12, 4))\n\naxes[0].scatter(X_train, T_train, marker=\".\")\naxes[0].plot(X_train, clf.predict(X_train), color=\"red\")\naxes[0].set_title(\"train\")\n\naxes[1].scatter(X_test, T_test, marker=\".\")\naxes[1].plot(X_test, clf.predict(X_test), color=\"red\")\naxes[1].set_title(\"test\")\nfig.show()", "係数: [-288.60241658]\n切片: 0.03280215630403101\n" ] ], [ [ "もしdatasetをshuffleせずに上記の学習を行った時, 得られる回帰直線はどうなるだろう? \n試してみてください.", "_____no_output_____" ], [ "## 5-2. scikit-learnについて\n### 5-2-1. モジュールの概要\n[scikit-learn](http://scikit-learn.org/stable/)のホームページに詳しい情報がある. \n\n実は scikit-learn に線形回帰のモジュールがすでにある. \n\n#### scikit-learn の特徴\n- scikit-learn(sklearn)には,多くの機械学習アルゴリズムが入っており,統一した形式で書かれているため利用しやすい.\n- 各手法をコードで理解するだけでなく,その元となる論文も紹介されている.\n- チュートリアルやどのように利用するのかをまとめたページもあり,似た手法が列挙されている.", "_____no_output_____" ] ], [ [ "import sklearn\nprint(sklearn.__version__)", "0.21.2\n" ], [ "from sklearn.linear_model import LinearRegression\nclf = LinearRegression()\n\n# 予測モデルを作成\nclf.fit(X_train, T_train)\n \n# 回帰係数\nprint(\"係数: \", clf.coef_)\n \n# 切片\nprint(\"切片: \", clf.intercept_)\n \n# 決定係数\nprint(\"決定係数: \", clf.score(X_train, T_train))\n\nfig, axes = plt.subplots(ncols=2, figsize=(12, 4))\n\naxes[0].scatter(X_train, T_train, marker=\".\")\naxes[0].plot(X_train, clf.predict(X_train), color=\"red\")\naxes[0].set_title(\"train\")\n\naxes[1].scatter(X_test, T_test, marker=\".\")\naxes[1].plot(X_test, clf.predict(X_test), color=\"red\")\naxes[1].set_title(\"test\")\nfig.show()", "係数: [-288.60241658]\n切片: 0.03280215630403101\n決定係数: 0.24728357072544427\n" ] ], [ [ "自分のコードと同じ結果が出ただろうか? \nまた, データを shuffle せず得られた回帰直線のスコアと, shuffleした時の回帰直線のスコアの比較もしてみよう.", "_____no_output_____" ], [ "scikit-learn の linear regression のコードは [github][1] で公開されている. \nコーディングの参考になると思うので眺めてみるといいだろう. \n\n### 5-2-2. 回帰モデルの評価 \n性能を測るといっても,その目的によって指標を変える必要がある. \nどのような問題で,どのような指標を用いることが一般的か?という問いに対しては,先行研究を確認することを勧める. \nまた,指標それぞれの特性(数学的な意味)を知っていることもその役に立つだろう.\n[参考][2] \n\n回帰モデルの評価に用いられる指標は一般にMAE, MSE, 決定係数などが存在する.\n\n1. MAE\n2. MSE\n3. 
決定係数\n\nscikit-learn はこれらの計算をするモジュールも用意されている.\n\n[1]:https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/linear_model/base.py#L367\n[2]:https://scikit-learn.org/stable/modules/model_evaluation.html", "_____no_output_____" ] ], [ [ "from sklearn import metrics\n\nT_pred = clf.predict(X_test)\nprint(\"MAE: \", metrics.mean_absolute_error(T_test, T_pred))\nprint(\"MSE: \", metrics.mean_squared_error(T_test, T_pred))\nprint(\"決定係数: \", metrics.r2_score(T_test, T_pred))", "MAE: 0.7218845932275127\nMSE: 0.7810900781645986\n決定係数: 0.23604463330510206\n" ] ], [ [ "### 5-2-3. scikit-learn の他モデルを使ってみよう", "_____no_output_____" ] ], [ [ "# 1. データセットを用意する\nfrom sklearn import datasets\niris = datasets.load_iris() # ここではIrisデータセットを読み込む\nprint(iris.data[0], iris.target[0]) # 1番目のサンプルのデータとラベル", "[5.1 3.5 1.4 0.2] 0\n" ], [ "# 2.学習用データとテスト用データに分割する\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)\n\n# 3. 線形SVMという手法を用いて分類する\nfrom sklearn.svm import SVC, LinearSVC\nclf = LinearSVC()\nclf.fit(X_train, y_train) # 学習", "_____no_output_____" ], [ "# 4. 分類器の性能を測る\ny_pred = clf.predict(X_test) # 予測\nprint(metrics.classification_report(y_true=y_test, y_pred=y_pred)) # 予測結果の評価", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 10\n 1 1.00 0.92 0.96 12\n 2 0.94 1.00 0.97 16\n\n accuracy 0.97 38\n macro avg 0.98 0.97 0.98 38\nweighted avg 0.98 0.97 0.97 38\n\n" ] ], [ [ "### 5-2-4. 分類モデルの評価\n\n分類問題に対する指標について考えてみよう.一般的な指標だけでも以下の4つがある.\n1. 正解率(accuracy)\n2. 精度(precision)\n3. 再現率(recall)\n4. F値(F1-score)\n\n(精度,再現率,F値にはmacro, micro, weightedなどがある)\n\n今回の実験でのそれぞれの値を見てみよう.", "_____no_output_____" ] ], [ [ "print('accuracy: ', metrics.accuracy_score(y_test, y_pred))\nprint('precision:', metrics.precision_score(y_test, y_pred, average='macro'))\nprint('recall: ', metrics.recall_score(y_test, y_pred, average='macro'))\nprint('F1 score: ', metrics.f1_score(y_test, y_pred, average='macro'))", "accuracy: 0.9736842105263158\nprecision: 0.9803921568627452\nrecall: 0.9722222222222222\nF1 score: 0.9754062362758015\n" ] ], [ [ "## 5-3. 問題に合わせたコーディング", "_____no_output_____" ], [ "### 5-3-1. Irisデータの可視化\nIrisデータは4次元だったので,直接可視化することはできない. \n4次元のデータをPCAによって圧縮して,2次元にし可視化する.", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA\nfrom sklearn import datasets\niris = datasets.load_iris()\n\npca = PCA(n_components=2)\nX, y = iris.data, iris.target\nX_pca = pca.fit_transform(X) # 次元圧縮\nprint(X_pca.shape)", "(150, 2)\n" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);", "_____no_output_____" ], [ "# 次元圧縮したデータを用いて分類してみる\nX_train, X_test, y_train, y_test = train_test_split(X_pca, iris.target)\nclf = LinearSVC()\nclf.fit(X_train, y_train)\ny_pred2 = clf.predict(X_test)", "_____no_output_____" ], [ "from sklearn import metrics\nprint(metrics.classification_report(y_true=y_test, y_pred=y_pred2)) # 予測結果の評価", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 9\n 1 1.00 0.87 0.93 15\n 2 0.88 1.00 0.93 14\n\n accuracy 0.95 38\n macro avg 0.96 0.96 0.95 38\nweighted avg 0.95 0.95 0.95 38\n\n" ] ], [ [ "### 5-3-2. テキストに対する処理\n\n#### テキストから特徴量を設計\nテキストのカウントベクトルを作成し,TF-IDFを用いて特徴ベクトルを作る. 
\nいくつかの設計ができるが,例題としてこの手法を用いる.\n\nここでは,20newsgroupsというデータセットを利用する.", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_20newsgroups\ncategories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med']\n\nnews_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)", "Downloading 20news dataset. This may take a few minutes.\nDownloading dataset from https://ndownloader.figshare.com/files/5975967 (14 MB)\n" ], [ "from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\ncount_vec = CountVectorizer()\nX_train_counts = count_vec.fit_transform(news_train.data)", "_____no_output_____" ], [ "tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)\nX_train_tf = tf_transformer.transform(X_train_counts)", "_____no_output_____" ] ], [ [ "#### Naive Bayseによる学習", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import MultinomialNB\nclf = MultinomialNB().fit(X_train_tf, news_train.target)", "_____no_output_____" ], [ "docs = [\"God is love.\", \"I study about Computer Science.\"]\nX_test_counts = count_vec.transform(docs)\nX_test_tf = tf_transformer.transform(X_test_counts)\npreds = clf.predict(X_test_tf)\nfor d, label_id in zip(docs, preds):\n print(\"{} -> {}\".format(d, news_train.target_names[label_id]))", "God is love. -> soc.religion.christian\nI study about Computer Science. -> sci.med\n" ] ], [ [ "このように文に対して,categoriesのうちのどれに対応するかを出力する学習器を作ることができた. \nこの技術を応用することで,ある文がポジティブかネガティブか,スパムか否かなど自然言語の文に対する分類問題を解くことができる. \n\n### 5-3-3. Pipelineによる結合", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import Pipeline\n\ntext_clf = Pipeline([('countvec', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MultinomialNB())])\n\ntext_clf.fit(news_train.data, news_train.target)", "_____no_output_____" ], [ "for d, label_id in zip(docs, text_clf.predict(docs)):\n print(\"{} -> {}\".format(d, news_train.target_names[label_id]))", "God is love. -> soc.religion.christian\nI study about Computer Science. -> sci.med\n" ] ], [ [ "## 5.4 scikit-learn 準拠コーディング", "_____no_output_____" ], [ "scikit-learn 準拠でコーディングするメリットは多数存在する.\n1. scikit-learn の用意するgrid search や cross validation を使える.\n2. 既存のscikit-learn の他手法と入れ替えが容易になる.\n3. 他の人にみてもらいやすい。使ってもらいやすい. \n4. <span style=\"color: gray; \">本家のコミッターになれるかも?</span>\n\n詳しくは [Developer’s Guide][1] に書いてある.\n\n[1]:https://scikit-learn.org/stable/developers/#rolling-your-own-estimator", "_____no_output_____" ], [ "scikit-learn ではモデルは以下の4つのタイプに分類されている.\n\n- Classifer\n - Naive Bayes Classifer などの分類モデル\n- Clusterring\n - K-mearns 等のクラスタリングモデル\n- Regressor\n - Lasso, Ridge などの回帰モデル\n- Transformer\n - PCA などの変数の変換モデル\n\n***準拠コーディングでやるべきことは、***\n\n- sklearn.base.BaseEstimatorを継承する\n- 上記タイプに応じたMixinを多重継承する \n\n(予測モデルの場合)\n- fitメソッドを実装する\n - initでパラメータをいじる操作を入れるとgrid searchが動かなくなる(後述)\n- predictメソッドを実装する\n \n### 5-4-1. リッジ回帰のscikit-learn 準拠コーディング\n\n試しに今までにコーディングした MyLinearRegression を改造し, scikit-learn 準拠にコーディングし直してみよう. \nついでにリッジ回帰の選択ができるようにもしてみよう.", "_____no_output_____" ] ], [ [ "from sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.utils.validation import check_X_y, check_is_fitted, check_array", "_____no_output_____" ] ], [ [ "回帰なので BaseEstimator と RegressorMixin の継承をする. \nさらにリッジ回帰のオプションも追加するため, initにハイパーパラメータも追加する. 
\n入力のshapeやdtypeを整えるために```check_X_y```や```check_array```を用いる(推奨).", "_____no_output_____" ] ], [ [ "class MyLinearRegression(BaseEstimator, RegressorMixin):\n def __init__(self, lam = 0):\n \"\"\"\n Initialize a coefficient and an intercept.\n \"\"\"\n self.a = \n self.b = \n self.lam = lam\n \n def fit(self, X, y):\n \"\"\"\n X: array-like, shape (n_samples, n_features)\n y: array, shape (n_samples,)\n Estimate a coefficient and an intercept from data.\n \"\"\"\n X, y = check_X_y(X, y, y_numeric=True)\n if self.lam != 0:\n pass\n else:\n pass\n \n self.a_ = \n self.b_ = \n return self\n \n def predict(self, X):\n \"\"\"\n Calc y from X\n \"\"\"\n check_is_fitted(self, \"a_\", \"b_\") # 学習済みかチェックする(推奨)\n X = check_array(X)\n return y", "_____no_output_____" ] ], [ [ "***制約***\n\n- initで宣言する変数に全て初期値を定める\n- また引数の変数名とクラス内の変数名は一致させる\n- initにデータは与えない。データの加工なども(必要なら)fit内で行う\n- データから推定された値はアンダースコアをつけて区別する. 今回なら、a_と b_をfit関数内で新しく定義する.\n- アンダースコアで終わる変数をinit内では宣言しないこと.\n- init内で引数の確認, 加工をしてはいけない. 例えば```self.lam=2*lam```などをするとgrid searchができなくなる. [参考][1]\n\n> As model_selection.GridSearchCV uses set_params to apply parameter setting to estimators, it is essential that calling set_params has the same effect as setting parameters using the __init__ method. The easiest and recommended way to accomplish this is to not do any parameter validation in __init__. All logic behind estimator parameters, like translating string arguments into functions, should be done in fit.\n\n[github][2]のコードをお手本にしてみるのもいいだろう.\n\n[1]:https://scikit-learn.org/stable/developers/contributing.html#coding-guidelines\n[2]:https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/linear_model/base.py#L367\n\n### 5-4-2. scikit-learn 準拠かどうか確認\n\n自作のコードがちゃんとscikit-learn準拠かどうか確かめるには以下を実行する.", "_____no_output_____" ] ], [ [ "from sklearn.utils.estimator_checks import check_estimator\ncheck_estimator(MyLinearRegression)", "_____no_output_____" ] ], [ [ "問題があれば指摘してくれるはずだ. なお上記を必ずパスする必要はない. \n\n#### Grid Search\n準拠モデルを作ったなら, ハイパーパラメータの決定をscikit-learnでやってみよう.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.model_selection import GridSearchCV\n\nnp.random.seed(0)\n\n# Grid search\nparameters = {'lam':np.exp([i for i in range(-30,1)])}\nreg = GridSearchCV(MyLinearRegression(),parameters,cv=5)\nreg.fit(X_train,T_train)\nbest = reg.best_estimator_\n\n# 決定係数\nprint(\"決定係数: \", best.score(X_train, T_train)) # BaseEstimatorを継承しているため使える\n# lambda\nprint(\"lam: \", best.lam)\n\nfig, axes = plt.subplots(ncols=2, figsize=(12, 4))\n\naxes[0].scatter(X_train, T_train, marker=\".\")\naxes[0].plot(X_train, best.predict(X_train), color=\"red\")\naxes[0].set_title(\"train\")\n\naxes[1].scatter(X_test, T_test, marker=\".\")\naxes[1].plot(X_test, best.predict(X_test), color=\"red\")\naxes[1].set_title(\"test\")\nfig.show()", "決定係数: 0.24638055627114677\nlam: 1.670170079024566e-05\n" ] ], [ [ "## [練習問題](./../exercise/questions.md#chapter-5)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cbc624a660e43f348831ce5c8eb1ac837ec30752
42,080
ipynb
Jupyter Notebook
Fletcher_Reeves, DFP, BFGS.ipynb
wutienyang/Optimal-Design-
1e8ad830a374e0ec9c4f9bba1bafae312f821a91
[ "BSD-3-Clause" ]
null
null
null
Fletcher_Reeves, DFP, BFGS.ipynb
wutienyang/Optimal-Design-
1e8ad830a374e0ec9c4f9bba1bafae312f821a91
[ "BSD-3-Clause" ]
null
null
null
Fletcher_Reeves, DFP, BFGS.ipynb
wutienyang/Optimal-Design-
1e8ad830a374e0ec9c4f9bba1bafae312f821a91
[ "BSD-3-Clause" ]
null
null
null
97.407407
30,550
0.824144
[ [ [ "from IPython.display import Image\nimport sympy as sp\nimport math\nimport numpy as np\nimport datetime", "_____no_output_____" ], [ "Image(filename='/Users/wy/Desktop/beales_function.png')", "_____no_output_____" ], [ "class GoldSearch(object):\n\n def __init__(self):\n self.l = 10**-5\n self.alpha = (math.sqrt(5)-1)/2.\n\n def g_lambda(self, a, b):\n return a+(1-self.alpha)*(b-a)\n\n def g_mu(self, a, b):\n return a+self.alpha*(b-a)\n\n def goldSearch(self, a, b,lambda_k,mu_k,function,k = 1):\n # step1\n if (b - a) < self.l:\n return (a+b)/2.\n\n if function(lambda_k) > function(mu_k):\n # step2\n a = lambda_k\n b = b\n lambda_k = mu_k\n mu_k = self.g_mu(a,b)\n k = k+1\n return self.goldSearch(a,b,lambda_k,mu_k,function,k)\n\n elif function(lambda_k) <= function(mu_k):\n # step3\n a = a\n b = mu_k\n mu_k = lambda_k\n lambda_k = self.g_lambda(a,b)\n k = k+1\n return self.goldSearch(a,b,lambda_k,mu_k,function,k)\nGoldSearch = GoldSearch()", "_____no_output_____" ], [ "def gradient(f):\n return [sp.lambdify((x1,x2), f.diff(x, 1), 'numpy') for x in [x1,x2]]", "_____no_output_____" ] ], [ [ "# Fletcher_Reeves\n初始點 (1,1) \nGoldSearch interval -5 ~ 5 \ne = 10**-5 \nnumber of iterations : 24 \nrun time : 0.91s", "_____no_output_____" ] ], [ [ "def Fletcher_Reeves(f,xj):\n lambda_j = sp.symbols('lambda_j')\n e = 10**-5\n sj = np.array(map(lambda fun : fun( xj[0],xj[1] ),gradient(f)))*(-1)\n i = 1\n while np.linalg.norm(sj) > e:\n i = i+1\n tmp = xj+lambda_j*sj\n new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])\n lambdaJ = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_j , new_f))\n xj_1 = xj+lambdaJ*sj\n sj_1 = np.array(map(lambda fun : fun( xj_1[0],xj_1[1] ),gradient(f)))*(-1)\n beta_j = np.dot(sj_1.T,sj_1)/np.dot(sj.T,sj)\n sj_1 = sj_1+beta_j*sj\n sj = sj_1\n xj = xj_1\n return xj_1,i", "_____no_output_____" ], [ "a = -5\nb = 5\nx1,x2 = sp.symbols('x1,x2')\nf = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2\n# 初始點\nxj = np.array([1,1])\nstart = datetime.datetime.now()\nxj_1,i = Fletcher_Reeves(f,xj)\nend = datetime.datetime.now()\nprint xj_1\nprint i\nprint end - start", "[ 3.00000314 0.50000078]\n24\n0:00:00.916073\n" ] ], [ [ "# DFP\n初始點 (1,1) \nGoldSearch interval -5 ~ 5 \ne = 10**-5 \nnumber of iterations : 8 \nrun time : 0.34s", "_____no_output_____" ] ], [ [ "def DFP(f,xi):\n lambda_i = sp.symbols('lambda_i')\n e = 10**-3\n gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)\n Bi = np.identity(2)\n i = 0\n while abs(np.linalg.norm(gradient_f)) > e:\n i = i+1\n si = (np.dot(Bi,gradient_f)*(-1)).reshape(1,2)[0]\n tmp = xi+lambda_i*si\n new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])\n lambdaI = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_i , new_f))\n xi_1 = xi+lambdaI*si\n gradient_f_1 = (np.array(map(lambda fun : fun( xi_1[0],xi_1[1] ),gradient(f)))).reshape(2,1)\n if abs(np.linalg.norm(gradient_f_1)) > e:\n gi = (gradient_f_1 - gradient_f).reshape(1,2)[0]\n Mi = (np.dot(si.reshape(2,1),si.reshape(2,1).T))*lambdaI/np.dot(si.T,gi)\n Ni = np.dot(np.dot(Bi,gi).reshape(2,1),np.dot(Bi,gi).T.reshape(1,2))*(-1)/np.dot(np.dot(gi.T,Bi),gi)\n Bi = Bi+Mi+Ni\n xi = xi_1\n gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)\n else:\n return xi_1,i", "_____no_output_____" ], [ "a = -5\nb = 5\nx1,x2 = sp.symbols('x1,x2')\nf = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2\nxi = np.array([1,1])\n\nstart = 
datetime.datetime.now()\nxi_1,i = DFP(f,xi)\nend = datetime.datetime.now()\nprint xi_1\nprint i\nprint end - start", "[ 3.00002223 0.49998904]\n8\n0:00:00.343704\n" ] ], [ [ "# BFGS\n初始點 (1,1) \nGoldSearch interval -5 ~ 5 \ne = 10**-5 \nnumber of iterations : 8 \nrun time : 0.38s", "_____no_output_____" ] ], [ [ "def BFGS(f,xi):\n lambda_i = sp.symbols('lambda_i')\n e = 10**-3\n gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)\n Bi = np.identity(2)\n i = 0\n while abs(np.linalg.norm(gradient_f)) > e:\n i = i+1\n si = (np.dot(Bi,gradient_f)*(-1)).reshape(1,2)[0]\n tmp = xi+lambda_i*si\n new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])\n lambdaI = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_i , new_f))\n xi_1 = xi+lambdaI*si\n gradient_f_1 = (np.array(map(lambda fun : fun( xi_1[0],xi_1[1] ),gradient(f)))).reshape(2,1)\n if abs(np.linalg.norm(gradient_f_1)) > e:\n gi = (gradient_f_1 - gradient_f).reshape(1,2)[0]\n di = xi_1-xi\n Mi = ((1 + np.dot(np.dot(gi.T,Bi),gi)/np.dot(di.T,gi))*np.dot(di.reshape(2,1),di.reshape(1,2)))/np.dot(di.T,gi)\n Ni = np.dot(np.dot(di.reshape(2,1),gi.reshape(1,2)),Bi)*(-1)/np.dot(di.T,gi)\n Qi = np.dot(np.dot(Bi,gi).reshape(2,1),di.reshape(1,2))*(-1)/np.dot(di.T,gi)\n Bi = Bi+Mi+Ni+Qi\n xi = xi_1\n gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)\n else:\n return xi_1,i", "_____no_output_____" ], [ "a = -5\nb = 5\nx1,x2 = sp.symbols('x1,x2')\nf = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2\nxi = np.array([1,1])\n\nstart = datetime.datetime.now()\nxi_1,i = BFGS(f,xi)\nend = datetime.datetime.now()\nprint xi_1\nprint i\nprint end - start", "[ 3.00002222 0.49998909]\n8\n0:00:00.389444\n" ], [ "from scipy.optimize import fmin\n\ndef fun(X):\n return (1.5-X[0]*(1-X[1]))**2 + (2.25-X[0]*(1-X[1]**2))**2 + (2.625-X[0]*(1-X[1]**3))**2\n\nfmin(fun,np.array([1,1]))", "Optimization terminated successfully.\n Current function value: 0.000000\n Iterations: 56\n Function evaluations: 107\n" ] ], [ [ "# scipy python做科學計算的lib\n出處 : http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html#scipy.optimize.fmin \nMinimize a function using the downhill simplex algorithm. \nThis algorithm only uses function values, not derivatives or second derivatives. ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cbc627ec9296ac3fe6a77ed328f8970349147579
4,731
ipynb
Jupyter Notebook
POPXI.ipynb
shinkuan/PopXi
7f91945272320fc624c52dacba1d41ecfb7ddcc3
[ "MIT" ]
null
null
null
POPXI.ipynb
shinkuan/PopXi
7f91945272320fc624c52dacba1d41ecfb7ddcc3
[ "MIT" ]
null
null
null
POPXI.ipynb
shinkuan/PopXi
7f91945272320fc624c52dacba1d41ecfb7ddcc3
[ "MIT" ]
null
null
null
30.326923
189
0.525893
[ [ [ "import json\nimport requests\nimport threading\nimport time\nimport os\nfrom seleniumwire import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType", "_____no_output_____" ], [ "def interceptor(request):\n if request.url.startswith('https://us-central1-popxi-f3a4d.cloudfunctions.net/stats?count='):\n params = request.params\n params['count'] = '5000'\n request.params = params\n print('Popping...')", "_____no_output_____" ], [ "def initBrowser(proxy = None):\n options = webdriver.ChromeOptions()\n options.add_argument('ignore-certificate-errors')\n #options.add_argument('headless')\n options.add_argument('window-size=1920x1080')\n #options.add_argument(\"disable-gpu\")\n options.add_argument(\"--mute-audio\")\n #options.add_argument(\"--disable-gpu\")\n #seleniumwire_options = {\n # 'enable_har': True # Capture HAR data, retrieve with driver.har\n #}\n #driver = webdriver.Chrome('chromedriver', options=options, seleniumwire_options=seleniumwire_options)\n if proxy is not None:\n chrome_options.add_argument('--proxy-server=%s' % PROXY)\n \n driver = webdriver.Chrome('chromedriver', options=options)\n driver.request_interceptor = interceptor\n #driver.scopes = [\n # '.*www.google.com/*',\n # '.*us-central1-popxi-f3a4d.cloudfunctions.net/stats*.*'\n #]\n driver.get('https://popxi.click/')\n driver.execute_script('var event=new KeyboardEvent(\"keydown\",{key:\"g\",ctrlKey:!0});setInterval(function(){for(i=0;i<1;i++)document.dispatchEvent(event)},200);')\n return driver", "_____no_output_____" ], [ "def getRequests(driver):\n get = False\n while not get:\n for req in driver.requests:\n if req.url.startswith('https://us-central1-popxi-f3a4d.cloudfunctions.net/stats?count='):\n try:\n print('Response: ' + str(req.response.status_code))\n except:\n print('Response: None')\n print('Deleting cookies...')\n driver.delete_all_cookies()\n del driver.requests\n driver.execute_script(\"window.open('https://popxi.click/');\")\n driver.switch_to.window(driver.window_handles[0])\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n print('Deleted.')\n time.sleep(2)\n driver.execute_script('var event=new KeyboardEvent(\"keydown\",{key:\"g\",ctrlKey:!0});setInterval(function(){for(i=0;i<1;i++)document.dispatchEvent(event)},200);')\n time.sleep(1)", "_____no_output_____" ], [ "def Run(proxy = None):\n print(\"Starting browser...\")\n driver = initBrowser(proxy)\n print(\"Browser Started.\")\n print(\"Fetching requests...\")\n getRequests(driver)", "_____no_output_____" ], [ "Run()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cbc62a9647f119ecbe62a5495b377cedc4ed5e81
23,153
ipynb
Jupyter Notebook
matrix_two/day1_meta.ipynb
Kom8in4toR/dw_matrix
dd8f6707f465def271c2840099b545cc28bbe955
[ "MIT" ]
null
null
null
matrix_two/day1_meta.ipynb
Kom8in4toR/dw_matrix
dd8f6707f465def271c2840099b545cc28bbe955
[ "MIT" ]
null
null
null
matrix_two/day1_meta.ipynb
Kom8in4toR/dw_matrix
dd8f6707f465def271c2840099b545cc28bbe955
[ "MIT" ]
null
null
null
23,153
23,153
0.532026
[ [ [ "cd \"/content/drive/My Drive/Colab Notebooks/\"", "/content/drive/My Drive/Colab Notebooks\n" ], [ "ls\n", " day1_meta.ipynb 'day5 (1).ipynb' \u001b[0m\u001b[01;34mdw_matrix\u001b[0m/\n day4_meta.ipynb day5.ipynb Untitled0.ipynb\n" ], [ "!mkdir -p \"dw_matrix2\"", "_____no_output_____" ], [ "ls\n", " day1_meta.ipynb 'day5 (1).ipynb' \u001b[0m\u001b[01;34mdw_matrix\u001b[0m/ Untitled0.ipynb\n day4_meta.ipynb day5.ipynb \u001b[01;34mdw_matrix2\u001b[0m/\n" ], [ "cd \"dw_matrix2\"", "/content/drive/My Drive/Colab Notebooks/dw_matrix2\n" ], [ "ls", "car.h5\n" ], [ "!curl -L http://bit.ly/dw_car_data -o car.h5", " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 152 100 152 0 0 2235 0 --:--:-- --:--:-- --:--:-- 2235\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n100 57.7M 100 57.7M 0 0 24.0M 0 0:00:02 0:00:02 --:--:-- 32.8M\n" ], [ "cd ", "\u001b[0m\u001b[01;34mdrive\u001b[0m/ \u001b[01;34msample_data\u001b[0m/\n" ], [ "import pandas as pd\n", "_____no_output_____" ], [ "df = pd.read_hdf(\"car.h5\")\ndf.shape", "_____no_output_____" ], [ "df.sample(5)", "_____no_output_____" ], [ "cd dw_matrix2/", "/content/drive/My Drive/Colab Notebooks/dw_matrix2\n" ], [ "!git add day1_meta.ipynb", "fatal: not a git repository (or any parent up to mount point /content)\nStopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).\n" ], [ "!git commit -m \"Lets start the second stage\"", "[master cc49e64] Lets start the second stage\n 1 file changed, 1 insertion(+)\n create mode 100644 matrix_two/day1_meta.ipynb\n" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cbc6368fb5af87d0abe68f759ffcd6dd599369fe
54,653
ipynb
Jupyter Notebook
Logistic_Regression.ipynb
tonychang04/Sarcastic-Headlines-Detector
ed3bf85a8aba6a514b34d029831112226df4774d
[ "MIT" ]
null
null
null
Logistic_Regression.ipynb
tonychang04/Sarcastic-Headlines-Detector
ed3bf85a8aba6a514b34d029831112226df4774d
[ "MIT" ]
null
null
null
Logistic_Regression.ipynb
tonychang04/Sarcastic-Headlines-Detector
ed3bf85a8aba6a514b34d029831112226df4774d
[ "MIT" ]
1
2021-08-25T08:46:27.000Z
2021-08-25T08:46:27.000Z
85.797488
15,686
0.712239
[ [ [ "<a href=\"https://colab.research.google.com/github/tonychang04/Sarcastic-Headlines-Detector/blob/main/Logistic_Regression.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## Logistic Regression Model\n## Accuracy: 86%", "_____no_output_____" ] ], [ [ "from google.colab import files\nfrom google.colab import drive\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport string\n\nimport nltk\nfrom nltk.corpus import stopwords\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\n#drive.mount('/content/drive')\nnltk.download('stopwords')\n\n\n\n", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n" ], [ "#/content/Sarcasm_Headlines_Dataset.json\ndf1 = pd.read_json('/Sarcasm_Headlines_Dataset.json', lines = True)\ndf2 = pd.read_json('/Sarcasm_Headlines_Dataset_v2.json', lines = True)\nframes = [df1, df2]\ndf = pd.concat(frames) # merged two json files into 1 dataframe file\ndf.head(10)", "_____no_output_____" ], [ "# Some data visualizations...\nones = len(df[df['is_sarcastic'] == 1])\nzeros = len(df[df['is_sarcastic'] == 0])\noutput = ['0','1']\nplt.bar(output[0], [zeros])\nplt.bar(output[1], [ones])\nplt.legend(output)\nplt.xlabel('Sarcastic(1) or Not Sarcastic(0)')\nplt.ylabel('Number of Headlines')\nplt.title('Number of Sarcastic/Non-Sarcastic headlines')", "_____no_output_____" ], [ "print(stopwords.words('english'))", "['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her', 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', \"don't\", 'should', \"should've\", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', \"aren't\", 'couldn', \"couldn't\", 'didn', \"didn't\", 'doesn', \"doesn't\", 'hadn', \"hadn't\", 'hasn', \"hasn't\", 'haven', \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\", 'mustn', \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\", 'shouldn', \"shouldn't\", 'wasn', \"wasn't\", 'weren', \"weren't\", 'won', \"won't\", 'wouldn', \"wouldn't\"]\n" ], [ "# some other visualizations... 
(average length of the headlines - sarcastic && nonsarcastic)\n# takes a while to get the valuse, just use the numbers below to make the graph\ndef find_average_length(df, len_sar, len_non_sar):\n for i in range(len(df)):\n if int(df[['is_sarcastic']].iloc[i]) == 0:\n len_non_sar += int(df[['headline']].iloc[i].str.len())\n else:\n len_sar += int(df[['headline']].iloc[i].str.len())\n sarcastic = len_sar / ones\n non_sarcastic = len_non_sar / zeros\n return sarcastic, non_sarcastic\n\nsarcastic, non_sarcastic = find_average_length(df, 0, 0)\n\n#sarcastic = 64.08620553671425\n#non_sarcastic = 59.55862529195863", "_____no_output_____" ], [ "# run it as needed\nsarcastic = 64.08620553671425\nnon_sarcastic = 59.55862529195863", "_____no_output_____" ], [ "fig = plt.figure()\nax = fig.add_subplot(111)\nlabels = ['sarcastic','non-sarcastic']\nvalues = [sarcastic, non_sarcastic]\nplt.bar(labels[0], values[0], color=(0.2, 0.4, 0.6, 0.6))\nplt.bar(labels[1], values[1], color=(0.3, 0.8, 0.7, 0.6))\n\nfor i, v in enumerate(values):\n ax.text(i, v+1, \"%d\" %v, ha=\"center\")\nplt.ylim(0, 75)\n\nplt.legend(labels)\nplt.xlabel('Sarcastic or Non-sarcastic')\nplt.ylabel('Average number of characters')\nplt.title('Average length of the headlines')", "_____no_output_____" ], [ "# Removing Stop words\n\ndef text_process_for_ML(mess):\n nopunc = [char for char in mess if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n #print(nopunc)\n \n #print(no_stop_words)\n return [word for word in nopunc.split() if word.lower() not in \n stopwords.words('english')]\n\n\ndef text_process(mess):\n nopunc = [char for char in mess if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n #print(nopunc)\n no_stop_words = [word for word in nopunc.split() if word.lower() not in \n stopwords.words('english')]\n #print(no_stop_words)\n return ' '.join(no_stop_words)\n\ndf['processed_headline'] = df['headline'].apply(text_process)\ndf.head()\n\n\n", "_____no_output_____" ], [ "#logistic regression model\n# https://www.kaggle.com/mrudhuhas/text-classification-spacy/execution\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\n\nX_train_2, X_test_2, Y_train_2, Y_test_2 = train_test_split(df['processed_headline'], df['is_sarcastic'], test_size=0.33, random_state=42)\n\n#train the model\nclassifier_lr = Pipeline([('tfidf',TfidfVectorizer()), ('clf',LogisticRegression(solver='saga'))])\nclassifier_lr.fit(X_train_2,Y_train_2)", "_____no_output_____" ], [ "#Predicting \ny_pred = classifier_lr.predict(X_test_2)\nyt_pred = classifier_lr.predict(X_train_2)\n\n#Analyzing\nfrom sklearn.metrics import accuracy_score\ncm = confusion_matrix(Y_test_2,y_pred)\nprint(f'Confusion Matrix :\\n {cm}\\n')\nprint(f'Test Set Accuracy Score :\\n {accuracy_score(Y_test_2,y_pred)}\\n')\nprint(f'Train Set Accuracy Score :\\n {accuracy_score(Y_train_2,yt_pred)}\\n')\nprint(f'Classification Report :\\n {classification_report(Y_test_2,y_pred)}')", "Confusion Matrix :\n [[8836 1007]\n [1605 6811]]\n\nTest Set Accuracy Score :\n 0.8569472588860289\n\nTrain Set Accuracy Score :\n 0.9195284469502819\n\nClassification Report :\n precision recall f1-score support\n\n 0 0.85 0.90 0.87 9843\n 1 0.87 0.81 0.84 8416\n\n accuracy 0.86 18259\n macro avg 0.86 0.85 0.86 18259\nweighted avg 0.86 0.86 0.86 18259\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc63df85c1833093aab27c0a8b847519a6e7711
122,383
ipynb
Jupyter Notebook
intro-neural-networks/student-admissions/StudentAdmissions.ipynb
Hossam-Tarek/deep-learning-v2-pytorch
6af3950aec12fcd42b5c0f94671c1d3a17e66f7d
[ "MIT" ]
null
null
null
intro-neural-networks/student-admissions/StudentAdmissions.ipynb
Hossam-Tarek/deep-learning-v2-pytorch
6af3950aec12fcd42b5c0f94671c1d3a17e66f7d
[ "MIT" ]
null
null
null
intro-neural-networks/student-admissions/StudentAdmissions.ipynb
Hossam-Tarek/deep-learning-v2-pytorch
6af3950aec12fcd42b5c0f94671c1d3a17e66f7d
[ "MIT" ]
null
null
null
126.428719
27,808
0.831488
[ [ [ " # Predicting Student Admissions with Neural Networks\nIn this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:\n- GRE Scores (Test)\n- GPA Scores (Grades)\n- Class rank (1-4)\n\nThe dataset originally came from here: http://www.ats.ucla.edu/\n\n## Loading the data\nTo load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:\n- https://pandas.pydata.org/pandas-docs/stable/\n- https://docs.scipy.org/", "_____no_output_____" ] ], [ [ "# Importing pandas and numpy\nimport pandas as pd\nimport numpy as np\n\n# Reading the csv file into a pandas DataFrame\ndata = pd.read_csv('student_data.csv')\n\n# Printing out the first 10 rows of our data\ndata[:10]", "_____no_output_____" ] ], [ [ "## Plotting the data\n\nFirst let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ingore the rank.", "_____no_output_____" ] ], [ [ "# Importing matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Function to help us plot\ndef plot_points(data):\n X = np.array(data[[\"gre\",\"gpa\"]])\n y = np.array(data[\"admit\"])\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')\n plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')\n plt.xlabel('Test (GRE)')\n plt.ylabel('Grades (GPA)')\n \n# Plotting the points\nplot_points(data)\nplt.show()", "_____no_output_____" ] ], [ [ "Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.", "_____no_output_____" ] ], [ [ "# Separating the ranks\ndata_rank1 = data[data[\"rank\"]==1]\ndata_rank2 = data[data[\"rank\"]==2]\ndata_rank3 = data[data[\"rank\"]==3]\ndata_rank4 = data[data[\"rank\"]==4]\n\n# Plotting the graphs\nplot_points(data_rank1)\nplt.title(\"Rank 1\")\nplt.show()\nplot_points(data_rank2)\nplt.title(\"Rank 2\")\nplt.show()\nplot_points(data_rank3)\nplt.title(\"Rank 3\")\nplt.show()\nplot_points(data_rank4)\nplt.title(\"Rank 4\")\nplt.show()", "_____no_output_____" ] ], [ [ "This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.\n\n## TODO: One-hot encoding the rank\nUse the `get_dummies` function in pandas in order to one-hot encode the data.\n\nHint: To drop a column, it's suggested that you use `one_hot_data`[.drop( )](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html).", "_____no_output_____" ] ], [ [ "# TODO: Make dummy variables for rank and concat existing columns\none_hot_data = pd.get_dummies(data, columns=[\"rank\"])\n\n# Print the first 10 rows of our data\none_hot_data[:10]", "_____no_output_____" ] ], [ [ "## TODO: Scaling the data\nThe next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. 
Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.", "_____no_output_____" ] ], [ [ "# Making a copy of our data\nprocessed_data = one_hot_data[:]\n\n# TODO: Scale the columns\nprocessed_data[\"gre\"] /= 800\nprocessed_data[\"gpa\"] /= 4\n\n# Printing the first 10 rows of our procesed data\nprocessed_data[:10]", "_____no_output_____" ] ], [ [ "## Splitting the data into Training and Testing", "_____no_output_____" ], [ "In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.", "_____no_output_____" ] ], [ [ "sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)\ntrain_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)\n\nprint(\"Number of training samples is\", len(train_data))\nprint(\"Number of testing samples is\", len(test_data))\nprint(train_data[:10])\nprint(test_data[:10])", "Number of training samples is 360\nNumber of testing samples is 40\n admit gre gpa rank_1 rank_2 rank_3 rank_4\n172 0 0.850 0.8700 0 0 1 0\n137 0 0.875 1.0000 0 0 1 0\n126 1 0.750 0.8850 1 0 0 0\n94 1 0.825 0.8600 0 1 0 0\n72 0 0.600 0.8475 0 0 0 1\n33 1 1.000 1.0000 0 0 1 0\n380 0 0.875 0.9125 0 1 0 0\n223 0 1.000 0.8675 0 0 1 0\n307 0 0.725 0.8775 0 1 0 0\n227 0 0.675 0.7550 0 0 0 1\n admit gre gpa rank_1 rank_2 rank_3 rank_4\n20 0 0.625 0.7925 0 0 1 0\n21 1 0.825 0.9075 0 1 0 0\n48 0 0.550 0.6200 0 0 0 1\n50 0 0.800 0.9650 0 0 1 0\n54 0 0.825 0.8350 0 0 1 0\n58 0 0.500 0.9125 0 1 0 0\n87 0 0.750 0.8700 0 1 0 0\n99 0 0.500 0.8275 0 0 1 0\n130 1 0.775 0.7925 0 1 0 0\n134 0 0.700 0.7375 0 1 0 0\n" ] ], [ [ "## Splitting the data into features and targets (labels)\nNow, as a final step before the training, we'll split the data into features (X) and targets (y).", "_____no_output_____" ] ], [ [ "features = train_data.drop('admit', axis=1)\ntargets = train_data['admit']\nfeatures_test = test_data.drop('admit', axis=1)\ntargets_test = test_data['admit']\n\nprint(features[:10])\nprint(targets[:10])", " gre gpa rank_1 rank_2 rank_3 rank_4\n172 0.850 0.8700 0 0 1 0\n137 0.875 1.0000 0 0 1 0\n126 0.750 0.8850 1 0 0 0\n94 0.825 0.8600 0 1 0 0\n72 0.600 0.8475 0 0 0 1\n33 1.000 1.0000 0 0 1 0\n380 0.875 0.9125 0 1 0 0\n223 1.000 0.8675 0 0 1 0\n307 0.725 0.8775 0 1 0 0\n227 0.675 0.7550 0 0 0 1\n172 0\n137 0\n126 1\n94 1\n72 0\n33 1\n380 0\n223 0\n307 0\n227 0\nName: admit, dtype: int64\n" ] ], [ [ "## Training the 2-layer Neural Network\nThe following function trains the 2-layer neural network. First, we'll write some helper functions.", "_____no_output_____" ] ], [ [ "# Activation (sigmoid) function\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\ndef sigmoid_prime(x):\n return sigmoid(x) * (1-sigmoid(x))\ndef error_formula(y, output):\n return - y*np.log(output) - (1 - y) * np.log(1-output)", "_____no_output_____" ] ], [ [ "# TODO: Backpropagate the error\nNow it's your turn to shine. Write the error term. 
Remember that this is given by the equation $$ (y-\\hat{y}) \\sigma'(x) $$", "_____no_output_____" ] ], [ [ "# TODO: Write the error term formula\ndef error_term_formula(x, y, output):\n return (y - output) * sigmoid_prime(x)", "_____no_output_____" ], [ "# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\n\n# Training function\ndef train_nn(features, targets, epochs, learnrate):\n \n # Use to same seed to make debugging easier\n np.random.seed(42)\n\n n_records, n_features = features.shape\n last_loss = None\n\n # Initialize weights\n weights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n\n for e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features.values, targets):\n # Loop through all records, x is the input, y is the target\n\n # Activation of the output unit\n # Notice we multiply the inputs and the weights here \n # rather than storing h as a separate variable \n output = sigmoid(np.dot(x, weights))\n\n # The error, the target minus the network output\n error = error_formula(y, output)\n\n # The error term\n error_term = error_term_formula(x, y, output)\n\n # The gradient descent step, the error times the gradient times the inputs\n del_w += error_term * x\n\n # Update the weights here. The learning rate times the \n # change in weights, divided by the number of records to average\n weights += learnrate * del_w / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n out = sigmoid(np.dot(features, weights))\n loss = np.mean((out - targets) ** 2)\n print(\"Epoch:\", e)\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n print(\"=========\")\n print(\"Finished training!\")\n return weights\n \nweights = train_nn(features, targets, epochs, learnrate)", "Epoch: 0\nTrain loss: 0.2720880569132244\n=========\nEpoch: 100\nTrain loss: 0.21246205776875948\n=========\nEpoch: 200\nTrain loss: 0.2097762104688896\n=========\nEpoch: 300\nTrain loss: 0.20842256739840967\n=========\nEpoch: 400\nTrain loss: 0.20770320279968493\n=========\nEpoch: 500\nTrain loss: 0.2072836497536405\n=========\nEpoch: 600\nTrain loss: 0.20700896993762954\n=========\nEpoch: 700\nTrain loss: 0.20680656034523956\n=========\nEpoch: 800\nTrain loss: 0.20664163968386293\n=========\nEpoch: 900\nTrain loss: 0.2064971536911898\n=========\nFinished training!\n" ] ], [ [ "## Calculating the Accuracy on the Test Data", "_____no_output_____" ] ], [ [ "# Calculate accuracy on test data\ntest_out = sigmoid(np.dot(features_test, weights))\npredictions = test_out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))", "Prediction accuracy: 0.700\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cbc63f7fedda2874e3c43ac5391c19c286e054d0
58,701
ipynb
Jupyter Notebook
preprocessing/population_processing.ipynb
tOverney/ADA-Project
69221210b1f4f13f6979123c6a7a1a9813ea18e5
[ "Apache-2.0" ]
null
null
null
preprocessing/population_processing.ipynb
tOverney/ADA-Project
69221210b1f4f13f6979123c6a7a1a9813ea18e5
[ "Apache-2.0" ]
1
2016-11-04T01:03:21.000Z
2016-11-04T10:10:06.000Z
preprocessing/population_processing.ipynb
tOverney/ADA-Project
69221210b1f4f13f6979123c6a7a1a9813ea18e5
[ "Apache-2.0" ]
null
null
null
45.328958
20,562
0.553244
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport re", "_____no_output_____" ] ], [ [ "## Population preprocessing", "_____no_output_____" ] ], [ [ "file = \"../data/population_suisse_clean.xlsx\"\n\ndf_pop = pd.read_excel(file)\ndf_pop = df_pop.reset_index()", "_____no_output_____" ] ], [ [ "Remove district information", "_____no_output_____" ] ], [ [ "df_pop.drop(df_pop['index'].str.startswith(\">>\"), inplace=True)", "_____no_output_____" ] ], [ [ "Remove NPZ information and parenthesis information", "_____no_output_____" ] ], [ [ "clean_name = lambda s: s.partition(' ')[2]\nremove_sup = lambda s: re.sub(r'\\(*\\ [^)]*\\)', '', s)\n\ndf_pop['index'] = df_pop['index'].apply(clean_name)\ndf_pop['index'] = df_pop['index'].apply(remove_sup)", "_____no_output_____" ], [ "name2pop = {}\ndef create_dict(row):\n name2pop.update({row['index']: row[\"Etat de la population\"]})", "_____no_output_____" ], [ "a = df_pop.apply(create_dict, axis=1)", "_____no_output_____" ] ], [ [ "--------", "_____no_output_____" ], [ "## Capacity preprocessing", "_____no_output_____" ] ], [ [ "file = '../data/2017-01-30_out.csv'\ndf_train = pd.read_csv(file)", "/Users/TristanO/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2723: DtypeWarning: Columns (8) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ] ], [ [ "Clean stop id for aggregation", "_____no_output_____" ] ], [ [ "clean_id = lambda s : s.partition(':')[0]\n\ndf_train['stop_id'] = df_train['stop_id'].apply(clean_id)", "_____no_output_____" ] ], [ [ "Aggregate", "_____no_output_____" ] ], [ [ "most_present = lambda x: x.value_counts().index[0]", "_____no_output_____" ], [ "df = df_train.groupby(['stop_id']).agg({'name': most_present,'id': 'count'})\ndf[\"amount_of_train\"] = df.id\ndf = df[[\"amount_of_train\", \"name\"]]\ndf", "_____no_output_____" ], [ "def get_pop(x):\n if x in name2pop:\n return name2pop[x]\n x1 = x.split(\" \")[0]\n if x1 in name2pop:\n return name2pop[x1]\n x2 = x.split(\"-\")[0]\n if x2 in name2pop:\n return name2pop[x2]\n ", "_____no_output_____" ], [ "df['pop'] = df.name.apply(get_pop)", "_____no_output_____" ], [ "df_okay = df[~df['pop'].isnull()]", "_____no_output_____" ], [ "sum(df['pop'].isnull())", "_____no_output_____" ], [ "df_okay.plot.scatter(x='amount_of_train', y='pop', logy=True);", "_____no_output_____" ], [ "name2pop[\"Prilly\"]", "_____no_output_____" ], [ "df_okay[df_okay.amount_of_train < 3]", "_____no_output_____" ], [ "df_okay", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc64329a9214c97c9fa5a94c2ac36d5893d6eb2
348,201
ipynb
Jupyter Notebook
wambui_aurelia_core_week_6_assignment.ipynb
AureliaWambui19/Home-team-Away-team-scores-prediction
701c78aa514bcd7956e29ad6c083f098b79e4a3b
[ "MIT" ]
null
null
null
wambui_aurelia_core_week_6_assignment.ipynb
AureliaWambui19/Home-team-Away-team-scores-prediction
701c78aa514bcd7956e29ad6c083f098b79e4a3b
[ "MIT" ]
null
null
null
wambui_aurelia_core_week_6_assignment.ipynb
AureliaWambui19/Home-team-Away-team-scores-prediction
701c78aa514bcd7956e29ad6c083f098b79e4a3b
[ "MIT" ]
null
null
null
61.9024
33,038
0.628577
[ [ [ "<a href=\"https://colab.research.google.com/github/AureliaWambui19/Home-team-Away-team-scores-prediction/blob/main/wambui_aurelia_core_week_6_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# 1.1 Introduction\n * Football is a family of team sports that involve, to varying degrees, kicking a ball to score a goal.\n\n * The men's FIFA World Ranking is a ranking system for men's national teams in association football. \n * Currently led by Belgium\n \n * A points system is used, with points being awarded based on the results of all FIFA-recognised full international matches.\n \n ", "_____no_output_____" ], [ "## 1.1 Defining the Question\n\n* I have been recruited as a football analyst in a company - Mchezopesa Ltd and tasked to accomplish the task below :\n\n* Predict the result of a game between team 1 and team 2, based on who's home and who's away, and on whether or not the game is friendly (including rank of the respective team).", "_____no_output_____" ], [ "## 1.2 Metrics for Success\n \n * Obtaining about 80% Accuracy score and above\n * Correctly identifying status of results(win,loss,draw)\n \n ", "_____no_output_____" ], [ "## 1.3 The Context\n\nThe new model for calculating the FIFA/Coca-Cola World Ranking (FWR) was developed over two years\nduring which time a large number of different algorithms was tested and extensively discussed.\nThroughout this review and consultation process, the main aim was to identify an algorithm that is not\nonly intuitive, easy to understand and improves overall accuracy of the formula, but also addresses\nfeedback received about the previous model and provides fair and equal opportunities for all teams\nacross all confederations to ascend the FWR\n\nThe Elo method of calculation adds/subtracts points (as opposed to averaging points) for individual\nmatches to/from a team’s existing point total. 
The points which are added or subtracted are partially\ndetermined by the relative strength of the two opponents, including the logical expectation that teams\nhigher in the ranking should fare better against teams lower in the ranking.\n\n\n", "_____no_output_____" ], [ "## 1.4 Experimental design taken\n\n- Perform your EDA\n- Perform any necessary feature engineering\n- Check of multicollinearity\n- Building a model\n * Approach 1: Polynomial regression model\n \n * Model 1: Predict how many goals the home team scores\n * Model 2: Predict how many goals the away team scores\n\n * Approach 2: Logistic regression model\n \n * Figure out from the home team’s perspective if the game is a Win, Lose or Draw (W, L, D)\n \n- Cross-validate the model\n- Compute RMSE\n- Create residual plots for the model\n- Assess Heteroscedasticity using Bartlett’s test\n", "_____no_output_____" ], [ "## 1.5 Appropriateness of the available Data\n\nThis project has two datasets:\n\n* Ranking dataset: contains the team ranks from 1993 to 2018\n\n* Results dataset: contains matches and the team scores since 1892 to 2019\n\nThe link to the dataset is:\n\n* https://drive.google.com/open?id=1BYUqaEEnFtAe5lvzJh9lpVpR2MAvERUc\n\n The data is relevant for this project\n", "_____no_output_____" ], [ "# 2 Data Understanding", "_____no_output_____" ] ], [ [ "# Importing Libraries we use for our analysis\n\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold, LeaveOneOut\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler\n\n\n", "_____no_output_____" ] ], [ [ "# 2.1 Viewing our Datasets", "_____no_output_____" ] ], [ [ "# Reading the datasets\nFifaRank = pd.read_csv('fifa_ranking.csv')\nFifaResult = pd.read_csv('results.csv')", "_____no_output_____" ], [ "# Viewing the top 3 observation in the fifa ranking dataset\nFifaRank.head(3)", "_____no_output_____" ], [ "# Viewing the last 3 observation in the fifa ranking dataset\nFifaRank.tail(3)", "_____no_output_____" ], [ "# Viewing the last 3 observation in the result dataset\nFifaResult.tail(3)", "_____no_output_____" ] ], [ [ "# 2.2 Checking data", "_____no_output_____" ] ], [ [ "# Checking the size of the fifa ranking dataset\nFifaRank.shape", "_____no_output_____" ] ], [ [ "This dataset has 57993 rows and 16 columns", "_____no_output_____" ] ], [ [ "# checking the size of the results dataset\nFifaResult.shape", "_____no_output_____" ] ], [ [ "This dataset has 40839 rows and 9 columns", "_____no_output_____" ] ], [ [ "# Checking the ranking dataset information\n\nFifaRank.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 57793 entries, 0 to 57792\nData columns (total 16 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 rank 57793 non-null int64 \n 1 country_full 57793 non-null object \n 2 country_abrv 57793 non-null object \n 3 total_points 57793 non-null float64\n 4 previous_points 57793 non-null int64 \n 5 rank_change 57793 non-null int64 \n 6 cur_year_avg 57793 non-null float64\n 7 cur_year_avg_weighted 57793 non-null float64\n 8 last_year_avg 57793 non-null float64\n 9 last_year_avg_weighted 57793 non-null 
float64\n 10 two_year_ago_avg 57793 non-null float64\n 11 two_year_ago_weighted 57793 non-null float64\n 12 three_year_ago_avg 57793 non-null float64\n 13 three_year_ago_weighted 57793 non-null float64\n 14 confederation 57793 non-null object \n 15 rank_date 57793 non-null object \ndtypes: float64(9), int64(3), object(4)\nmemory usage: 7.1+ MB\n" ], [ "# Checking the result dataset information\n\nFifaResult.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 40839 entries, 0 to 40838\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 date 40839 non-null object\n 1 home_team 40839 non-null object\n 2 away_team 40839 non-null object\n 3 home_score 40839 non-null int64 \n 4 away_score 40839 non-null int64 \n 5 tournament 40839 non-null object\n 6 city 40839 non-null object\n 7 country 40839 non-null object\n 8 neutral 40839 non-null bool \ndtypes: bool(1), int64(2), object(6)\nmemory usage: 2.5+ MB\n" ], [ "# Viewing the column names of the ranking dataset\n\nFifaRank.columns", "_____no_output_____" ], [ "# Viewing the column names of the result dataset\n\nFifaResult.columns", "_____no_output_____" ] ], [ [ "# 3 Feature Engineering", "_____no_output_____" ] ], [ [ "# finding the difference of scores and storing them in a new column called game_result\nFifaResult['game_results'] = FifaResult['home_score'] -FifaResult['away_score']\nFifaResult.head(4)", "_____no_output_____" ], [ "# Creating a function to specify whether its a win , loss or a draw based on a home team perspective\n# the results (its a win , loss or a draw ) are stored in a new column called status\n \ndef home_team(game_results):\n if game_results > 0:\n return 'win'\n elif game_results < 0:\n return 'loss'\n else:\n return 'draw'\n \nFifaResult['status'] = FifaResult.game_results.apply(lambda w: home_team(w))\n\n\n\n", "_____no_output_____" ], [ "FifaResult.head(2)", "_____no_output_____" ], [ "# finding unique values in tournament column \n\nFifaResult.tournament.unique()\n", "_____no_output_____" ], [ "# Changing the tournament type into three categories \n# Tournament type (World cup, Friendly, Other) \n# The 3 respective category will be stored in a new column named tournament_type\ndef ton(tournament_type):\n if tournament_type == 'Friendly':\n return 'Friendly'\n elif tournament_type == 'FIFA World Cup':\n return 'World cup'\n else:\n return 'Other'\n \nFifaResult['tournament_type'] = FifaResult.tournament.apply(lambda t: ton(t))\n", "_____no_output_____" ], [ "FifaResult.head(2)", "_____no_output_____" ], [ "# Changing the dates column in both datasets into datetime format\n\nFifaResult['date'] = pd.to_datetime(FifaResult['date'])\nFifaRank['rank_date'] = pd.to_datetime(FifaRank['rank_date'])", "_____no_output_____" ], [ "# Confirming that we have changed the date columns into datetime datatypes\n\nprint(FifaRank.rank_date.dtypes)\nprint(' ')\nprint(FifaResult.date.dtypes)", "datetime64[ns]\n \ndatetime64[ns]\n" ], [ "# Extracting the year and month from the date column; \n# Here we will create a new column for each\nFifaResult['year'] = pd.DatetimeIndex(FifaResult['date']).year\nFifaResult['month'] = pd.DatetimeIndex(FifaResult['date']).month\n\nFifaRank['year'] = FifaRank['rank_date'].dt.year\nFifaRank['month'] =FifaRank['rank_date'].dt.month", "_____no_output_____" ], [ "# confirming the changes\nFifaResult.head(3)", "_____no_output_____" ], [ "# confirming changes\n\nFifaRank.head(2)", "_____no_output_____" ], [ "# changing the full country column name in 
ranking dataset to home_team so as to ease manipulation of the datasets when merging them later\n\nFifaRank= FifaRank.rename({'country_full': 'home_team'}, axis = 1)", "_____no_output_____" ], [ "# confirming changes\n\nFifaRank.head(2)", "_____no_output_____" ], [ "# Dropping unnecessary columns in result dataset\n\nFifaResult.drop(['date', 'game_results'], axis = 1, inplace = True)", "_____no_output_____" ], [ "FifaResult.columns", "_____no_output_____" ], [ "\n # Dropping unnecessary columns in rank dataset\n\nFifaRank.drop(['country_abrv','rank_date', 'total_points', 'previous_points','cur_year_avg', 'cur_year_avg_weighted' ,'last_year_avg' , 'last_year_avg_weighted' , 'two_year_ago_avg', 'two_year_ago_weighted', 'three_year_ago_avg' ,'three_year_ago_weighted', 'confederation'], axis =1, inplace = True)", "_____no_output_____" ], [ "# Merging datasets\n\n# Based on home_team, year, month\n\nhome_me= pd.merge(FifaResult,FifaRank, how=\"left\", on = ['home_team', 'year', 'month'])\n", "_____no_output_____" ], [ "# viewing our merged dataset 4 top observations\nhome_me.head(4)", "_____no_output_____" ], [ "# viewing our merged dataset lastb 3 observations\nhome_me.tail(3)", "_____no_output_____" ], [ "# renaming the rank column name to home_rank so as to get the respective rank of the home team\n\nhome_me = home_me.rename({'rank': 'home_rank'}, axis = 1)", "_____no_output_____" ], [ "# Confirming changes\n\nhome_me.head(2)", "_____no_output_____" ], [ "\n# renaming the column home_team (originally called country full) as away team so that we get their individual ranks of away teams\nFRankone= FifaRank.rename({'home_team': 'away_team'}, axis = 1)\nFRankone.head(2)", "_____no_output_____" ], [ "# Merging the home_merged dataset with the \n# Based on away_team, year, month\nFiifa = pd.merge(home_me,FRankone, how=\"left\", on = ['away_team', 'year', 'month'])", "_____no_output_____" ], [ "# Checking the first two observations of the merged dataset\n\nFiifa.head(2)", "_____no_output_____" ], [ "# renaming the rank column as away rank in the new dataframe\n\nFiifa = Fiifa.rename({'rank': 'away_rank'}, axis = 1)\nFiifa.head()", "_____no_output_____" ] ], [ [ "# 4 Tyding the dataset", "_____no_output_____" ] ], [ [ "# checking for unique year rankings\nFifaRank.year.unique()\n", "_____no_output_____" ] ], [ [ "Rankings are from 1993 t0 2018 .after merging a lot of missing values were noted especialy in years before 1993 and after 2018.therefore i will drop the data where this was observed as there are no rankings available", "_____no_output_____" ] ], [ [ "Fiifa.dropna(inplace = True)", "_____no_output_____" ], [ "# confirming that there are no null values\n\nFiifa.isnull().sum()", "_____no_output_____" ], [ "# checking for duplicates\n\nFiifa.duplicated().sum()", "_____no_output_____" ], [ "# dropping the duplicates\nFiifa.drop_duplicates(inplace = True)\n", "_____no_output_____" ], [ "# Checking that we have no duplicates in the data\nFiifa.duplicated().sum()", "_____no_output_____" ], [ "# checking columns of merged dataset\n\nFiifa.columns", "_____no_output_____" ], [ "# viewing our dataset after cleaning\nFiifa.head()", "_____no_output_____" ], [ "# checking the shape of the cleaned data\n\nFiifa.shape", "_____no_output_____" ] ], [ [ "This dataset has 16889 rows and 16 columns", "_____no_output_____" ] ], [ [ "# Encoding the categorical columns so as to manage perform operations such as correlation check\n#\n\nle = LabelEncoder()\nFiifa= Fiifa.apply(le.fit_transform)\n", 
"_____no_output_____" ], [ "# Confirming the changes\n\nFiifa.head(5)", "_____no_output_____" ], [ "# checking for outliers in our dataset\n# Using boxplots\n# Labeling the title of our chart\n# Displaying chart \n\nplt.figure(dpi = 100)\nax = sns.boxplot(data = Fiifa,orient='h')\nplt.title(' Outliers in Fifa dataset', color = 'red')\nplt.xlabel(' Frequency')\nplt.show()", "_____no_output_____" ] ], [ [ "# 5 Exploratory data analysis", "_____no_output_____" ] ], [ [ "h=Fiifa['home_score']\n\nplt.hist(h, histtype='bar', rwidth=0.9)\nplt.xlabel('No. of home scores')\nplt.ylabel('Quantity')\nplt.title('number of home scores',color='red')\nplt.show()", "_____no_output_____" ] ], [ [ " home teams scored mostly one goal", "_____no_output_____" ] ], [ [ "a=Fiifa['away_score']\n\nplt.hist(h, histtype='bar', rwidth=0.9)\nplt.xlabel('No. of away scores')\nplt.ylabel('Quantity')\nplt.title('number of away scores',color='red')\nplt.show()", "_____no_output_____" ] ], [ [ "Most away teams score atleast one goal\n\nBoth histograms are positively skewed .This shape indicates that there are a number of data points, perhaps outliers, that are greater than the mode\n", "_____no_output_____" ] ], [ [ "# status of game results in respect tothe home team(draw = 0, lose =1, win = 2)\n# Using a countplot to visualize these results\n# Using Seaborn\n# Labeling the x and y axis\n# Giving a title to our chart\n# Displaying our chart\n\nplt.figure(figsize = (6,6), dpi = 80)\nsns.countplot(Fiifa['status'])\nplt.xlabel('status (draw = 0, lose =1, win = 2)')\nplt.ylabel('Count')\nplt.title('status of games results', color = 'red')\nplt.show()", "/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ] ], [ [ "The above bar chart above shows that wins by the home teams are high as compared to loss/draws", "_____no_output_____" ], [ "# 6 Multicollinearity\n * Checking for multicollinearity\n * Solving multicollinearity", "_____no_output_____" ] ], [ [ "# Before we build a model we shall check if\n# the independent variables are collinear/ correlated to each other\n# Getting the pearson correation coefficient for each of the variables\n\ncorrelation = Fiifa.corr()\ncorrelation", "_____no_output_____" ] ], [ [ "The correlation matrix indicates that most variables are moderately or weakly correlated.*(both positively and negatively)\n\nThis is very beneficial when creating a model, as collinear variables reduce the power of the model to identify independent \nvariables that are statistically significant. \n\nWe will use the correlation matrix to calculate the vif (Variance Inflation Factor).\nVariance inflation factor (VIF) is a measure of the amount of multicollinearity in a set of multiple regression variables. Mathematically, the VIF for a regression model variable is equal to the ratio of the overall model variance to the variance of a model that includes only that single independent variable. This ratio is calculated for each independent variable. 
A high VIF indicates that the associated independent variable is highly collinear with the other variables in the model.\n", "_____no_output_____" ] ], [ [ "# checking for multicollinearity\n# Using the variance Inflation Factor (VIF)\n# \n# This is calculated using linear algebra inverse function\n\npd.DataFrame(np.linalg.inv(correlation.values), index = correlation.index, columns = correlation.columns)\n\n# From the correlation matrix below there are no correlated independent variables as all have VIF below 5, which is the threshold", "_____no_output_____" ] ], [ [ "We check VIFs along the diagonal.\n\nVIFs Values greater than 5 indicate that the presence of multicollinearity.\n\nIf present we remove the variable with the greatest VIF value.\n\nTypically, a VIF value around 5 is a potential problem, and value around 10 is considered seriously problematic and suggests that the related variable should be dropped from the model.\n\nFrom the correlation matrix there are no correlated independent variables as all have VIF values are below 5, which is the threshold and therefore no variable will be dropped in this project", "_____no_output_____" ], [ "# 8 Building a Model", "_____no_output_____" ], [ "## 8.1 Polynomial Regression Model", "_____no_output_____" ] ], [ [ "# Approach 1: Polynomial approach\n\n# What to train given:\n\n# Rank of home team\n# Rank of away team\n# Tournament type\n\n# Model 1: Predict how many goals the home team scores\n\n# Model 2: Predict how many goals the away team scores", "_____no_output_____" ] ], [ [ "### Model 1\n\nPredict how many goals the home team scores", "_____no_output_____" ] ], [ [ "# Viewing our dataset before splitting \n\nFiifa.head(2)", "_____no_output_____" ], [ "# Model 1\n# Predict how many goals the home team scores given home rank\n\nX = Fiifa['home_rank'].values.reshape(-1, 1)\ny = Fiifa['home_score'].values.reshape(-1, 1)\n\n\n", "_____no_output_____" ], [ "# showing relationship between home rank and home score\n\nplt.scatter(X,y) \nplt.title('Home team performance', color = 'red')\nplt.xlabel('homerank')\nplt.ylabel('home score')\nplt.show()\n", "_____no_output_____" ] ], [ [ "There are more points on the lower side of the scatter plot.\n home team scores are mostly between 0 and 5 goals.", "_____no_output_____" ] ], [ [ "X.shape", "_____no_output_____" ], [ "y.shape", "_____no_output_____" ], [ "# Split the dataset into train and test sets\n# this means training data is 80% while test size is 20%\n\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=2)\n", "_____no_output_____" ], [ "# Fitting the polynomial features to the X the independent variable\n\n\npoly_reg = PolynomialFeatures(degree =4)\nX_poly = poly_reg.fit_transform(X)\n", "_____no_output_____" ], [ "# Fitting a polynomial Regression to the dataset.\n\npol_reg = LinearRegression()\npol_reg.fit(X_poly, y)", "_____no_output_____" ], [ "# Visualizing the polynomial Regression results\n\nplt.scatter(X, y, color='blue') \nplt.plot(X, pol_reg.predict(X_poly),color='red')\nplt.title('home score prediction')\nplt.xlabel('home rank')\nplt.ylabel('home score')\nplt.show()\n", "_____no_output_____" ] ], [ [ "\nUsing the polynomial regression model of degree 4,\n\nmost data points have been omitted\nthe visualization as appears makes it difficult to analyze and makes use of this model difficult to use for predictions", "_____no_output_____" ] ], [ [ "# Making predictions using our model\n\npoly_pred = 
pol_reg.predict(poly_reg.fit_transform([[20]]))\nprint('Polynomial prediction when home rank is 20 the home team score is: %d' %poly_pred)", "Polynomial prediction when home rank is 20 the home team score is: 1\n" ] ], [ [ "### Model 2 \n\nPredict how many goals the away team scores\n\n", "_____no_output_____" ] ], [ [ "# Model 2: Predict how many goals the away team scores given the away team rank\n\n#\nX = Fiifa['away_rank'].values.reshape(-1, 1)\n\ny = Fiifa['away_score'].values.reshape(-1, 1)\n\n", "_____no_output_____" ], [ "# Visualizing the dependent vs independent variable using a scatter plot\n\nplt.scatter(X,y) \nplt.title('away team performance', color = 'red')\nplt.xlabel('away rank')\nplt.ylabel('away score')\nplt.show()\n", "_____no_output_____" ] ], [ [ "\nmost cases the away team scores between 0 and 4 goals.", "_____no_output_____" ] ], [ [ "## Split the dataset into train and test sets\n\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=0)", "_____no_output_____" ], [ "# Fitting the polynomial features to the X\n\npoly_reg = PolynomialFeatures(degree = 4)\nX_poly = poly_reg.fit_transform(X)", "_____no_output_____" ], [ "# Fitting a polynomial Regression to the dataset\n\npol_reg = LinearRegression()\npol_reg.fit(X_poly, y)", "_____no_output_____" ], [ " # Visualizing the polynomial Regression results using a scatter plot\n\nplt.scatter(X, y, color='blue') \nplt.plot(X, pol_reg.predict(X_poly),color='red')\nplt.title('away team prediction')\nplt.xlabel('away rank')\nplt.ylabel('away score')\nplt.show()\n", "_____no_output_____" ] ], [ [ "Using the polynomial regression model of degree 4,\nmost data points have not been highlighted\nThis is underfitting.\nThe polynomial regression is not a good model to predict how many goals the away team scores given the away team rank.", "_____no_output_____" ] ], [ [ "# Making predictions using our model\n\npoly_pred = pol_reg.predict(poly_reg.fit_transform([[58]]))\nprint('Polynomial prediction when home away rank is 58 the away team score is: %d' %poly_pred)", "Polynomial prediction when home away rank is 58 the away team score is: 1\n" ] ], [ [ "## 8.2 Logistic Regression Model\n\n- Logistic regression is a predictive analysis. \n\n- Logistic regression is used to describe data and to explain the relationship between one dependent binary variable and one or more nominal, ordinal, interval or ratio-level independent variables.\n- Logistic Regression is used when the dependent variable(target) is categorical.\n- In this model, we will be predicting whether the home team (Wins, Losses or Draws) in a match.\n", "_____no_output_____" ] ], [ [ "# Viewing the first two observations before splittig our dataset\n\nFiifa.head(2)", "_____no_output_____" ], [ "# Splitting our dataset\n# X: independent variables\n# y: dependent variable\n\n# Splitting the data into train and test sets\n\nX = Fiifa.drop(['status'], axis = 1)\ny = Fiifa.status\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state=0)", "_____no_output_____" ], [ "# Fitting our model to our train sets\n# Logistic Regression model in this case\n# \n\nLogReg = LogisticRegression()\nLogReg.fit(X_train, y_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ], [ "# Using our model to make a prediction\n\ny_pred = LogReg.predict(X_test)", "_____no_output_____" ], [ "# Evalauting the model\n\n\nprint(accuracy_score(y_test, y_pred))", "0.6272942569567792\n" ] ], [ [ "The model has an accuracy score of 62.72%", "_____no_output_____" ], [ "# 9 Cross-Validation ", "_____no_output_____" ] ], [ [ "# Using KFolds\n\n# Splitting our dataset\n# independet variables as X\n# dependent variable as y\n\nX = Fiifa.drop(['status'], axis = 1).values\ny = Fiifa.status.values\n\n# specifying the number of folds\nfolds = KFold(n_splits = 10)\n\n\n# We now create and assess 10 models based on the folds we created.\n\nRMSES = [] # An array of RMSEs to keep track of the RSME of each model\ncount = 1 # starting point # helps to keep track of the model number in training\nfor train_index, test_index in folds.split(X):\n\n \n # Setting up the train and test based on the split determined by KFold\n # With 10 folds we split our data into training and test sets\n \n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n # fitting a Logistic regression model\n LogReg = LogisticRegression() \n LogReg.fit(X_train, y_train)\n \n # Assess the accuracy of the model\n y_pred = LogReg.predict(X_test)\n \n # Calculating the RMSES of each model\n # Appending each RMSE into the list earlier created\n rmse_value = np.sqrt(metrics.mean_squared_error(y_test, y_pred))\n RMSES.append(rmse_value)\n \n \n # printing each model RMSE\n print('Model ' + str(count) + ' Root Mean Squared Error:',rmse_value)\n count = count + 1\n \n ", "/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ], [ "# Printing the mean of the RMSES in all the 10 models\n\nprint(np.mean(RMSES))\n\n ", "0.9031346265085574\n" ], [ " # Visualizing the 10-folds RMSES using a scatter plot\n\n\n\nplt.plot(RMSES)\nplt.ylabel('RMSE value')\nplt.title(\"RMSE line plot\", color = 'red')\nplt.xlabel('model ID')\nplt.show()", "_____no_output_____" ] ], [ [ "# 10.Heteroskedisity", "_____no_output_____" ], [ "Heteroscedasticity means unequal scatter. In regression analysis, we talk about heteroscedasticity in the context of the residuals or error term. 
Specifically, heteroscedasticity is a systematic change in the spread of the residuals over the range of measured values.", "_____no_output_____" ] ], [ [ "# First: splitting our dataset \n# Into the feature set and the target variable\n\nX = Fiifa.drop(['status'], axis = 1)\ny = Fiifa.status\n\n# Split the dataset into train and test sets\n\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=0)\n\n# Fitting a Logistic model\n\nLogReg = LogisticRegression()\nLogReg.fit(X_train, y_train)\n\n# Using our model to make a prediction\n\ny_pred = LogReg.predict(X_test)\n\n# We now create the residual by subtracting the test value from the predicted \n# value for each row in our dataset\n\nresiduals = np.subtract(y_pred, y_test)\n\n# Creating a summary description of the residuals:\n\npd.DataFrame(residuals).describe()\n\nresiduals.mean()", "/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n  extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ] ], [ [ "## 10.1 Residual Plots", "_____no_output_____" ] ], [ [ "# Visualizing the residuals using a scatter plot \n\n\nplt.scatter(y_pred, residuals, color='black')\nplt.ylabel('residual')\nplt.xlabel('predicted values')\nplt.axhline(y= residuals.mean(), color='red', linewidth=1)\nplt.show()", "_____no_output_____" ] ], [ [ "Residuals are centered around a mean of approximately 0.43.\nPositive values for the residual (on the y-axis) mean the prediction was too low, and negative values mean the prediction was too high; 0 means the prediction was exactly correct.\n", "_____no_output_____" ], [ "## 10.2 Bartlett's test", "_____no_output_____" ] ], [ [ "# Carrying out Bartlett's test \n# It is a more thorough heteroscedasticity test.\n\ntest_result, p_value = sp.stats.bartlett(y_pred, residuals)\n\n# To interpret the results we must also compute a critical value of the chi squared distribution\ndegree_of_freedom = len(y_pred)-1\nprobability = 1 - p_value\n\n\ncritical_value = sp.stats.chi2.ppf(probability, degree_of_freedom)\nprint(p_value)\n\n# If the test_result is greater than the critical value, then we reject our null\n# hypothesis. This would mean that there are patterns to the variance of the data\n\n# Otherwise, we can identify no patterns, and we accept the null hypothesis that \n# the variance is homogeneous across our data\n\nif (test_result > critical_value):\n    print('the variances are unequal, and the model should be reassessed')\nelse:\n    print('The variances are Homogeneous!')", "2.156757663300631e-97\nThe variances are Homogeneous!\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
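The notebook record above screens for multicollinearity by reading VIF values off the diagonal of the inverse correlation matrix. A minimal standalone sketch of that check is below; the `features` frame and its random contents are illustrative stand-ins, not data from the notebook, and the column names are borrowed only as labels.

```python
import numpy as np
import pandas as pd

def vif_from_correlation(features: pd.DataFrame) -> pd.Series:
    """VIF of each column, taken from the diagonal of the inverse correlation matrix."""
    corr = features.corr()                      # Pearson correlation matrix
    vifs = np.diag(np.linalg.inv(corr.values))  # diagonal entries are the VIFs
    return pd.Series(vifs, index=corr.columns, name="VIF")

# Hypothetical numeric frame standing in for the merged Fiifa features
rng = np.random.default_rng(0)
features = pd.DataFrame(rng.normal(size=(200, 4)),
                        columns=["home_rank", "away_rank", "home_score", "away_score"])
vifs = vif_from_correlation(features)
print(vifs[vifs > 5])  # columns above the usual VIF threshold of 5 would be drop candidates
```

The threshold of 5 mirrors the rule of thumb stated in that notebook.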
cbc6466a052b9cf10a84ca753a8739379bb3a853
15,001
ipynb
Jupyter Notebook
NLP_python.ipynb
vikram-patil1289/NLP-on-Yelp-dataset
a992736028f00ed4b46adc3a6d198cb37a40fbbc
[ "MIT" ]
null
null
null
NLP_python.ipynb
vikram-patil1289/NLP-on-Yelp-dataset
a992736028f00ed4b46adc3a6d198cb37a40fbbc
[ "MIT" ]
null
null
null
NLP_python.ipynb
vikram-patil1289/NLP-on-Yelp-dataset
a992736028f00ed4b46adc3a6d198cb37a40fbbc
[ "MIT" ]
null
null
null
29.529528
271
0.466436
[ [ [ "##### Authors: \n- Vikram Hanumanthrao Patil\n- Prashantkumar Kulkarni\n\n##### Date: 2/6/2019\n\n##### Version: 3.0\n\n##### Environment: Python 3.6.1 and Jupyter notebook", "_____no_output_____" ], [ "# Table of contents\n### 1. [Importing libraries](#library)\n### 2. [Initialization](#initialisation)\n### 3. [Read training and label](#read_train)\n### 4. [Data pre-processing](#preprocess) \n### 5. [Feature generation](#feature)\n- #### 5.1 [Dimention reduction technique(Chi-squared)](#dimension)\n- #### 5-2 [Multinomial logistic regression](#model)\n- #### 5-3 [Cross-validation](#cv) \n\n### 6. [Predict on test data](#test)", "_____no_output_____" ], [ "## 1. Importing libraries <a name=\"library\"></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom pattern.en import parse\nfrom nltk.corpus import stopwords\nimport string\nimport re\nimport nltk\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn import svm\nimport swifter\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn import metrics\nimport seaborn as sns\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import SelectKBest, chi2\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "C:\\Users\\pkul0001\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\tqdm\\autonotebook\\__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ] ], [ [ "## 2. Initialization<a name=\"initialisation\"></a>\n\n### Creating a custom dictionary to expand all the decontract words", "_____no_output_____" ] ], [ [ "#initialising the lemmatizer.\nwn = nltk.WordNetLemmatizer()\n\n# Creating a custom dictionary to expand all the decontract words\nappos = {\n\"aren't\" : \"are not\", \"can't\" : \"cannot\", \"couldn't\" : \"could not\", \"didn't\" : \"did not\", \"doesn't\" : \"does not\",\n\"don't\" : \"do not\", \"hadn't\" : \"had not\", \"hasn't\" : \"has not\", \"haven't\" : \"have not\",\n\"he'd\" : \"he would\", \"he'll\" : \"he will\", \"he's\" : \"he is\", \"i'd\" : \"I would\",\n\"i'd\" : \"I had\", \"i'll\" : \"I will\", \"i'm\" : \"I am\", \"isn't\" : \"is not\",\n\"it's\" : \"it is\", \"it'll\":\"it will\", \"i've\" : \"I have\", \"let's\" : \"let us\",\n\"mightn't\" : \"might not\", \"mustn't\" : \"must not\", \"shan't\" : \"shall not\", \"she'd\" : \"she would\",\n\"she'll\" : \"she will\", \"she's\" : \"she is\", \"shouldn't\" : \"should not\", \"that's\" : \"that is\",\n\"there's\" : \"there is\", \"they'd\" : \"they would\", \"they'll\" : \"they will\", \"they're\" : \"they are\",\n\"they've\" : \"they have\", \"we'd\" : \"we would\", \"we're\" : \"we are\", \"weren't\" : \"were not\",\n\"we've\" : \"we have\", \"what'll\" : \"what will\", \"what're\" : \"what are\", \"what's\" : \"what is\",\n\"what've\" : \"what have\", \"where's\" : \"where is\", \"who'd\" : \"who would\", \"who'll\" : \"who will\",\n\"who're\" : \"who are\", \"who's\" : \"who is\", \"who've\" : \"who have\", \"won't\" : \"will not\",\n\"wouldn't\" : \"would not\", \"you'd\" : \"you would\", \"you'll\" : \"you will\",\"you're\" : \"you are\",\n\"you've\" : \"you have\", \"'re\": \" are\", \"wasn't\": \"was not\", \"we'll\":\" 
will\",\"didn't\": \"did not\"\n}\n#reference[1]", "_____no_output_____" ] ], [ [ "## 3. Reading the training data and labels <a name=\"read_train\"></a>\n\n### merging both of them", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"train_data.csv\", sep=',') # read training data\ndata_labels = pd.read_csv(\"train_label.csv\", sep=',') # read training labels\ndf=pd.merge(data,data_labels,on='trn_id',how='left') # merging both of them", "_____no_output_____" ] ], [ [ "## 4. Data pre-processing <a name=\"preprocess\"></a>", "_____no_output_____" ] ], [ [ "#--------------------------\n# Data pre-processing step\n#--------------------------\ndef pre_process(text):\n \"\"\"\n Takes in a string of text, then performs the following:\n 1. converts to lower\n 2. Splits the sentence into tokens\n 3. Decontract the words. For example: \"won't\" --> \"will not\"\n 4. Lemmatization, reduces words to their base word\n 5. Returns the sentence of the cleaned text\n \"\"\"\n text = \"\".join([word.lower() for word in text])\n tokens = text.split(\" \")\n tokens = [appos[word] if word in appos else word for word in tokens]\n text = \" \".join([wn.lemmatize(word) for word in tokens]) \n return text\n\n\n#--------------------------\n# execute pre-processing\n#--------------------------\ndf['text']=df.swifter.apply(lambda x:pre_process(x['text']),axis=1) \n", "_____no_output_____" ] ], [ [ "## 5. Feature generation <a name=\"feature\"></a>", "_____no_output_____" ], [ "### 5.1- Dimension reduction technique (Chi-square)<a name=\"dimension\"></a>", "_____no_output_____" ] ], [ [ "#--------------------------------------\n#dimension reduction using chi-square\n#--------------------------------------\n\n\nx_train, x_validation, y_train, y_validation = train_test_split(df['text'], df['label'], test_size=.02)\n\ntvec = TfidfVectorizer(max_features=100000,ngram_range=(1, 3))\nx_train_tfidf = tvec.fit_transform(x_train)\nx_validation_tfidf = tvec.transform(x_validation)\n\n#reference[2]", "_____no_output_____" ] ], [ [ "### 5-2 Multinomial logistic regression<a name=\"model\"></a>", "_____no_output_____" ] ], [ [ "ch = SelectKBest(chi2, k=40000)\nx_train_feature_selected=ch.fit_transform(x_train_tfidf, y_train)\nx_test_chi_selected = ch.transform(x_validation_tfidf)\n\nfrom sklearn import linear_model\n\nclf = linear_model.LogisticRegression(multi_class='multinomial',solver = 'newton-cg')\nclf.fit(x_train_feature_selected, y_train)\nscore = clf.score(x_test_chi_selected, y_validation)\nscore", "_____no_output_____" ] ], [ [ "### 5-3 Cross-validation <a name=\"cv\"></a>", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import KFold, cross_val_score\n\n#rf = RandomForestClassifier(n_jobs=-1)\nk_fold = KFold(n_splits=3)\ncross_val_score(clf, x_train_chi2_selected, y_train, cv=k_fold, scoring='accuracy', n_jobs=-1)", "_____no_output_____" ] ], [ [ "--------------------------------", "_____no_output_____" ], [ "# 6.Prediction on test data<a name=\"test\"></a>", "_____no_output_____" ] ], [ [ "#--------------------------------------\n## Reading the test file into dataframe\n#--------------------------------------\n\n\ntest=pd.read_csv(\"test_data.csv\", sep=',')", "_____no_output_____" ], [ "#--------------------------------------------------------------------\n## Cleaning the test data as per the cleaning technique of train data\n#--------------------------------------------------------------------\n\ntest['text']=test.swifter.apply(lambda x:pre_process(x['text']),axis=1)", "_____no_output_____" ], [ 
"#--------------------------------------------------------------------\n## Transforming the text into vector tfidf vectorizer with chi-sqaure\n#--------------------------------------------------------------------\n\n\ntest_matrix= tvec.transform(test['text'])\ntest_matrix = ch.transform(test_matrix)", "_____no_output_____" ], [ "#---------------------------------------------------------------------\n## predicting the labels, storing it as label column in test dataframe\n#---------------------------------------------------------------------\n\ntest['label'] = pd.DataFrame(clf.predict(test_matrix))", "_____no_output_____" ], [ "#-----------------------------------------------------------\n## dropping all other columns keeping only test_id and label\n#-----------------------------------------------------------\n\ntest=test[['test_id','label']]\n\n############################################################\n\n#--------------------------------\n#Converting the dataframe to csv\n#--------------------------------\n\ntest.to_csv('predict_label.csv',index=False)", "_____no_output_____" ] ], [ [ "# References", "_____no_output_____" ], [ ".[1] https://drive.google.com/file/d/0B1yuv8YaUVlZZ1RzMFJmc1ZsQmM/view \n[2] https://github.com/tthustla/twitter_sentiment_analysis_part8/blob/master/Capstone_part4-Copy6.ipynb", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
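The NLP record above wires up TF-IDF features, chi-squared selection and a multinomial logistic regression by hand, and its cross-validation cell refers to `x_train_chi2_selected` although the fitted variable is named `x_train_feature_selected`. A hedged sketch of the same workflow as a single scikit-learn `Pipeline`, which sidesteps that kind of name mismatch; the hyperparameters and the `text`/`label` column names come from the notebook, and `df` is assumed to be its merged training frame.

```python
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score

# TF-IDF -> chi-square feature selection -> multinomial logistic regression
clf = Pipeline([
    ("tfidf", TfidfVectorizer(max_features=100000, ngram_range=(1, 3))),
    ("chi2", SelectKBest(chi2, k=40000)),
    ("logreg", LogisticRegression(multi_class="multinomial", solver="newton-cg")),
])

# 3-fold cross-validation on the raw text column; the pipeline refits every step per fold
scores = cross_val_score(clf, df["text"], df["label"],
                         cv=KFold(n_splits=3), scoring="accuracy", n_jobs=-1)
print(scores, scores.mean())
```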
cbc647677a020da1b0ae37d3d818a1f6664c83df
5,207
ipynb
Jupyter Notebook
ipynb/Germany-Sachsen-Anhalt-LK-Mansfeld-Südharz.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
2
2020-06-19T09:16:14.000Z
2021-01-24T17:47:56.000Z
ipynb/Germany-Sachsen-Anhalt-LK-Mansfeld-Südharz.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
8
2020-04-20T16:49:49.000Z
2021-12-25T16:54:19.000Z
ipynb/Germany-Sachsen-Anhalt-LK-Mansfeld-Südharz.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
4
2020-04-20T13:24:45.000Z
2021-01-29T11:12:12.000Z
31.179641
201
0.531208
[ [ [ "# Germany: LK Mansfeld-Südharz (Sachsen-Anhalt)\n\n* Homepage of project: https://oscovida.github.io\n* Plots are explained at http://oscovida.github.io/plots.html\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Sachsen-Anhalt-LK-Mansfeld-Südharz.ipynb)", "_____no_output_____" ] ], [ [ "import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")", "_____no_output_____" ], [ "%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *", "_____no_output_____" ], [ "overview(country=\"Germany\", subregion=\"LK Mansfeld-Südharz\", weeks=5);", "_____no_output_____" ], [ "overview(country=\"Germany\", subregion=\"LK Mansfeld-Südharz\");", "_____no_output_____" ], [ "compare_plot(country=\"Germany\", subregion=\"LK Mansfeld-Südharz\", dates=\"2020-03-15:\");\n", "_____no_output_____" ], [ "# load the data\ncases, deaths = germany_get_region(landkreis=\"LK Mansfeld-Südharz\")\n\n# get population of the region for future normalisation:\ninhabitants = population(country=\"Germany\", subregion=\"LK Mansfeld-Südharz\")\nprint(f'Population of country=\"Germany\", subregion=\"LK Mansfeld-Südharz\": {inhabitants} people')\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 1000 rows\npd.set_option(\"max_rows\", 1000)\n\n# display the table\ntable", "_____no_output_____" ] ], [ [ "# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Sachsen-Anhalt-LK-Mansfeld-Südharz.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook", "_____no_output_____" ], [ "# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------", "_____no_output_____" ] ], [ [ "print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")", "_____no_output_____" ], [ "# to force a fresh download of data, run \"clear_cache()\"", "_____no_output_____" ], [ "print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
cbc6488b7ac509d586bf6a2bd4ed64083085153a
4,462
ipynb
Jupyter Notebook
notebooks/tables/name.ipynb
source-foundry/opentype-notes
5605eaf3cb2dd323dc663392ad93507f888f3a4b
[ "CC-BY-4.0" ]
null
null
null
notebooks/tables/name.ipynb
source-foundry/opentype-notes
5605eaf3cb2dd323dc663392ad93507f888f3a4b
[ "CC-BY-4.0" ]
36
2019-05-04T20:21:28.000Z
2019-05-04T20:46:59.000Z
notebooks/tables/name.ipynb
source-foundry/opentype-notes
5605eaf3cb2dd323dc663392ad93507f888f3a4b
[ "CC-BY-4.0" ]
null
null
null
20.850467
212
0.521739
[ [ [ "# name Table\n\n<a href=\"https://colab.research.google.com/github/source-foundry/opentype-notes/blob/master/notebooks/tables/name.ipynb\">\n <img style=\"margin-left:0;margin-top:15px\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n</a>\n\n## Description\n\nThe name table includes platform-specific localized records of font metadata. These records are organized by platform ID, platform endcoding ID, and language ID. There are 26 defined name record fields.\n\n## Documentation\n\n- [Apple Specification](https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html)\n- [Microsoft Specification](https://docs.microsoft.com/en-us/typography/opentype/spec/name)\n", "_____no_output_____" ], [ "## Source\n", "_____no_output_____" ], [ "### Settings\n\nChange the paths below to view the table in a different font.\n", "_____no_output_____" ] ], [ [ "FONT_URL = \"https://github.com/source-foundry/opentype-notes/raw/master/assets/fonts/roboto/Roboto-Regular.ttf\"\nFONT_PATH = \"Roboto-Regular.ttf\"\n", "_____no_output_____" ] ], [ [ "### Setup\n", "_____no_output_____" ] ], [ [ "import os\n\ntry:\n import fontTools\nexcept ImportError:\n !pip install fontTools\n\nif not os.path.exists(FONT_PATH):\n !curl -L -O {FONT_URL}\n", "_____no_output_____" ] ], [ [ "### View Table\n", "_____no_output_____" ] ], [ [ "!ttx -t name -o - {FONT_PATH}\n", "_____no_output_____" ] ], [ [ "### Read/Write Access to Table\n\n- [fontTools `_n_a_m_e.py` module](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ttLib/tables/_n_a_m_e.py)\n", "_____no_output_____" ] ], [ [ "import inspect\nfrom fontTools.ttLib import TTFont\n\n# instantiate table object\ntt = TTFont(FONT_PATH)\ntable = tt[\"name\"]\n\n# print table methods\nprint(\"Printing methods of {}:\".format(table))\nmethods = inspect.getmembers(table, predicate=inspect.ismethod)\nmethods_list = [method[0] for method in methods]\nfor x in sorted(methods_list):\n print(x)\n", "_____no_output_____" ] ], [ [ "### Cleanup\n", "_____no_output_____" ] ], [ [ "!rm {FONT_PATH}\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
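The fontTools record above only dumps the name table via ttx and lists the table object's methods. A short sketch of reading individual name records directly, assuming the same local `Roboto-Regular.ttf` that the notebook downloads:

```python
from fontTools.ttLib import TTFont

font = TTFont("Roboto-Regular.ttf")
name_table = font["name"]

# Each NameRecord carries nameID, platformID, platEncID, langID plus the encoded string
for record in name_table.names[:10]:
    print(record.nameID, record.platformID, record.langID, record.toUnicode())

# getDebugName picks a readable record for a nameID: 1 = family, 4 = full name, 6 = PostScript name
print(name_table.getDebugName(1), name_table.getDebugName(4), name_table.getDebugName(6))
```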
cbc649c66f6d21cf578e1a6cc3d1dc8a6402a530
172,548
ipynb
Jupyter Notebook
pesquisa oportunidade odi jtbd.ipynb
renatoac/jupyternotebooks
f78e624907712780e78ead6de40892fde8a93a23
[ "MIT" ]
null
null
null
pesquisa oportunidade odi jtbd.ipynb
renatoac/jupyternotebooks
f78e624907712780e78ead6de40892fde8a93a23
[ "MIT" ]
null
null
null
pesquisa oportunidade odi jtbd.ipynb
renatoac/jupyternotebooks
f78e624907712780e78ead6de40892fde8a93a23
[ "MIT" ]
null
null
null
39.693582
14,660
0.266001
[ [ [ "import pandas as pd\nimport re\ndata = pd.read_csv('pesquisausuarios.csv')\n\ndf_oportunidade = pd.DataFrame()\n\nfor column in data.columns:\n    if \"Satisfacao\" in column:\n        m = re.search('Satisfacao (.*)', column)\n        new_col = m.group(1)\n        df_oportunidade[\"Oportunidade \" + new_col] = data.apply(lambda row: row[\"Importancia \" + new_col] + max(int(row[\"Importancia \" + new_col] - row[\"Satisfacao \" + new_col]),0), axis=1)\n\ndf_oportunidade", "_____no_output_____" ] ], [ [ "# Discover the clusters", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\n\ncluster = KMeans(n_clusters=2)\ndata['Cluster'] = cluster.fit_predict(data.iloc[:,2:])\n\ndata", "_____no_output_____" ] ], [ [ "# Group by Cluster", "_____no_output_____" ] ], [ [ "df = data.iloc[:,1:].groupby(['Cluster'], as_index = False).mean()\ndf", "_____no_output_____" ] ], [ [ "# Group outcomes and cluster", "_____no_output_____" ] ], [ [ "# Turn the Outcome columns into rows, grouping by Outcome and Cluster\noutcomes = pd.melt(df, id_vars=[('Cluster')])\noutcomes\n\nImportancia = outcomes[outcomes.variable.str.contains(\"Importancia.*\")]\nSatisfacao = outcomes[outcomes.variable.str.contains(\"Satisfacao.*\")]\n", "_____no_output_____" ] ], [ [ "# Discover attractive Outcomes", "_____no_output_____" ] ], [ [ "new = {'Outcome': Importancia['variable']}\ndf_segmento = pd.DataFrame(data=new)\ndf_segmento['Cluster'] = Importancia['Cluster']\ndf_segmento['Satisfacao'] = Satisfacao['value'].values #see https://stackoverflow.com/a/26221919\ndf_segmento['Importancia'] = Importancia['value']\ndf_segmento.tail()", "_____no_output_____" ] ], [ [ "# Calculate opportunity and opportunity segment", "_____no_output_____" ] ], [ [ "def calcular_oportunidade_segmento(row):\n    row['Oportunidade'] = row['Importancia'] + (row['Importancia'] - row['Satisfacao'])\n    if row['Oportunidade'] > 15.0:\n        row['Segmento_oportunidade'] = 'Muito atrativo'\n    elif row['Oportunidade'] > 10.0 and row['Oportunidade'] < 15.0:\n        row['Segmento_oportunidade'] = 'Atrativo'\n    else:\n        row['Segmento_oportunidade'] = 'Não atrativo'\n    return row\n\ndf_segmento = df_segmento.apply(calcular_oportunidade_segmento, axis=1)\ndf_segmento.tail()\n\n", "_____no_output_____" ], [ "from ggplot import *\nimport matplotlib.pyplot as plt \nimport seaborn as sns\n\nggplot(df_segmento, aes(x='Satisfacao', y='Importancia', color='Cluster')) + \\\n    geom_point(size=75) + \\\n    ggtitle(\"Customers Grouped by Cluster\") + \\\n    xlim(1, 10) + \\\n    ylim(1, 10)\n\ng = sns.FacetGrid(df_segmento, hue=\"Cluster\", size=6)\ng.map(plt.scatter, \"Satisfacao\", \"Importancia\", s=50, alpha=.7, linewidth=.5, edgecolor=\"white\")\ng.set(xlim=(1, 10), ylim=(1, 10));\ng.add_legend();\n", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nfrom factor_analyzer import FactorAnalyzer\nfa = FactorAnalyzer()\n\nfa.analyze(df_oportunidade.iloc[:,1:-2], 2, rotation='varimax', method='MINRES')\nnew_df = fa.loadings\n\n#new_df.loc[new_df['Factor1'] < 0.1, 'Factor1'] = np.nan\n#new_df.loc[new_df['Factor2'] < 0.1, 'Factor2'] = np.nan\n#new_df.loc[new_df['Factor3'] < 0.1, 'Factor3'] = np.nan\n#new_df.loc[new_df['Factor4'] < 0.1, 'Factor4'] = np.nan\n\nnew_df[(new_df.Factor1 > 0.1) | (new_df.Factor2 > 0.1)]\n\n# Keep in mind that each of the identified factors should have at least three variables \n# with high factor loadings, and that each variable should load highly on only one factor.\n\nfa.get_factor_variance()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
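The record above computes the ODI opportunity score (importance plus the unmet importance-satisfaction gap, floored at zero) row by row with `apply`, and separately buckets a per-cluster score into attractiveness segments. A vectorized sketch that condenses those two steps into one toy frame; the numbers are made up, the column names simply mirror the notebook, and the boundary handling at exactly 10 and 15 differs slightly from the original if/elif.

```python
import pandas as pd

# Toy slice with one row per outcome, scores on a 1-10 scale (illustrative values only)
df = pd.DataFrame({"Importancia": [9.2, 7.5, 6.0],
                   "Satisfacao":  [4.1, 7.9, 5.5]})

# Opportunity = importance + max(importance - satisfaction, 0), without a Python-level loop
df["Oportunidade"] = df["Importancia"] + (df["Importancia"] - df["Satisfacao"]).clip(lower=0)

# Buckets following the notebook's thresholds (>15 very attractive, 10-15 attractive, otherwise not)
df["Segmento_oportunidade"] = pd.cut(df["Oportunidade"],
                                     bins=[-float("inf"), 10, 15, float("inf")],
                                     labels=["Não atrativo", "Atrativo", "Muito atrativo"])
print(df)
```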
cbc671034e32065db0ae87c32ebbec247fbf803a
108,939
ipynb
Jupyter Notebook
Fentes_Young/Fentes_Young_OCaml_sujet.ipynb
kf71/Probabilite_avec_OCaml
3966a154217d6ca6abbf2223823615ad8ae0e64b
[ "MIT" ]
null
null
null
Fentes_Young/Fentes_Young_OCaml_sujet.ipynb
kf71/Probabilite_avec_OCaml
3966a154217d6ca6abbf2223823615ad8ae0e64b
[ "MIT" ]
null
null
null
Fentes_Young/Fentes_Young_OCaml_sujet.ipynb
kf71/Probabilite_avec_OCaml
3966a154217d6ca6abbf2223823615ad8ae0e64b
[ "MIT" ]
null
null
null
59.302667
1,138
0.635172
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cbc697aecd73117bc24c8ad1b941837ff95e0ea2
97,434
ipynb
Jupyter Notebook
ajoens/BurstCube_grbrates_simulation.ipynb
BurstCube/Users
4df31e695ec6677800bf1f3feaf7864e2785a6a4
[ "MIT" ]
null
null
null
ajoens/BurstCube_grbrates_simulation.ipynb
BurstCube/Users
4df31e695ec6677800bf1f3feaf7864e2785a6a4
[ "MIT" ]
null
null
null
ajoens/BurstCube_grbrates_simulation.ipynb
BurstCube/Users
4df31e695ec6677800bf1f3feaf7864e2785a6a4
[ "MIT" ]
1
2018-07-23T19:18:05.000Z
2018-07-23T19:18:05.000Z
139.991379
50,548
0.867018
[ [ [ "import numpy as np\nimport matplotlib.pylab as plot\nfrom astropy.io import ascii,fits\nfrom scipy import interpolate\nimport grb_catalogs_copy\nfrom BurstCube.LocSim.Detector import *\nfrom BurstCube.LocSim.Spacecraft import *\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom scipy.optimize import curve_fit\nimport math\nfrom astropy.table import Table\nimport pandas as pd", "_____no_output_____" ], [ "## code to use when reading in GBM effective area in order to get data into the desired format\ndef getGBMdata(gbmfile=None):\n \"\"\"Reads the GBM NaI effective area file and returns a numpy array\n with two columns ``energy`` and ``aeff``.\n Parameters\n ----------\n gbmfile : string\n Name of file that contains the GBM data.\n Returns\n ----------\n gbmdata : array \n numpy array with two columns ``energy`` and ``aeff``\n \"\"\"\n \n return np.genfromtxt(gbmfile,skip_header=2,names=('energy', 'aeff'))\n", "_____no_output_____" ], [ "## bit of useful code for interpolating in log space\ndef loginterpol(x,y,x1):\n\n f=interpolate.interp1d(np.log10(x),np.log10(y),bounds_error=False,fill_value=\"extrapolate\",kind='linear')\n y1=10**f(np.log10(x1))\n\n return y1\n\ndef loginterpol2d(x,y,z,x1,y1):\n\n wz=np.where(z==0)[0]\n zz=z\n zz[wz]=1.\n f=interpolate.interp2d(x,y,np.log10(zz),bounds_error=False,fill_value=\"extrapolate\",kind='linear')\n z1=10**f(x1,y1)", "_____no_output_____" ], [ "#read in GBM Trigger Catalog\ntrigfit=fits.open('gbmtrigcat.fits')\ntrig=trigfit[1].data\n\n#read in GBM Burst Catalog\ngbmfit=fits.open('gbmgrbcat_copy.fits')\ngbm=gbmfit[1].data\n", "_____no_output_____" ], [ "## generate random positions on the sky with equal area probability\ndef random_sky(n=1):\n\n u=np.random.rand(n)\n v=np.random.rand(n)\n\n phi=2*np.pi*u\n theta=np.arccos(2*v-1.)\n\n dec=-np.degrees(theta-np.pi/2.)\n ra=np.degrees(np.pi*2-phi)\n\n return ra,dec", "_____no_output_____" ], [ "#function to match GRBs in the Trigger catalog to those in the grb catalog so that we can create an array of the grbs in both\n#We will use the trigger timescale found in the trigger catalog \ndef match_catalogs_name(name1,name2):\n\n ind_dict = dict((q,e) for e,q in enumerate(name1))\n inter = set(ind_dict).intersection(name2)\n m1 = [ind_dict[element] for element in inter]\n print(np.shape(m1))\n \n ind_dict = dict((q,e) for e,q in enumerate(name2))\n inter = set(ind_dict).intersection(name1)\n m2 = [ind_dict[element] for element in inter]\n print(np.shape(m2))\n \n return m1,m2\n", "_____no_output_____" ], [ "#ordering the trig and gbm catalog so that they are in the same order\nso=np.argsort(np.array(trig['NAME']))\ntrig=trig[so]\nso=np.argsort(np.array(gbm['NAME']))\ngbm=gbm[so]\n\n#creating array of grbs that are found in both catalogs\nm1, m2 = match_catalogs_name(trig['NAME'],gbm['NAME'])\n\n#defining our two samples of bursts that are found in both catalogs so that we can utilize them further down\ntrigbursts = trig[m1]\ngbmbursts = gbm[m2]\nprint(gbmbursts['NAME'])", "(2352,)\n(2352,)\n['GRB091017861' 'GRB150822178' 'GRB150721242' ... 
'GRB140626843'\n 'GRB150628767' 'GRB110410772']\n" ], [ "## read in the GBM Aeff\naeff_gbm = getGBMdata('/home/alyson/NASA/Simulation/BurstCube/Users/ajoens/gbm_effective_area.dat')", "_____no_output_____" ], [ "## read in BurstCube Aeff for various BC configurations\nfile='/home/alyson/NASA/Simulation/BurstCube/Users/jracusin/BC_eff_area_curves.ecsv'\nbcaeffs=ascii.read(file,format='ecsv')", "_____no_output_____" ], [ "## separate GBM short & long GRBs\nw=np.where(gbmbursts['FLUX_1024']>0)\ngbmbursts=gbmbursts[w]\ns=np.where((gbmbursts['T90'] <= 2.)&((gbmbursts['PFLX_SPECTRUM_STOP']-gbmbursts['PFLX_SPECTRUM_START'])>0))[0]\nl=np.where(gbmbursts['T90'] > 2.)[0]\nm=np.where(gbmbursts['PFLX_BEST_FITTING_MODEL'][s] == ' ')", "_____no_output_____" ], [ "## grab short GRBs with peak spectral info & plot all of the Aeff curves\nbceng=bcaeffs['keV']\nbcengdiff=bceng[1:]-bceng[0:-1]\nw=np.where(bcengdiff<0)[0]\nnsims=len(w)\nw=np.append(-1,w)#,len(eng))\nfor i in range(nsims):\n plot.plot(bcaeffs['keV'][w[i]+1:w[i+1]+1],bcaeffs['aeff'][w[i]+1:w[i+1]+1])\nplot.xscale('log')\nplot.yscale('log')\nplot.xlabel('Energy (keV)')\nplot.ylabel(r'Effective Area (cm$^2$)')\nplot.plot(aeff_gbm['energy'],aeff_gbm['aeff'])\ni=0\ngbmae=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],bceng[w[i]+1:w[i+1]+1])\nplot.plot(bceng[w[i]+1:w[i+1]+1],gbmae)\n\nplot.show()", "_____no_output_____" ], [ "## grab energies from those curves and create an array of the energies\nE=np.array(bceng[w[i]+1:w[i+1]+1])\nprint(E)", "[ 50. 75.1462 112.939 169.739 255.105 383.404\n 576.227 866.025 1301.57 1956.16 2939.97 4418.55\n 6640.74 9980.54 15000. ]\n" ], [ "#Integrating the best fit spectrum for each GRB in the energy range of 50-300 KeV to get max. observed photon flux. \n#Doing the same but also folding in the effective area in order to get count rate.\n#This will give us the photon flux in units of ph/cm^2/s. 
\nmo=gbmbursts['PFLX_BEST_FITTING_MODEL'][s]\nbcpf=np.zeros(len(s)) \npf=np.zeros(len(s))\ngbmcr=np.zeros(len(s))\nbccr=np.zeros(len(s))\noutE=np.logspace(np.log10(50),np.log10(300),100) # returns numbers spaced evenly on a log scale\nfor i in range(len(s)):\n for j in range(nsims):\n E=np.array(bceng[w[j]+1:w[j+1]+1])\n AeffBC=loginterpol(E,bcaeffs['aeff'][w[j]+1:w[j+1]+1],outE)\n AeffGBM=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],outE) #eng[w[j]+1:w[j+1]+1]) \n Aratio=(AeffBC/AeffGBM)\n \n # this should give us an array of the maximum observed photon flux for GBM\n if mo[i]=='PFLX_PLAW':\n gbmcr[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*AeffGBM,outE)\n pf[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]]),outE)\n bccr[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*AeffGBM*Aratio,outE)\n bcpf[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*Aratio,outE)\n \n if mo[i]=='PFLX_COMP':\n gbmcr[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*AeffGBM,outE)\n pf[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]]),outE)\n bccr[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*AeffGBM*Aratio,outE)\n bcpf[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*Aratio,outE)\n \n if mo[i]=='PFLX_BAND':\n gbmcr[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*AeffGBM,outE)\n pf[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]]),outE)\n bccr[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*AeffGBM*Aratio,outE)\n bcpf[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*Aratio,outE)\n\n if mo[i]=='PFLX_SBPL':\n gbmcr[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*AeffGBM,outE)\n pf[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]]),outE)\n bccr[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*AeffGBM*Aratio,outE)\n bcpf[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*Aratio,outE)\n\n", "_____no_output_____" ], [ "#plot Batse[64] against pf to see if they are the 
same\nflux=gbmbursts['FLUX_BATSE_64'][s]", "(339,)\n" ], [ "#define probability\n#p = np.array((np.arange(pf.shape[0])+1)**(-1.0))\np = np.array((np.arange(pf.shape[0])+1.05)**(-0.5))\np=p/sum(p)\n#randomly sample from the array of photon fluxes found above using our probability function found above so we draw more low flux bursts\n#creating our \"intrinsic\" sample\nr=np.random.choice(pf.shape[0], 1200, replace=True, p=p)\n\n\nsimgbmpfsample = np.array(pf[r])\nsimgbmcr = np.array(gbmcr[r])\nsimbcpfsample = np.array(bcpf[r])\nsimbccr = np.array(bccr[r])\n", "[1.65912578 2.11286919 1.40993712 ... 2.30111001 1.84464744 2.67858226]\n[0.8233 1.04303742 0.70100117 ... 1.14124665 0.95504148 1.33609964]\n" ], [ "#examining our probability distribution to be sure it is performing the eay we intend it to\nprint(min(p),max(p))\nplot.hist(p)", "0.0015372087534047084 0.02762295103904951\n" ], [ "## setup GBM\ngbm_pointings = {'01': ('45:54:0','20:36:0'),\n '02': ('45:6:0','45:18:0'),\n '03': ('58:24:0','90:12:0'),\n '04': ('314:54:0','45:12:0'),\n '05': ('303:12:0','90:18:0'),\n '06': ('3:24:0','89:48:0'),\n '07': ('224:54:0','20:24:0'),\n '08': ('224:36:0','46:12:0'),\n '09': ('236:36:0','90:0:0'),\n '10': ('135:12:0','45:36:0'),\n '11': ('123:42:0','90:24:0'),\n '12': ('183:42:0','90:18:0')}\n\nfermi = Spacecraft(gbm_pointings,window=0.1)\n\nres = 250\nrr,dd = np.meshgrid(np.linspace(0,360,res,endpoint=False),np.linspace(-90,90,res))\nexposure_positions = np.vstack([rr.ravel(),dd.ravel()])\ngbm_exposures = np.array([[ detector.exposure(position[0],position[1]) for position in exposure_positions.T] \n for detector in fermi.detectors])", "_____no_output_____" ], [ "## setup BurstCube\npointings = {'01': ('0:0:0','45:0:0'),\n '02': ('90:0:0','45:0:0'),\n '03': ('180:0:0','45:0:0'),\n '04': ('270:0:0','45:0:0')}\nburstcube = Spacecraft(pointings,window=0.1)\n\nres = 250\nrr,dd = np.meshgrid(np.linspace(0,360,res,endpoint=False),np.linspace(-90,90,res))\nexposure_positions = np.vstack([rr.ravel(),dd.ravel()])\nexposures = np.array([[ detector.exposure(position[0],position[1]) for position in exposure_positions.T] \n for detector in burstcube.detectors])", "_____no_output_____" ], [ "#using SkyCoord to convert coordinates to degrees and solve for distances.\ndef separation(ra1,dec1,ra2,dec2):\n\n c=SkyCoord(ra=ra1*u.deg,dec=dec1*u.deg)\n d=SkyCoord(ra=ra2*u.deg,dec=dec2*u.deg)\n dist=c.separation(d)\n dist=dist.value\n\n return dist", "_____no_output_____" ], [ "# now that GBM and BurstCube's pointings are set up we will throw GRBs at it and determine the exposure for each GRB. \n#generate GRBs and throw them at GBM\n\ndef throw_grbs(fermi,minflux,maxflux):\n \n nsims=int(np.round(len(simgbmpfsample))) \n ra,dec=random_sky(nsims)\n ra=np.array(ra)-180\n dec=np.array(dec)\n \n\n #GBM and BurstCube exposures for each random GRB.\n randgbmexposures = np.array([[detector.exposure(ra[i],dec[i]) for i in range(nsims)] for detector in fermi.detectors])\n randbcexposures = np.array([[detector.exposure(ra[i],dec[i]) for i in range(nsims)] for detector in burstcube.detectors])\n \n #Order randgbmexposures into descending order\n for column in randgbmexposures.T:\n newrandgbm = -np.sort(-randgbmexposures.T) \n gbmexposures = np.transpose(newrandgbm)\n \n for col in randbcexposures.T:\n newrandbc = -np.sort(-randbcexposures.T) \n bcexposures = np.transpose(newrandbc)\n\n \n #Select the second highest exposure value. 
\n #We will use this to ensure the second highest exposure detector has a sig >4.5\n secondhighestgbm = gbmexposures[1,:]\n secondhighestbc = bcexposures[1,:]\n \n return gbmexposures, bcexposures, secondhighestgbm, secondhighestbc, randgbmexposures, randbcexposures", "_____no_output_____" ], [ "#define the peak flux interval using the trigger catalog\nmsinterval = trigbursts['Trigger_Timescale'][s] \ninterval = msinterval/1000\n", "_____no_output_____" ], [ "#flux=simpf this is in ph/sec\nflux=simgbmpfsample\nminflux=min(flux)\nmaxflux=max(flux)\ngbmexposures, bcexposures, secondhighestgbm, secondhighestbc, randgbmexposures, randbcexposures = throw_grbs(fermi,minflux,maxflux)\n\n", "<class 'int'>\n" ], [ "#Solve for the number of detected counts which will equal our source photons\nsourcegbm = simgbmcr*secondhighestgbm*interval[r]\nsourcebc = simbccr*secondhighestbc*interval[r]", "_____no_output_____" ], [ "#Assuming a background count rate. units: cts/s\nbckgrd=300\n\n#scale the background count rate for the second highest detector\nscaledgbmbckgrd = bckgrd*secondhighestgbm*interval[r]\nscaledbcbckgrd = bckgrd*secondhighestbc*interval[r]\n", "_____no_output_____" ], [ "#creating an array of zeros that I can manipulate to create an array of detected GRBs\ndetectgbm = np.zeros(len(simgbmpfsample))\ndetectbc = np.zeros(len(simbcpfsample))\n\n#calculate the significance of the second highest exposure detector. If the significance is greater than 4.5 sigma than the burst is detectable.\nfor u in range(len(simgbmpfsample)):\n sig = sourcegbm[u] / (math.sqrt(sourcegbm[u] + scaledgbmbckgrd[u]))\n if sig > 4.5:\n detectgbm[u] = 1.0\n else:\n detectgbm[u] = 0.0\n\nfor j in range(len(simbcpfsample)):\n sig = sourcebc[j] / (math.sqrt(sourcebc[j] + scaledbcbckgrd[j]))\n if sig > 4.5:\n detectbc[j] = 1.0\n else:\n detectbc[j] = 0.0\n", "/home/alyson/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in double_scalars\n import sys\n/home/alyson/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in double_scalars\n \n" ], [ "#Creating plot of peak flux versus counts for real and simulated GBM\nw=np.where(pf>0)[0]\nwg = np.where(simgbmcr*detectgbm>0)[0]\nwbc = np.where(simbccr*detectbc>0)[0]\n\n\nfig=plot.figure(figsize=(20,5))\nplot.subplot(1,2,1)\nplot.hist(gbmcr[w],label='real GBM',bins=np.logspace(1,6,40),color='orange')\nplot.hist(simgbmcr[wg],label='Simulated GBM',bins=np.logspace(1,6,40),alpha=0.7,color='blue')\nplot.hist(simbccr[wbc],label='Simulated BurstCube',bins=np.logspace(1,6,40),alpha=0.7,color='green')\nplot.xscale('log')\nplot.legend()\nplot.subplot(1,2,2)\n#plot.hist(flux,label='All',bins=np.logspace(-1,2,40),color='green')\n#pf has been gathered from the GBM catalog\nplot.hist(pf[w],label='real GBM',bins=np.logspace(-1,4,40),color='orange')\n# this is the simulated GBM\nplot.hist(simgbmpfsample[wg],label='Simulated GBM',bins=np.logspace(-1,4,40),alpha=0.7,color='blue')\nplot.hist(simbcpfsample[wbc],label='Simulated BurstCube',bins=np.logspace(-1,4,40),alpha=0.7,color='green')\n#plot.hist(flux[w],label='BC',bins=np.logspace(-1,2,40),alpha=0.7,color='red')\nplot.xscale('log')\nplot.legend()\n\nplot.show()\n ", "_____no_output_____" ], [ "#solve for the detection fraction of BurstCube and Simulated GBM\ndetgbm = np.where(detectgbm == 1)[0]\nratiogbm = len(detgbm) / len(detectgbm)\nprint(ratiogbm)\n\ndetbc = np.where(detectbc == 1)[0]\nratiobc = len(detbc) / 
len(detectbc)\nprint(ratiobc)\n\n#number of bursts BurstCube will see a year\nbcbursts = ratiobc/ratiogbm *40\nprint(bcbursts)", "0.3475\n0.15916666666666668\n18.321342925659472\n" ] ] ]
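The catalog cells above repeatedly integrate a photon spectrum times an effective-area curve with np.trapz. A minimal, self-contained sketch of that pattern follows; the power_law function is a stand-in for grb_catalogs_copy.pl, and the amplitude, index, and effective-area values are invented for illustration, not taken from the catalog.

```python
import numpy as np

# Hypothetical stand-in for grb_catalogs_copy.pl: a power-law photon spectrum
# (photons / cm^2 / s / keV) pivoted at 100 keV.
def power_law(energy_kev, index, pivot_kev=100.0):
    return (energy_kev / pivot_kev) ** index

out_e = np.logspace(np.log10(50), np.log10(300), 100)  # 50-300 keV grid, as in the cells above
ampl, index = 0.05, -1.4                               # made-up spectral parameters
aeff = np.full_like(out_e, 120.0)                      # made-up flat effective area in cm^2

# Band-integrated photon flux (ph / cm^2 / s) and detector count rate (cts / s),
# mirroring the np.trapz calls used for the PFLX_* models above.
photon_flux = np.trapz(ampl * power_law(out_e, index), out_e)
count_rate = np.trapz(ampl * power_law(out_e, index) * aeff, out_e)
print(photon_flux, count_rate)
```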
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc6a21d8e961344260cf8b53bd7d5edea5dbbf4
1,849
ipynb
Jupyter Notebook
tanmay/leetcode/valid-parentheses.ipynb
Aryan284/competitive-coding
af56e4fccfb209c6a616b913621a898f326f92c5
[ "MIT" ]
null
null
null
tanmay/leetcode/valid-parentheses.ipynb
Aryan284/competitive-coding
af56e4fccfb209c6a616b913621a898f326f92c5
[ "MIT" ]
null
null
null
tanmay/leetcode/valid-parentheses.ipynb
Aryan284/competitive-coding
af56e4fccfb209c6a616b913621a898f326f92c5
[ "MIT" ]
2
2020-10-07T13:48:02.000Z
2022-03-31T16:10:36.000Z
27.191176
71
0.339102
[ [ [ "# https://leetcode.com/problems/valid-parentheses/submissions/\nclass Solution:\n def isValid(self, s: str) -> bool:\n arr = []\n for i in s:\n if i == \"(\":\n arr.append(i)\n if i == \"{\":\n arr.append(i)\n if i == \"[\":\n arr.append(i)\n if i == \")\":\n if not len(arr):\n return False\n a = arr.pop()\n if a != \"(\":\n return False\n if i == \"}\":\n \n if not len(arr):\n return False\n a = arr.pop()\n if a != \"{\":\n return False\n if i == \"]\":\n \n if not len(arr):\n return False\n a = arr.pop()\n if a != \"[\":\n return False\n if len(arr):\n return False\n return True", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cbc6b7fec0213a809c062d2059c64dbc4f67d33e
339,435
ipynb
Jupyter Notebook
Aircondition_MRO/HNA_Survival_Analysis/10-HNA-Stephen-398908-3.ipynb
luoshuangyang/test1
30a4a2646a8d4739fb74aa54c1534bf8cd43b50c
[ "Apache-2.0" ]
null
null
null
Aircondition_MRO/HNA_Survival_Analysis/10-HNA-Stephen-398908-3.ipynb
luoshuangyang/test1
30a4a2646a8d4739fb74aa54c1534bf8cd43b50c
[ "Apache-2.0" ]
null
null
null
Aircondition_MRO/HNA_Survival_Analysis/10-HNA-Stephen-398908-3.ipynb
luoshuangyang/test1
30a4a2646a8d4739fb74aa54c1534bf8cd43b50c
[ "Apache-2.0" ]
null
null
null
124.517608
30,808
0.7967
[ [ [ "# Part1", "_____no_output_____" ] ], [ [ "from __future__ import unicode_literals\n\nimport pandas as pd\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager\nfrom matplotlib.font_manager import FontProperties \n\nfont = FontProperties(fname=r\"/root/anaconda2/envs/python3/lib/python3.6/site-packages/matplotlib/mpl-data/fonts/ttf/msyh.ttf\")\n\nimport numpy as np\n\nfrom sksurv.nonparametric import kaplan_meier_estimator\nfrom sksurv.preprocessing import OneHotEncoder\nfrom sksurv.linear_model import CoxnetSurvivalAnalysis#CoxPHSurvivalAnalysis\nfrom sksurv.linear_model import CoxPHSurvivalAnalysis\nfrom sksurv.metrics import concordance_index_censored\nfrom sksurv.metrics import concordance_index_ipcw\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV", "_____no_output_____" ], [ "data1 = pd.read_csv(\"398908-3.csv\", encoding = \"GB2312\")\n\n#data1 = data1[data1[\"部件装上使用小时数\"]!=\"00:00\"]\ndata1[\"部件本次装机使用小时\"] = data1[\"部件本次装机使用小时\"].str.split(':').str[0].astype(int)\ndata1 = data1[data1[\"部件本次装机使用小时\"]>0]\n\ndata1[\"IsPlanned\"] = data1[\"非计划\"]==\"X\"\nprint(data1[\"IsPlanned\"].value_counts())\ndata_y = data1[[\"IsPlanned\", \"部件本次装机使用小时\"]]\ndata_y[\"部件本次装机使用小时\"].hist(bins=12, range=(0,60000))\n\ndata1[\"IsPlaneNew\"] = data1[\"部件装上飞行小时数\"]==\"00:00\"\ndata1[\"IsPartNew\"] = data1[\"部件装上使用小时数\"]==\"00:00\"\ndef CheckNew(p1,p2):\n if p1 and p2:\n return \"PlaneNew-PartNew\"\n elif p1 and not p2:\n return \"PlaneNew-PartOld\"\n elif not p1 and p2:\n return \"PlaneOld-PartNew\"\n elif not p1 and not p2:\n return \"PlaneOld-PartOld\"\n\n#print([CheckNew(row[\"IsPlaneNew\"], row[\"IsPartNew\"]) for idx, row in data1.iterrows()])\ndata1[\"PlanePartType\"] = [CheckNew(row[\"IsPlaneNew\"], row[\"IsPartNew\"]) for idx, row in data1.iterrows()]\n\ndata1[\"安装日期\"] = pd.to_datetime(data1[\"安装日期\"])\ndata1[\"安装年度\"] = data1[\"安装日期\"].dt.year\n\ndi = {\"霍尼韦尔\": \"HONEYWELL\"}\ndata1.replace({\"最近送修公司\": di}, inplace=True)\ndata1[\"最近送修公司\"].fillna(\"Unknown\", inplace=True)\n\ndata1[\"FH TSN\"].fillna(\"00:00\", inplace=True)\ndata1[\"部件装上飞行小时数\"] = data1[\"部件装上飞行小时数\"].str.split(':').str[0].astype(int)\ndata1[\"部件装上使用小时数\"] = data1[\"部件装上使用小时数\"].str.split(':').str[0].astype(int)\n\ndata1[\"部件装上飞行小时数-Range\"] = pd.cut(data1['部件装上飞行小时数'], 8)\n#data1[\"部件装上飞行循环数-Range\"] = pd.cut(data1['部件装上飞行循环数'], 8)\ndata1[\"部件装上使用小时数-Range\"] = pd.cut(data1['部件装上使用小时数'], 8)\n#data1[\"部件装上使用循环数-Range\"] = pd.cut(data1['部件装上使用循环数'], 8)\ndata1[\"CY TSN-Range\"] = pd.cut(data1['CY TSN'], 8)\ndata1[\"FH TSN-Range\"] = pd.cut(data1['FH TSN'], 8)\n\n#data_x = data1[[\"机型\",\"制造序列号\",\"机号\",\"参考类型\",\"指令类型\",\"序号\",\"拆换原因\",\"部件装上飞行循环数\",\"部件装上使用循环数\",\n# \"部件拆下飞行循环数\",\"部件拆下使用循环数\",\"装上序号\",\"最近送修公司\",\"CY TSN\",\"FH TSN\"]]\n#data_x = data1[[\"机型\",\"参考类型\",\"指令类型\",\"拆换原因\",\"部件装上飞行循环数\",\"部件装上使用循环数\",\n# \"部件拆下飞行循环数\",\"部件拆下使用循环数\",\"CY TSN\",\"FH TSN\"]]\ndata_x = data1[[\"机型\",\"安装年度\",\"部件装上飞行小时数-Range\",\"部件装上使用小时数-Range\",\"FH TSN-Range\", \"最近送修公司\",\"PlanePartType\"]]", "True 963\nFalse 142\nName: IsPlanned, dtype: int64\n" ], [ "time, survival_prob = kaplan_meier_estimator(data_y[\"IsPlanned\"], data_y[\"部件本次装机使用小时\"])\nplt.step(time, survival_prob, where=\"post\")\nplt.ylabel(\"est. 
probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")", "_____no_output_____" ], [ "# \"机型\",\"拆换年度\",\"部件装上飞行小时数-Range\",\"部件装上飞行循环数-Range\",\"部件装上使用小时数-Range\",\"部件装上使用循环数-Range\",\"CY TSN-Range\",\"FH TSN-Range\", \"最近送修公司\"\n#col = \"机型\"\n#col = \"参考类型\"\ncol = \"PlanePartType\"\n#col = \"安装年度\"\n#col = \"机型\"\n\n#print((data_x[\"最近送修公司\"]!=\"上海航新\") & (data_x[\"最近送修公司\"]!=\"PP\"))\n\ny = data_y\nx = data_x\n\nfor value in x[col].unique():\n mask = x[col] == value\n time_cell, survival_prob_cell = kaplan_meier_estimator(y[\"IsPlanned\"][mask],\n y[\"部件本次装机使用小时\"][mask])\n plt.step(time_cell, survival_prob_cell, where=\"post\", label=\"%s (n = %d)\" % (value, mask.sum()))\n\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"upper right\", prop=font)", "_____no_output_____" ], [ "# \"机型\",\"拆换年度\",\"部件装上飞行小时数-Range\",\"部件装上飞行循环数-Range\",\"部件装上使用小时数-Range\",\"部件装上使用循环数-Range\",\"CY TSN-Range\",\"FH TSN-Range\", \"最近送修公司\"\n#col = \"机型\"\n#col = \"参考类型\"\ncol = \"最近送修公司\"\n#col = \"安装年度\"\n#col = \"机型\"\n\n#print((data_x[\"最近送修公司\"]!=\"上海航新\") & (data_x[\"最近送修公司\"]!=\"PP\"))\n\nfilter1 = (data_x[\"最近送修公司\"]!=\"上海航新\") & (data_x[\"最近送修公司\"]!=\"PP\") & (data_x[\"最近送修公司\"]!=\"海航技术\")\ny = data_y[filter1]\nx = data_x[filter1]\n\nfor value in x[col].unique():\n mask = x[col] == value\n time_cell, survival_prob_cell = kaplan_meier_estimator(y[\"IsPlanned\"][mask],\n y[\"部件本次装机使用小时\"][mask])\n plt.step(time_cell, survival_prob_cell, where=\"post\", label=\"%s (n = %d)\" % (value, mask.sum()))\n\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"upper right\", prop=font)", "_____no_output_____" ], [ "#data_x.select_dtypes(exclude=['int','int64' 'float']).columns\ndata_x.describe()", "_____no_output_____" ], [ "#\"部件装上飞行小时数-Range\",\"部件装上飞行循环数-Range\",\"部件装上使用小时数-Range\",\"部件装上使用循环数-Range\",\"CY TSN-Range\",\"FH TSN-Range\",\n#\nx = data_x.copy()\ncat_features = [\"机型\", \"安装年度\",\"部件装上飞行小时数-Range\",\"部件装上使用小时数-Range\",\"FH TSN-Range\", \"最近送修公司\",\"PlanePartType\"]\n\nfor col in cat_features:\n x[col] = x[col].astype('category')\n\ndata_x_numeric = OneHotEncoder().fit_transform(x[cat_features])\ndata_x_numeric.head()", "_____no_output_____" ], [ "null_columns=data1.columns[data1.isnull().any()]\ndata1[null_columns].isnull().sum()", "_____no_output_____" ], [ "#data_y = data_y.as_matrix()\ny = data_y.to_records(index=False)\nestimator = CoxPHSurvivalAnalysis() #CoxnetSurvivalAnalysis()\nestimator.fit(data_x_numeric, y)", "_____no_output_____" ], [ "#pd.Series(estimator.coef_, index=data_x_numeric.columns)", "_____no_output_____" ], [ "prediction = estimator.predict(data_x_numeric)\nresult = concordance_index_censored(y[\"IsPlanned\"], y[\"部件本次装机使用小时\"], prediction)\nprint(result[0])\nresult = concordance_index_ipcw(y, y, prediction)\nprint(result[0])", "0.7553380581255218\n0.7547391738249756\n" ], [ "def fit_and_score_features(X, y):\n n_features = X.shape[1]\n scores = np.empty(n_features)\n m = CoxnetSurvivalAnalysis()\n for j in range(n_features):\n Xj = X[:, j:j+1]\n m.fit(Xj, y)\n scores[j] = m.score(Xj, y)\n return scores\n\nscores = fit_and_score_features(data_x_numeric.values, y)\npd.Series(scores, index=data_x_numeric.columns).sort_values(ascending=False)", "_____no_output_____" ], [ "x_new = data_x_numeric.loc[[46,77,200,593]]\n#print(x_new)\ndata_x.loc[[46,77,200,593]]", "_____no_output_____" ], [ "y[[46,77,200,593]]", "_____no_output_____" ], [ "pred_surv = 
estimator.predict_survival_function(x_new)\nfor i, c in enumerate(pred_surv):\n plt.step(c.x, c.y, where=\"post\", label=\"Sample %d\" % (i + 1))\nplt.ylabel(\"est. probability of survival $\\hat{S}(t)$\")\nplt.xlabel(\"time $t$\")\nplt.legend(loc=\"best\")", "_____no_output_____" ], [ "pipe = Pipeline([('encode', OneHotEncoder()),\n ('select', SelectKBest(fit_and_score_features, k=3)),\n ('model', CoxPHSurvivalAnalysis())])", "_____no_output_____" ], [ "param_grid = {'select__k': np.arange(1, data_x_numeric.shape[1] -3)}\ngcv = GridSearchCV(pipe, param_grid=param_grid, return_train_score=True, cv=3, iid=True)\ngcv.fit(x, y)\n\npd.DataFrame(gcv.cv_results_).sort_values(by='mean_test_score', ascending=False)", "/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import 
sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, 
consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: all coefficients are zero, consider decreasing alpha.\n import sys\n" ], [ "pipe.set_params(**gcv.best_params_)\npipe.fit(x, y)\n\nencoder, transformer, final_estimator = [s[1] for s in pipe.steps]\npd.Series(final_estimator.coef_, index=encoder.encoded_columns_[transformer.get_support()])", "_____no_output_____" ] ], [ [ "# Part2", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom sksurv.metrics import (concordance_index_censored,\n concordance_index_ipcw,\n cumulative_dynamic_auc)", "_____no_output_____" ], [ "data_x = data1[[\"安装年度\",\"部件装上飞行小时数\",\"部件装上使用小时数\",\"FH TSN\"]]", "_____no_output_____" ], [ "def df_to_sarray(df):\n \"\"\"\n Convert a pandas DataFrame object to a numpy structured array.\n This is functionally equivalent to but more efficient than\n np.array(df.to_array())\n\n :param df: the data frame to convert\n :return: a numpy structured array representation of df\n \"\"\"\n\n v = df.values\n cols = df.columns\n\n if False: # python 2 needs .encode() but 3 does not\n types = [(cols[i].encode(), df[k].dtype.type) for (i, k) in enumerate(cols)]\n else:\n types = [(cols[i], df[k].dtype.type) for (i, k) in enumerate(cols)]\n dtype = np.dtype(types)\n z = np.zeros(v.shape, dtype)\n for (i, k) in enumerate(z.dtype.names):\n z[:,i] = v[:, i]\n return z", "_____no_output_____" ], [ "y = data_y.to_records(index=False)\nx_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.2)#, random_state=1)\nx_train = x_train.values\nx_test = x_test.values", "_____no_output_____" ], [ "y_events_train = 
y_train[y_train['IsPlanned']==False]\ntrain_min, train_max = y_events_train[\"部件本次装机使用小时\"].min(), y_events_train[\"部件本次装机使用小时\"].max()\n\ny_events_test = y_test[y_test['IsPlanned']==False]\ntest_min, test_max = y_events_test[\"部件本次装机使用小时\"].min(), y_events_test[\"部件本次装机使用小时\"].max()\n\nassert train_min <= test_min < test_max < train_max, \\\n \"time range or test data is not within time range of training data.\"", "_____no_output_____" ], [ "times = np.percentile(data_y[\"部件本次装机使用小时\"], np.linspace(5, 95, 15))\nprint(times)", "[2.02000000e+01 1.18514286e+02 4.06142857e+02 9.61257143e+02\n 1.85705714e+03 2.84934286e+03 3.76962857e+03 5.19400000e+03\n 6.53765714e+03 8.26800000e+03 1.09074000e+04 1.40677429e+04\n 1.68197143e+04 2.17654286e+04 2.87464000e+04]\n" ], [ "import matplotlib\nmatplotlib.matplotlib_fname()", "_____no_output_____" ], [ "num_columns = [\"安装年度\",\"部件装上飞行小时数\",\"部件装上使用小时数\",\"FH TSN\"]\ndef plot_cumulative_dynamic_auc(risk_score, label, color=None):\n auc, mean_auc = cumulative_dynamic_auc(y_train, y_test, risk_score, times)\n \n plt.plot(times, auc, marker=\"o\", color=color, label=label)\n plt.legend(prop = font)\n \n plt.xlabel(\"time时间\",fontproperties=font)\n plt.ylabel(\"time-dependent AUC\")\n plt.axhline(mean_auc, color=color, linestyle=\"--\")\n\n \nfor i, col in enumerate(num_columns):\n plot_cumulative_dynamic_auc(x_test[:, i], col, color=\"C{}\".format(i))\n ret = concordance_index_ipcw(y_train, y_test, x_test[:, i], tau=times[-1])", "_____no_output_____" ] ], [ [ "# Part3", "_____no_output_____" ] ], [ [ "data_x = data1[[\"机型\",\"安装年度\",\"部件装上飞行小时数\",\"部件装上使用小时数\",\"FH TSN\", \"最近送修公司\",\"PlanePartType\"]]\n\ncat_features = [\"机型\", \"安装年度\", \"最近送修公司\",\"PlanePartType\"]\n\nfor col in cat_features:\n data_x[col] =data_x[col].astype('category')", "/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \n" ], [ "times = np.percentile(data_y[\"部件本次装机使用小时\"], np.linspace(5, 95, 15))\nprint(times)", "[2.02000000e+01 1.18514286e+02 4.06142857e+02 9.61257143e+02\n 1.85705714e+03 2.84934286e+03 3.76962857e+03 5.19400000e+03\n 6.53765714e+03 8.26800000e+03 1.09074000e+04 1.40677429e+04\n 1.68197143e+04 2.17654286e+04 2.87464000e+04]\n" ], [ "estimator = CoxPHSurvivalAnalysis() #CoxnetSurvivalAnalysis()\nestimator.fit(data_x_numeric, y)", "_____no_output_____" ], [ "from sklearn.pipeline import make_pipeline\n\ny = data_y.to_records(index=False)\n\nx_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.2)#, random_state=1)\n\ncph = make_pipeline(OneHotEncoder(), CoxPHSurvivalAnalysis())\ncph.fit(x_train, y_train)\n\nresult = concordance_index_censored(y_test[\"IsPlanned\"], y_test[\"部件本次装机使用小时\"], cph.predict(x_test))\nprint(result[0])\n\n# estimate performance on training data, thus use `va_y` twice.\nva_auc, va_mean_auc = cumulative_dynamic_auc(y_train, y_test, cph.predict(x_test), times)\n\nplt.plot(times, va_auc, marker=\"o\")\nplt.axhline(va_mean_auc, linestyle=\"--\")\nplt.xlabel(\"time from enrollment\")\nplt.ylabel(\"time-dependent AUC\")\nplt.grid(True)\n\nprint(y_test[\"部件本次装机使用小时\"])\nprint(cph.predict_survival_function(x_test))\nprint(y_test[\"部件本次装机使用小时\"] - cph.predict(x_test))", "0.756783634038926\n[24341 10021 11228 6 162 925 8692 
13401 6 1736 94 3197\n 7054 13193 24324 1243 2810 8395 12582 16474 452 15863 32718 32618\n 15790 22 10 14395 5263 26845 13 5123 7 7852 6 13400\n 15 6855 4764 5727 15920 13955 2382 15848 2098 40 8193 3853\n 18057 5836 6109 17069 2205 315 98 2489 3099 13996 2281 30424\n 609 65 15869 3877 1647 1935 3166 12358 4369 25760 537 23217\n 21621 19 681 16516 24324 11413 37029 19146 17661 15757 2080 66\n 170 7419 12465 18203 17153 12 93 8 2757 10922 2500 15018\n 6041 2393 11133 28173 807 26479 2229 8509 3175 10559 25 4369\n 5032 15454 38 3904 8059 7452 11100 2680 16 26662 18 4047\n 1971 10175 8266 328 7472 20799 22579 23477 6925 32025 473 7593\n 14913 38822 4118 15451 391 2984 5399 290 64 57 18 5916\n 1930 4227 3023 6833 18239 35806 253 3149 244 29731 15255 605\n 10 10321 427 15017 3450 6942 10 19081 6 226 5940 695\n 8 11548 5614 396 8241 4705 12 14125 6925 192 135 121\n 2614 869 10946 16951 14163 806 17077 3387 467 28105 147 189\n 10448 3045 6148 5677 5772 47 3629 11951 1427 8 27694 16668\n 366 2042 39129 457 12882 20070 9522 7810 11 34692 1750 529\n 24 1002 19601 8089 20000]\n" ] ], [ [ "# Part4", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas\nimport seaborn as sns\nfrom sklearn.model_selection import ShuffleSplit, GridSearchCV\n\nfrom sksurv.datasets import load_veterans_lung_cancer\nfrom sksurv.column import encode_categorical\nfrom sksurv.metrics import concordance_index_censored\nfrom sksurv.svm import FastSurvivalSVM\n\nsns.set_style(\"whitegrid\")", "_____no_output_____" ], [ "data_x = data1[[\"机型\",\"安装年度\",\"部件装上飞行小时数\",\"部件装上使用小时数\",\"FH TSN\", \"最近送修公司\",\"PlanePartType\"]]\n\ncat_features = [\"机型\", \"安装年度\", \"最近送修公司\",\"PlanePartType\"]\n\nfor col in cat_features:\n data_x[col] = data_x[col].astype('category')", "/root/anaconda2/envs/python3/lib/python3.6/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \n" ], [ "x = OneHotEncoder().fit_transform(data_x)#encode_categorical(data_x)", "_____no_output_____" ], [ "x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)#, random_state=1)\nestimator = FastSurvivalSVM(optimizer=\"rbtree\",rank_ratio=0.0, max_iter=1000, tol=1e-6, random_state=0, alpha=2.**-6)\nestimator.fit(x_train, y_train)", "_____no_output_____" ], [ "prediction = estimator.predict(x_test)\nresult = concordance_index_censored(y_test[\"IsPlanned\"], y_test[\"部件本次装机使用小时\"], prediction)\nprint(result[0])", "0.5744231561598682\n" ], [ "estimator.predict(x_train)", "_____no_output_____" ], [ "estimator = FastSurvivalSVM(optimizer=\"rbtree\", max_iter=1000, tol=1e-6, random_state=0)", "_____no_output_____" ], [ "def score_survival_model(model, X, y):\n prediction = model.predict(X)\n result = concordance_index_censored(y['IsPlanned'], y['部件本次装机使用小时'], prediction)\n return result[0]", "_____no_output_____" ], [ "param_grid = {'alpha': 2. 
** np.arange(-12, 13, 2)}\ncv = ShuffleSplit(n_splits=20, test_size=0.4, random_state=0)\ngcv = GridSearchCV(estimator, param_grid, scoring=score_survival_model,\n n_jobs=12, iid=False, refit=False,\n cv=cv)", "_____no_output_____" ], [ "param_grid", "_____no_output_____" ], [ "import warnings\ny = data_y.to_records(index=False)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\ngcv = gcv.fit(x, y)", "_____no_output_____" ], [ "gcv.best_score_, gcv.best_params_", "_____no_output_____" ], [ "def plot_performance(gcv):\n n_splits = gcv.cv.n_splits\n cv_scores = {\"alpha\": [], \"test_score\": [], \"split\": []}\n order = []\n for i, params in enumerate(gcv.cv_results_[\"params\"]): \n name = \"%.5f\" % params[\"alpha\"]\n order.append(name)\n for j in range(n_splits):\n vs = gcv.cv_results_[\"split%d_test_score\" % j][i]\n cv_scores[\"alpha\"].append(name)\n cv_scores[\"test_score\"].append(vs)\n cv_scores[\"split\"].append(j)\n df = pandas.DataFrame.from_dict(cv_scores)\n _, ax = plt.subplots(figsize=(11, 6))\n sns.boxplot(x=\"alpha\", y=\"test_score\", data=df, order=order, ax=ax)\n _, xtext = plt.xticks()\n for t in xtext:\n t.set_rotation(\"vertical\")", "_____no_output_____" ], [ "plot_performance(gcv)", "_____no_output_____" ], [ "from sksurv.svm import FastKernelSurvivalSVM\nfrom sksurv.kernels import clinical_kernel", "_____no_output_____" ], [ "x_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.5)#, random_state=1)\nkernel_matrix = clinical_kernel(x_train)\nkssvm = FastKernelSurvivalSVM(optimizer=\"rbtree\", kernel=\"precomputed\", random_state=0, alpha=2.**-6)\nkssvm.fit(kernel_matrix, y_train)", "_____no_output_____" ], [ "x_test.shape", "_____no_output_____" ], [ "kernel_matrix = clinical_kernel(x_test[0:552])\nprediction = kssvm.predict(kernel_matrix)\nresult = concordance_index_censored(y_test[0:552][\"IsPlanned\"], y_test[0:552][\"部件本次装机使用小时\"], prediction)\nprint(result[0])", "0.6438882846429413\n" ], [ "kernel_matrix = clinical_kernel(data_x)\nkssvm = FastKernelSurvivalSVM(optimizer=\"rbtree\", kernel=\"precomputed\", random_state=0, alpha=2.**-12)", "_____no_output_____" ], [ "kgcv = GridSearchCV(kssvm, param_grid, score_survival_model,\n n_jobs=12, iid=False, refit=False,\n cv=cv)", "_____no_output_____" ], [ "import warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nkgcv = kgcv.fit(kernel_matrix, y)", "_____no_output_____" ], [ "kgcv.best_score_, kgcv.best_params_", "_____no_output_____" ], [ "plot_performance(kgcv)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc6ba3f3effb62d01efb27b01216f1c645c7607
438,733
ipynb
Jupyter Notebook
notebooks/02.1-project-UMAP/macaque-syllable-umap.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/02.1-project-UMAP/macaque-syllable-umap.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/02.1-project-UMAP/macaque-syllable-umap.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
823.138837
210,164
0.952632
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom tqdm.autonotebook import tqdm\nfrom joblib import Parallel, delayed\nimport umap\nimport pandas as pd", "/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ], [ "from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir\nfrom avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms", "_____no_output_____" ] ], [ [ "### Collect data", "_____no_output_____" ] ], [ [ "DATASET_ID = 'macaque_coo'", "_____no_output_____" ], [ "from avgn.visualization.projections import (\n scatter_projections,\n draw_projection_transitions,\n)", "_____no_output_____" ], [ "df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'macaque.pickle'", "_____no_output_____" ], [ "syllable_df = pd.read_pickle(df_loc)", "_____no_output_____" ], [ "syllable_df[:3]", "_____no_output_____" ], [ "fig, axs = plt.subplots(ncols=4, figsize=(24,6))\naxs[0].hist([np.max(i) for i in syllable_df.spectrogram.values], bins=50);axs[0].set_title('max')\naxs[1].hist([np.sum(i) for i in syllable_df.spectrogram.values], bins=50);axs[1].set_title('sum')\naxs[2].hist((syllable_df.end_time - syllable_df.start_time).values, bins = 50); axs[2].set_title('len')\naxs[3].hist([np.min(i) for i in syllable_df.spectrogram.values], bins=50);axs[3].set_title('min')", "_____no_output_____" ] ], [ [ "### cluster", "_____no_output_____" ] ], [ [ "specs = list(syllable_df.spectrogram.values)\nspecs = [i / np.max(i) for i in specs]\nspecs_flattened = flatten_spectrograms(specs)\nnp.shape(specs_flattened)", "_____no_output_____" ], [ "from cuml.manifold.umap import UMAP as cumlUMAP", "_____no_output_____" ], [ "cuml_umap = cumlUMAP(min_dist=0.25)\nz = np.vstack(list(cuml_umap.fit_transform(specs_flattened)))", "/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/ipykernel_launcher.py:1: UserWarning: Parameter should_downcast is deprecated, use convert_dtype in fit, fit_transform and transform methods instead. \n \"\"\"Entry point for launching an IPython kernel.\n/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/ipykernel_launcher.py:2: UserWarning: Parameter should_downcast is deprecated, use convert_dtype in fit, fit_transform and transform methods instead. 
\n \n" ] ], [ [ "### variation across populations", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(15,15))\nscatter_projections(projection=z, alpha=.5, labels = syllable_df.indv.values, s=10, ax = ax)\n#ax.set_xlim([-15,15])", "_____no_output_____" ], [ "from avgn.visualization.projections import scatter_spec", "_____no_output_____" ], [ "np.shape(z), np.shape(specs)", "_____no_output_____" ], [ "from avgn.utils.general import save_fig\nfrom avgn.utils.paths import FIGURE_DIR, ensure_dir", "_____no_output_____" ], [ "scatter_spec(\n z,\n specs,\n column_size=15,\n #x_range = [-5.5,7],\n #y_range = [-10,10],\n pal_color=\"hls\",\n color_points=False,\n enlarge_points=20,\n figsize=(10, 10),\n scatter_kwargs = {\n 'labels': syllable_df.indv.values,\n 'alpha':1.0,\n 's': 3,\n \"color_palette\": 'Set2',\n 'show_legend': False\n },\n matshow_kwargs = {\n 'cmap': plt.cm.Greys\n },\n line_kwargs = {\n 'lw':1,\n 'ls':\"solid\",\n 'alpha':0.25,\n },\n draw_lines=True\n);\nsave_fig(FIGURE_DIR / 'macaque_coo', dpi=300, save_jpg=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cbc6be53dc44ec80ceefd86fe0bf934fb8db7bdf
12,930
ipynb
Jupyter Notebook
A3.ipynb
wangjue2020/ImageSegmentationByUNet
8e4d0f863cf1a45bdd470dca2353ab82b84d426e
[ "Apache-2.0" ]
null
null
null
A3.ipynb
wangjue2020/ImageSegmentationByUNet
8e4d0f863cf1a45bdd470dca2353ab82b84d426e
[ "Apache-2.0" ]
null
null
null
A3.ipynb
wangjue2020/ImageSegmentationByUNet
8e4d0f863cf1a45bdd470dca2353ab82b84d426e
[ "Apache-2.0" ]
null
null
null
12,930
12,930
0.642305
[ [ [ "!pip install torch\n!pip3 install torchvision", "Requirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (1.10.0+cu111)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch) (3.10.0.2)\nRequirement already satisfied: torchvision in /usr/local/lib/python3.7/dist-packages (0.11.1+cu111)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torchvision) (1.19.5)\nRequirement already satisfied: torch==1.10.0 in /usr/local/lib/python3.7/dist-packages (from torchvision) (1.10.0+cu111)\nRequirement already satisfied: pillow!=8.3.0,>=5.3.0 in /usr/local/lib/python3.7/dist-packages (from torchvision) (7.1.2)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.10.0->torchvision) (3.10.0.2)\n" ], [ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport os\nfrom torch.utils.data import Dataset\nimport cv2\nfrom tqdm import tqdm\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Load the Drive helper and mount\nfrom google.colab import drive\n\n# This will prompt for authorization.\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "import tensorflow as tf\ndevice_name = tf.test.gpu_device_name()\nif device_name != '/device:GPU:0':\n raise SystemError('GPU device not found')\nprint('Found GPU at: {}'.format(device_name))", "Found GPU at: /device:GPU:0\n" ], [ "class UnetModel(nn.Module):\n def conv(self, in_channels, out_channels):\n block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=(1,1)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=3,padding=(1,1)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n return block\n \n def up_conv(self, in_channels, out_channels):\n block = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(in_channels, out_channels, kernel_size=3,padding=(1,1)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n return block\n \n def __init__(self, in_channel, out_channel):\n super(UnetModel, self).__init__()\n \n self.conv1 = self.conv(in_channel,64)\n self.conv1_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv2 = self.conv(64, 128)\n self.conv2_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv3 = self.conv(128, 256)\n self.conv3_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv4 = self.conv(256, 512)\n self.conv4_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv5 = self.conv(512, 1024)\n \n self.up_conv4 = self.up_conv(1024, 512)\n self.up4_conv =self.conv(1024,512)\n self.up_conv3 = self.up_conv(512, 256)\n self.up3_conv = self.conv(512,256)\n self.up_conv2 = self.up_conv(256,128)\n self.up2_conv = self.conv(256,128)\n self.up_conv1 = self.up_conv(128,64)\n self.up1_conv = self.conv(128,64)\n \n self.conv_1x1 = nn.Conv2d(64,out_channel,kernel_size=1)\n self.sigmoid = nn.Sigmoid()\n def forward(self, x):\n out1 = self.conv1(x)\n \n out2 = self.conv1_maxpool(out1)\n out2 = self.conv2(out2)\n \n out3 = self.conv2_maxpool(out2)\n out3 = self.conv3(out3)\n \n out4 = self.conv3_maxpool(out3)\n out4 = self.conv4(out4)\n \n out5 = self.conv4_maxpool(out4)\n out5 = self.conv5(out5)\n \n exp5 = self.up_conv4(out5)\n exp5 = torch.cat((out4, exp5), dim=1)\n exp5 = self.up4_conv(exp5)\n \n exp4 = self.up_conv3(exp5)\n exp4 = torch.cat((out3, exp4), dim=1)\n exp4 = self.up3_conv(exp4)\n \n exp3 = 
self.up_conv2(exp4)\n exp3 = torch.cat((out2, exp3), dim=1)\n exp3 = self.up2_conv(exp3)\n \n exp2 = self.up_conv1(exp3)\n exp2 = torch.cat((out1, exp2), dim=1)\n exp2 = self.up1_conv(exp2)\n \n exp1 = self.conv_1x1(exp2)\n exp1 = self.sigmoid(exp1)\n return exp1\n \n \n", "_____no_output_____" ], [ "class MyDataset(Dataset):\n\n def __init__(self, len, home_directory, noise=2, mode=\"Train\"):\n self.len = len\n self.examples = []\n self.iter_index = 0\n self.X = torch.empty((len, 128,128))\n self.Y = torch.empty((len,128,128), dtype=torch.long)\n self.input_directory = os.path.join(home_directory, mode, 'input')\n self.mask_directory = os.path.join(home_directory, mode, 'mask')\n \n print(\"dataset input path {}\".format(self.input_directory))\n print(\"dataset mask path {}\".format(self.mask_directory))\n \n input_names = os.listdir(self.input_directory)\n input_names.sort()\n mask_names = os.listdir(self.mask_directory)\n mask_names.sort()\n \n self.set_dataset(self.input_directory, input_names, True)\n self.set_dataset(self.mask_directory, mask_names, False)\n\n \n def set_dataset(self, directory, names, input_na = True):\n # print(self.len)\n # print(len(names))\n # print(names)\n index = 0\n for name in names:\n img_path = directory + '/' + name\n img = cv2.imread(img_path) \n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img =img/255\n resize_img = cv2.resize(img, (128,128))\n if input_na:\n # print(index)\n self.X[index] = torch.tensor(resize_img)\n else:\n \n resize_img = torch.from_numpy(resize_img).float()\n self.Y[index] = resize_img\n index += 1 \n \n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n return (self.X[idx], self.Y[idx])\n \n", "_____no_output_____" ], [ "dataset_train = MyDataset(60,'/content/drive/My Drive/A3/cat_data/cat_data')\ntrainloader = torch.utils.data.DataLoader(dataset_train, batch_size=20, shuffle=True)\n\n# dataset_test = MyDataset(20,'/content/drive/My Drive/A3/cat_data/cat_data', 'Test')\n\nmodel = UnetModel(1, 1)\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.7)\n\nepochs = 10\nmodel.train()\na = True\nfor e in range(epochs):\n running_loss = 0\n for images, labels in tqdm(trainloader):\n optimizer.zero_grad()\n images = images.unsqueeze(1)\n labels = labels.unsqueeze(1)\n labels = labels.float()\n log_ps = model(images)\n loss = criterion(log_ps, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n else:\n print(f\"Traning loss: {running_loss/len(trainloader)}\")\n", "dataset input path /content/drive/My Drive/A3/cat_data/cat_data/Train/input\ndataset mask path /content/drive/My Drive/A3/cat_data/cat_data/Train/mask\n" ], [ "dataset_test = MyDataset(21,'/content/drive/My Drive/A3/cat_data/cat_data', mode='Test')\ntestloader = torch.utils.data.DataLoader(dataset_test, batch_size=20, shuffle=True)\nwith torch.no_grad():\n for images, labels in tqdm(testloader):\n optimizer.zero_grad()\n images = images.unsqueeze(1)\n labels = labels.unsqueeze(1)\n log_ps = model(images)\n loss = criterion(log_ps, labels)\n running_loss += loss.item()\nprint(f\"Test loss: {running_loss/len(testloader)}\")", "dataset input path /content/drive/My Drive/A3/cat_data/cat_data/Test/input\ndataset mask path /content/drive/My Drive/A3/cat_data/cat_data/Test/mask\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cbc6c4ec26e1463af18e52f66607263be802df29
9,630
ipynb
Jupyter Notebook
Notebooks/Conditional_Probability/.ipynb_checkpoints/Conditional_probability-checkpoint.ipynb
sidneyarcidiacono/QL1.1
ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956
[ "MIT" ]
4
2020-08-28T06:47:46.000Z
2020-11-24T18:43:22.000Z
Notebooks/Conditional_Probability/.ipynb_checkpoints/Conditional_probability-checkpoint.ipynb
sidneyarcidiacono/QL1.1
ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956
[ "MIT" ]
2
2020-03-31T15:47:48.000Z
2020-06-24T18:25:19.000Z
Notebooks/Conditional_Probability/.ipynb_checkpoints/Conditional_probability-checkpoint.ipynb
sidneyarcidiacono/QL1.1
ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956
[ "MIT" ]
20
2020-01-29T23:58:16.000Z
2021-07-29T02:38:35.000Z
28.491124
155
0.489408
[ [ [ "## Conditional Probability\n\n- Conditional probability has many applications, we learn it by mentioning its application in text analysis\n\n- Assume this small dataset is given:\n\n<img src=\"spam_ham_data_set.png\" width=\"600\" height=\"600\">", "_____no_output_____" ], [ "## Question: What is the probability that an email be spam? What is the probability that an email be ham?\n\n- $P(spam) = ?$\n\n- $P(ham) = ?$", "_____no_output_____" ], [ "## Question: We know an email is spam, what is the probability that password be a word in it? (What is the frequency of password in a spam email?)\n\n- Hint: Create the dictionary of spam where its key would be unique words in spam emails and the value shows the occurance of that word", "_____no_output_____" ] ], [ [ "spam = {\n \"password\": 2,\n \"review\": 1,\n \"send\": 3,\n \"us\": 3,\n \"your\": 3,\n \"account\": 1\n}", "_____no_output_____" ] ], [ [ "$P(password \\mid spam) = 2/(2+1+3+3+3+1) = 2/13$ ", "_____no_output_____" ] ], [ [ "# or \np_password_given_spam = spam['password']/sum(spam.values())\nprint(p_password_given_spam)", "0.15384615384615385\n" ] ], [ [ "## Question: We know an email is ham, what is the probability that password be a word in it? (What is the frequency of password in a ham email?)\n\n- Hint: Create the dictionary of ham where its key would be unique words in spam emails and the value shows the occurance of that word", "_____no_output_____" ] ], [ [ "ham = {\n \"password\": 1,\n \"review\": 2,\n \"send\": 1,\n \"us\": 1,\n \"your\": 2,\n \"account\": 0\n}", "_____no_output_____" ] ], [ [ "$P(password \\mid ham) = 1/(1+2+1+1+1+0) = 1/6$ ", "_____no_output_____" ] ], [ [ "# or \np_password_given_ham = ham['password']/sum(ham.values())\nprint(p_password_given_ham)", "0.16666666666666666\n" ] ], [ [ "## Question: Assume we have seen password in an email, what is the probability that the email be spam?\n\n- $P(spam \\mid password) = ?$\n\n- Hint: Use Bayes' rule:\n\n$P(spam \\mid password) = (P(password \\mid spam) P(spam))/ P(password)$ \n\n$P(password) = P(password \\mid spam) P(spam) + P(password \\mid ham) P(ham)$\n", "_____no_output_____" ] ], [ [ "p_spam = 4/6\np_ham = 2/6\np_password = p_password_given_spam*p_spam + p_password_given_ham*p_ham \nprint(p_password)\np_spam_given_password = p_password_given_spam*p_spam/p_password\nprint(p_spam_given_password)", "0.1581196581196581\n0.6486486486486487\n" ] ], [ [ "## Activity: Do the above computation for each word by writing code", "_____no_output_____" ] ], [ [ "p_spam = 4/6\np_ham = 2/6\nls1 = []\nls2 = []\nfor i in spam:\n print(i)\n p_word_given_spam = # TODO\n p_word_given_ham = # TODO\n # obtain the probability of each word by assuming the email is spam\n # obtain the probability of each word by assuming the email is ham \n \n #TODO\n \n # obtain the probability that for a seen word it belongs to spam email\n # obtain the probability that for a seen word it belongs to ham email\n \n #TODO\n", "password\nreview\nsend\nus\nyour\naccount\n" ] ], [ [ "## Quiz: Compute the expected value of a fair dice\n\nBy Definition, the expected value of random events (a random variable) like rolling a dice is computed as: \n\n$E(X) = \\sum_{i=1}^{6}i * P(dice = i)$\n\n<img src=\"dice.jpg\" width=\"100\" height=\"100\">\n\n1- For a fair dice,\n\ncompute the probability that when roll the dice then 1 apprears (P(dice = 1)), \n\ncompute the probability that when roll the dice then 2 apprears (P(dice = 2)), \n\n.\n\n.\n\n.\n\ncompute the probability that when roll the dice 
then 2 apprears (P(dice = 6))\n\n2- Compute $E(X)$ from the above steps.", "_____no_output_____" ], [ "### Answer:\n\nThe expected value for a fair dice is:\n\n$E(X) = (1*1/6) + (2*1/6) + (3*1/6)+ (4*1/6) + (5*1/6) + (6*1/6)$\n\n$E(X) = 3.5$\n ", "_____no_output_____" ] ], [ [ "# We can show that E(X) is the mean of the random variable\nimport numpy as np\n# lets roll the dice 1000 times \ndice = np.random.randint(low=1.0, high=7.0, size=1000)\nprint(dice)\n# Compute the mean of dice list\nprint(np.mean(dice))\nprint(sum(dice)/len(dice))", "[1 4 1 2 5 4 2 5 3 5 5 1 5 6 4 4 1 3 5 6 6 1 2 4 6 4 1 2 3 4 4 3 1 6 6 2 2\n 1 6 4 2 4 2 1 6 6 4 2 2 5 2 2 6 1 1 5 4 4 5 1 5 4 1 6 3 5 5 5 2 3 6 4 6 4\n 6 2 2 4 5 5 3 1 3 1 4 3 2 2 1 4 3 6 5 2 3 6 3 3 2 4 1 2 2 4 4 6 3 3 3 4 4\n 6 6 5 5 1 2 3 6 5 3 3 4 2 5 1 2 3 5 6 1 4 4 1 6 3 5 2 4 3 5 3 1 4 1 3 6 3\n 4 2 4 3 1 2 5 6 1 3 4 3 3 4 1 3 3 5 1 1 6 2 6 5 2 2 6 5 1 4 4 4 3 6 3 5 5\n 2 6 5 3 2 6 2 2 5 3 3 5 6 5 6 2 3 3 2 6 2 6 3 1 5 4 1 5 6 5 4 6 4 5 3 5 4\n 2 5 6 6 1 5 1 6 1 1 1 1 1 2 2 2 3 1 2 3 3 5 3 4 1 4 5 5 4 3 3 3 5 3 4 1 5\n 2 1 2 6 5 3 6 3 4 4 3 4 4 2 4 5 5 4 4 1 2 3 3 2 4 4 5 4 2 3 5 5 1 3 5 1 4\n 1 3 4 1 5 4 4 3 4 5 3 1 6 6 1 5 3 6 1 6 5 6 4 5 6 5 4 2 6 5 4 6 3 2 2 6 4\n 3 5 2 2 3 4 2 3 1 1 3 6 2 1 2 4 6 4 2 3 1 6 3 3 2 6 6 5 2 5 1 3 4 5 6 3 3\n 6 6 3 4 2 1 6 1 4 5 1 3 1 4 2 1 6 2 5 6 6 2 5 5 5 3 4 2 4 1 3 1 2 1 3 6 3\n 1 1 1 5 2 3 2 1 1 4 5 3 2 1 2 5 6 5 1 2 1 3 5 2 3 1 6 1 5 5 6 4 4 6 4 3 3\n 2 6 3 1 5 5 3 6 3 3 3 4 3 5 4 1 2 6 5 3 2 1 4 6 4 2 4 6 5 3 1 1 5 4 6 6 3\n 6 5 2 4 5 3 6 3 1 4 1 1 3 5 5 4 2 2 4 6 1 5 1 5 6 1 1 4 6 2 6 6 3 1 5 3 1\n 2 6 5 2 1 3 5 6 3 3 2 5 6 6 5 6 5 1 6 3 3 6 6 6 6 6 1 2 4 6 4 4 3 5 5 1 4\n 1 2 3 2 3 6 4 3 1 1 4 5 5 2 5 6 1 6 2 6 5 3 5 2 2 2 3 5 4 3 5 2 5 4 4 1 5\n 6 4 2 1 2 2 2 1 5 1 2 6 2 6 5 1 6 4 6 4 3 1 3 5 2 5 2 3 3 1 6 1 2 6 3 6 5\n 1 3 3 1 4 6 4 4 1 5 2 6 2 4 4 5 4 1 4 6 6 4 3 2 1 4 6 2 2 5 5 2 2 2 5 1 5\n 5 6 1 3 2 1 3 6 5 2 3 4 4 4 1 1 5 6 1 2 5 2 5 1 6 6 2 1 2 6 1 5 5 1 3 1 3\n 6 6 6 6 3 5 1 4 3 6 3 5 5 2 4 4 2 2 2 5 1 5 4 5 5 4 6 3 6 3 1 1 4 5 1 1 1\n 4 4 1 1 5 1 6 2 1 1 4 1 1 5 6 1 1 3 3 6 2 6 1 5 4 2 3 2 6 1 2 6 3 3 2 1 3\n 3 3 6 3 4 1 6 5 4 1 4 5 1 5 2 5 3 2 2 3 6 2 6 5 3 2 6 1 5 4 2 6 2 2 3 2 5\n 6 5 4 3 3 3 2 6 5 3 4 5 3 2 5 4 2 2 6 5 2 6 1 6 2 3 2 5 2 5 5 4 3 6 3 4 1\n 4 6 2 5 5 1 4 1 6 3 2 3 4 1 4 5 4 4 1 1 6 2 1 4 1 6 5 5 2 6 6 4 1 1 1 1 5\n 6 1 2 5 2 6 2 3 6 2 4 5 4 6 1 1 4 6 2 3 4 4 1 5 3 6 5 6 3 4 1 2 6 5 1 4 6\n 4 4 6 2 1 3 4 5 4 2 2 4 5 5 5 3 3 3 4 2 2 6 1 6 6 5 5 6 6 4 1 4 1 1 2 2 2\n 6 2 5 2 5 4 1 4 6 1 4 4 5 3 6 3 3 3 2 3 4 5 4 4 1 6 3 3 2 3 2 6 6 5 5 4 5\n 4]\n3.508\n3.508\n" ] ] ]
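The "Activity" cell in this notebook is left with TODO placeholders. One possible completion, reusing only the spam/ham word counts and the priors P(spam)=4/6 and P(ham)=2/6 already given in the earlier cells, is:

```python
spam = {"password": 2, "review": 1, "send": 3, "us": 3, "your": 3, "account": 1}
ham = {"password": 1, "review": 2, "send": 1, "us": 1, "your": 2, "account": 0}
p_spam, p_ham = 4 / 6, 2 / 6

for word in spam:
    p_word_given_spam = spam[word] / sum(spam.values())   # word frequency in spam
    p_word_given_ham = ham[word] / sum(ham.values())      # word frequency in ham
    p_word = p_word_given_spam * p_spam + p_word_given_ham * p_ham   # total probability
    p_spam_given_word = p_word_given_spam * p_spam / p_word          # Bayes' rule
    print(f"P(spam | {word}) = {p_spam_given_word:.3f}")
```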
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cbc6cda1b8f0bab7e5aeac36e31f8b79293affd1
68,658
ipynb
Jupyter Notebook
Chapter 12 - Principal Components Analysis with scikit-learn/Chapter12.ipynb
m7adeel/Practical-Discrete-Mathematics
c658728639fc9a8ef9a9f9a3640cf96beca13852
[ "MIT" ]
36
2021-03-24T07:03:13.000Z
2022-03-23T04:20:33.000Z
Chapter 12 - Principal Components Analysis with scikit-learn/Chapter12.ipynb
bestcourses-ai/Practical-Discrete-Mathematics-with-Python
7637b52c48f2eb74a1c96511b1c9c9a5b7375e1c
[ "MIT" ]
null
null
null
Chapter 12 - Principal Components Analysis with scikit-learn/Chapter12.ipynb
bestcourses-ai/Practical-Discrete-Mathematics-with-Python
7637b52c48f2eb74a1c96511b1c9c9a5b7375e1c
[ "MIT" ]
18
2021-03-15T09:41:13.000Z
2022-03-23T04:21:35.000Z
142.443983
32,524
0.873605
[ [ [ "# Chapter 12 - Principal Components Analysis with scikit-learn\n\nThis notebook contains code accompanying Chapter 12 Principal Components Analysis with scikit-learn in *Practical Discrete Mathematics* by Ryan T. White and Archana Tikayat Ray.\n\n## Eigenvalues and eigenvectors, orthogonal bases\n\n### Example: Pizza nutrition", "_____no_output_____" ] ], [ [ "import pandas as pd \ndataset = pd.read_csv('pizza.csv')\ndataset.head()", "_____no_output_____" ] ], [ [ "### Example: Computing eigenvalues and eigenvectors", "_____no_output_____" ] ], [ [ "import numpy as np\nA = np.array([[3,1], [1,3]])\nl, v = np.linalg.eig(A)\nprint(\"The eigenvalues are:\\n \",l)\nprint(\"The eigenvectors are:\\n \", v)", "The eigenvalues are:\n [4. 2.]\nThe eigenvectors are:\n [[ 0.70710678 -0.70710678]\n [ 0.70710678 0.70710678]]\n" ] ], [ [ "## The scikit-learn implementation of PCA\n\nWe will start by importing the dataset and then dropping the brand column from it. This is done to make sure that all our feature variables are numbers and hence can be scaled/normalized. We will then create another variable called target which will contain the names of the brands of pizzas.", "_____no_output_____" ] ], [ [ "import pandas as pd\ndataset = pd.read_csv('pizza.csv')\n#Dropping the brand name column before standardizing the data\ndf_num = dataset.drop([\"brand\"], axis=1)\n\n# Setting the brand name column as the target variable\ntarget = dataset['brand']", "_____no_output_____" ] ], [ [ "Now that we have the dataset in order, we will then normalize the columns of the dataset to make sure that the mean for a variable is 0 and the variance is 1 and then we will run PCA on the dataset.", "_____no_output_____" ] ], [ [ "#Scaling the data\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(df_num)\nscaled_data = scaler.transform(df_num)\n\n#Applying PCA to the scaled data\nfrom sklearn.decomposition import PCA\n\n#Reducing the dimesions to 2 components so that we can have a 2D visualization\npca = PCA(n_components = 2)\npca.fit(scaled_data)\n\n#Applying to our scaled dataset\nscaled_data_pca = pca.transform(scaled_data)\n\n#Check the shape of the original dataset and the new dataset\nprint(\"The dimensions of the original dataset is: \", scaled_data.shape)\nprint(\"The dimensions of the dataset after performing PCA is: \", scaled_data_pca.shape)", "The dimensions of the original dataset is: (300, 7)\nThe dimensions of the dataset after performing PCA is: (300, 2)\n" ] ], [ [ "Now we have reduced our 7-dimensional dataset to its 2 principal components as can be seen from the dimensions shown above. We will move forward with plotting the principal components to check whether 2 principal components were enough to capture the variability in the dataset – the different nutritional content of pizzas produced by different companies.", "_____no_output_____" ] ], [ [ "#Plotting the principal components\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.scatterplot(scaled_data_pca[:,0], scaled_data_pca[:,1], target)\nplt.legend(loc=\"best\")\nplt.gca().set_aspect(\"equal\")\nplt.xlabel(\"Principal Component 1\")\nplt.ylabel(\"Principal Component 2\")\nplt.show()", "C:\\Users\\Ryan\\anaconda3\\envs\\DL\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y, hue. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ] ], [ [ "Now, we will move on to perform PCA in a way where we do not choose the number of desired principal components, rather we choose the number of principal components that add up to a certain desired variance. The Python implementation of this is very similar to the previous way with very slight changes to the code as shown below.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndataset = pd.read_csv('pizza.csv')\n\n#Dropping the brand name column before standardizing the data\ndf_num = dataset.drop([\"brand\"], axis=1)\n\n# Setting the brand name column as the target variable\ntarget = dataset['brand']\n\n#Scaling the data (Step 1)\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(df_num)\nscaled_data = scaler.transform(df_num)\n\n#Applying PCA to the scaled data\nfrom sklearn.decomposition import PCA\n\n#Setting the variance to 0.95\npca = PCA(n_components = 0.95)\npca.fit(scaled_data)\n\n#Applying to our scaled dataset\nscaled_data_pca = pca.transform(scaled_data)\n\n#Check the shape of the original dataset and the new dataset\nprint(\"The dimensions of the original dataset are: \", scaled_data.shape)\nprint(\"The dimensions of the dataset after performing PCA is: \", scaled_data_pca.shape)", "The dimensions of the original dataset are: (300, 7)\nThe dimensions of the dataset after performing PCA is: (300, 3)\n" ] ], [ [ "As we can see from the above output, 3 principal components are required to capture 95% of the variance in the dataset. This means that by choosing 2 principal directions previously, we were capturing < 95% of the variance in the dataset. Despite capturing < 95% of the variance, we were able to visualize the fact that the pizzas produced by different companies have different nutritional contents.", "_____no_output_____" ], [ "## An application to real-world data\n\nThe first step is to import the data as shown below. It is going to take some time since it is a big dataset, hence hang tight. The dataset contains images of 70000 digits (0-9) where each image has 784 features.", "_____no_output_____" ] ], [ [ "#Importing the dataset\nfrom sklearn.datasets import fetch_openml\nmnist_data = fetch_openml('mnist_784', version = 1)\n\n# Choosing the independent (X) and dependent variables (y)\nX,y = mnist_data[\"data\"], mnist_data[\"target\"]", "_____no_output_____" ] ], [ [ "Now that we have the dataset imported, we will move on to visualize the image of a digit to get familiar with the dataset. For visualization, we will use the `matplotlib` library. We will visualize the 50000th digit image. Feel free to check out other digit images of your choice – make sure to use an index between 0 and 69999. We will set colormap to \"binary\" to output a grayscale image. ", "_____no_output_____" ] ], [ [ "#Plotting one of the digits\nimport matplotlib.pyplot as plt\nplt.figure(1)\n#Plotting the 50000th digit\ndigit = X[50000]\n#Reshaping the 784 features into a 28x28 matrix\ndigit_image = digit.reshape(28,28)\n\nplt.imshow(digit_image, cmap='binary')\nplt.show()", "_____no_output_____" ] ], [ [ "Next, we will apply PCA to this dataset to reduce its dimension from $28*28=784$ to a lower number. 
We will plot the proportion of the variation that is reflected by PCA-reduced dimensional data of different dimensions.", "_____no_output_____" ] ], [ [ "#Scaling the data\nfrom sklearn.preprocessing import StandardScaler\nscaled_mnist_data = StandardScaler().fit_transform(X)\nprint(scaled_mnist_data.shape)\n\n#Applying PCA to ur dataset\nfrom sklearn.decomposition import PCA\n\npca = PCA(n_components=784)\nmnist_data_pca = pca.fit_transform(scaled_mnist_data)\n\n#Calculating cumulative variance captured by PCs\nimport numpy as np\nvariance_percentage = pca.explained_variance_/np.sum(pca.explained_variance_)\n\n#Calculating cumulative variance\ncumulative_variance = np.cumsum(variance_percentage)\n\n#Plotting cumalative variance\nimport matplotlib.pyplot as plt\nplt.figure(2)\nplt.plot(cumulative_variance)\nplt.xlabel('Number of principal components')\nplt.ylabel('Cumulative variance explained by PCs')\nplt.grid()\nplt.show()", "(70000, 784)\n" ] ] ]
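The chapter's cells introduce eigenvalues with np.linalg.eig and then switch to scikit-learn's PCA. A small sketch, on made-up correlated data, of how the two views connect: the eigenvalues of the covariance matrix equal PCA's per-component explained variances.

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 5)) @ rng.normal(size=(5, 5))   # made-up correlated data
Xc = X - X.mean(axis=0)                                   # center the columns

cov_eigvals = np.linalg.eigh(np.cov(Xc, rowvar=False))[0][::-1]   # descending order

pca = PCA().fit(Xc)
print(np.allclose(cov_eigvals, pca.explained_variance_))          # True
```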
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cbc709daf98881bc549d08d26b89bad3518c0a40
957
ipynb
Jupyter Notebook
Tests.ipynb
lneal5/Fragrance-Trends-V1
40edd65028c20cf7421c940677a0fbe10b8c9ba9
[ "MIT" ]
null
null
null
Tests.ipynb
lneal5/Fragrance-Trends-V1
40edd65028c20cf7421c940677a0fbe10b8c9ba9
[ "MIT" ]
null
null
null
Tests.ipynb
lneal5/Fragrance-Trends-V1
40edd65028c20cf7421c940677a0fbe10b8c9ba9
[ "MIT" ]
null
null
null
22.785714
228
0.498433
[ [ [ "<a href=\"https://colab.research.google.com/github/lneal5/Fragrance-Trends-V1/blob/main/Tests.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]