Dataset schema (one row per repository; types and observed ranges as reported by the viewer):

| Column | Type | Observed values |
|---|---|---|
| repo_name | string | lengths 5 to 114 |
| repo_url | string | lengths 24 to 133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string | 209 classes |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M; nullable (⌀) |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string | 17 classes |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string | 115 classes |
| files | list | lengths 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
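Each row pairs repository-level metadata with a `files` list of per-file records. As a minimal sketch of working with this layout (assuming the table is published as a Hugging Face dataset; `org/source-code-corpus` is a placeholder ID, and the field names come from the schema above):

```python
from datasets import load_dataset  # pip install datasets

# Placeholder dataset ID -- substitute the actual Hub path of this dataset.
ds = load_dataset("org/source-code-corpus", split="train", streaming=True)

# Each record mirrors the schema above: repository-level metadata plus a
# `files` list holding per-file statistics and the raw `text` payload.
record = next(iter(ds))
print(record["repo_name"], record["branch_name"], record["num_files"])
for f in record["files"]:
    print(f["path"], f["language"], f["length_bytes"])
```

Streaming avoids downloading the full corpus just to inspect a few records, which matters here since a single row can bundle up to 13.2k files.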
| Field | Value |
|---|---|
| repo_name | srikanthmalla/stereo |
| repo_url | https://github.com/srikanthmalla/stereo |
| snapshot_id | 361b7a34de6a52476e199c6ad1c74f455011b4d8 |
| revision_id | 28fa95963ec870577b280deb05d4db12c5b450de |
| directory_id | eb694eed4c24996edd769d496a172354cc83b3c7 |
| branch_name | refs/heads/master |
| visit_date | 2021-01-01T20:07:11.045689 |
| revision_date | 2017-08-02T00:39:04 |
| committer_date | 2017-08-02T00:39:04 |
| github_id | 98,767,408 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_created_at | null |
| gha_updated_at | null |
| gha_pushed_at | null |
| gha_language | null |
| num_files | 3 |

files:
[
{
"alpha_fraction": 0.6819671988487244,
"alphanum_fraction": 0.7272131443023682,
"avg_line_length": 35.33333206176758,
"blob_id": "5adc63eb3f4a9f24f905b4f677d6e258a782448f",
"content_id": "5a21affd18fab1de80b98137bfac2b2d6e4b9871",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1525,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 42,
"path": "/main.py",
"repo_name": "srikanthmalla/stereo",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nimport euromav.left_cam as L\nimport euromav.right_cam as R\nfrom draw_epipolarlines import * \n\nleft_frame=cv2.imread('./euromav/left.png')\nright_frame=cv2.imread('./euromav/right.png')\n\nh, w = left_frame.shape[:2] # both frames should be of same shape\n\n#transformation between two cameras\nT=np.matmul(np.linalg.inv(R.transformation),L.transformation)\nrotation=T[0:3,0:3]\ntranslation=T[0:3,3]\n\n#Perform stereorectification \nR1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(L.camera_matrix, L.dist_coeffs, R.camera_matrix, R.dist_coeffs, (w,h), rotation, translation, cv2.CALIB_ZERO_DISPARITY,0, (0,0))\nmapxL, mapyL = cv2.initUndistortRectifyMap(L.camera_matrix, L.dist_coeffs, R1, P1, (w,h), cv2.CV_32FC1)\nmapxR, mapyR = cv2.initUndistortRectifyMap(R.camera_matrix, R.dist_coeffs, R2, P2, (w,h), cv2.CV_32FC1)\ndstL = cv2.remap(left_frame, mapxL, mapyL,cv2.INTER_LINEAR)\ndstR = cv2.remap(right_frame, mapxR, mapyR,cv2.INTER_LINEAR)\nprint(P1)\nprint(P2)\n\ndstL=cv2.resize(dstL,(448, 320), interpolation = cv2.INTER_CUBIC)\ndstR=cv2.resize(dstR,(448, 320), interpolation = cv2.INTER_CUBIC)\n\ncv2.imwrite(\"left_rectified.png\",dstL)\ncv2.imwrite(\"right_rectified.png\",dstR)\n\n# draw_epipolarlines(left_frame,right_frame)\n# draw_epipolarlines(dstL,dstR)\n\n# display the images\nwhile (True):\t\n # cv2.imshow('Left Image',img3)\n # cv2.imshow('Right Image',img5)\n cv2.imshow('Left rectify',dstL)\n cv2.imshow('Right rectify',dstR)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break"
},
{
"alpha_fraction": 0.42303770780563354,
"alphanum_fraction": 0.6982671022415161,
"avg_line_length": 38.2400016784668,
"blob_id": "7cbb8bdc75922fe5487b0a4046b7ddde69e423a8",
"content_id": "ca588f02cdd163451bdd231b72461f17ebb1f8b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 25,
"path": "/euromav/right_cam.py",
"repo_name": "srikanthmalla/stereo",
"src_encoding": "UTF-8",
"text": "import numpy as np\n# General sensor definitions.\n# sensor_type: camera\n# comment: VI-Sensor cam1 (MT9M034)\n\n# Sensor extrinsics wrt. the body-frame.\n# T_BS:\n# cols: 4\n# rows: 4\ntransformation= np.array([[0.0125552670891, -0.999755099723, 0.0182237714554, -0.0198435579556],\n \t\t[0.999598781151, 0.0130119051815, 0.0251588363115, 0.0453689425024],\n \t\t[-0.0253898008918, 0.0179005838253, 0.999517347078, 0.00786212447038],\n \t\t[0.0, 0.0, 0.0, 1.0]],dtype=np.float64)\n\n# Camera specific definitions.\n# rate_hz: 20\nresolution=np.array([752, 480],dtype=np.float64)\ncamera_model='pinhole'\nintrinsics=np.array([457.587, 456.134, 379.999, 255.238],dtype=np.float64) #fu, fv, cu, cv\ncamera_matrix=np.array([[intrinsics[0],0,intrinsics[2]],\n\t\t\t\t\t\t[0,intrinsics[1],intrinsics[3]],\n\t\t\t\t\t\t[0,0,1]],dtype=np.float64)\n# distortion_coefficients\ndist_coeffs=np.array([-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05],np.float64)\ndistortion_model='radial-tangential'\n"
},
{
"alpha_fraction": 0.42424243688583374,
"alphanum_fraction": 0.6969696879386902,
"avg_line_length": 35.62963104248047,
"blob_id": "92248569a38564a1d73fe00158753075b80ac0a6",
"content_id": "acf4a1b5a7fe2df0892cb5897030a92a1a406815",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 27,
"path": "/euromav/left_cam.py",
"repo_name": "srikanthmalla/stereo",
"src_encoding": "UTF-8",
"text": "import numpy as np\n# General sensor definitions.\n# sensor_type: camera\n# comment: VI-Sensor cam0 (MT9M034)\n\n# Sensor extrinsics wrt. the body-frame.\n# T_BS:\n# cols: 4\n# rows: 4\n\ntransformation=np.array( [[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],\n \t\t[0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],\n \t\t\t[-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],\n \t\t[0.0, 0.0, 0.0, 1.0]],dtype=np.float64)\n\n# Camera specific definitions.\n# rate_hz: 20\nresolution=np.array([752, 480],dtype=np.float64)\ncamera_model= 'pinhole'\nintrinsics=np.array([458.654, 457.296, 367.215, 248.375],dtype=np.float64) #fu, fv, cu, cv\ncamera_matrix=np.array([[intrinsics[0],0,intrinsics[2]],\n\t\t\t\t\t\t[0,intrinsics[1],intrinsics[3]],\n\t\t\t\t\t\t[0,0,1]],dtype=np.float64)\n\n# distortion_coefficients\ndist_coeffs=np.array([-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05],dtype=np.float64)\ndistortion_model= 'radial-tangential'\n\n"
}
]
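The per-file statistics in these records can be approximately recomputed from the `text` payload. A sketch under inferred definitions, cross-checked against the `/main.py` entry above (42 lines, max_line_length 194, avg_line_length ~35.33); the dataset's exact computation may differ in details such as byte versus character counts:

```python
def file_stats(text: str) -> dict:
    # Inferred definitions -- not necessarily the dataset's exact formulas.
    lines = text.splitlines()
    return {
        "num_lines": len(lines),
        "max_line_length": max((len(l) for l in lines), default=0),
        # length_bytes minus newlines, divided by line count
        "avg_line_length": sum(len(l) for l in lines) / max(len(lines), 1),
        "alpha_fraction": sum(c.isalpha() for c in text) / max(len(text), 1),
        "alphanum_fraction": sum(c.isalnum() for c in text) / max(len(text), 1),
    }
```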
| Field | Value |
|---|---|
| repo_name | bevin13/dbassign3 |
| repo_url | https://github.com/bevin13/dbassign3 |
| snapshot_id | 245724614304789c3d8461e5c9b59da0b1eecf89 |
| revision_id | fc5eefa0768c995b24ae4e08c52cee6c23e0c22b |
| directory_id | 481bf639bd4caeff0190f4af7821c812a9be8221 |
| branch_name | refs/heads/main |
| visit_date | 2023-08-17T08:52:22.497196 |
| revision_date | 2021-09-30T07:15:01 |
| committer_date | 2021-09-30T07:15:01 |
| github_id | 411,617,056 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_created_at | null |
| gha_updated_at | null |
| gha_pushed_at | null |
| gha_language | null |
| num_files | 1 |

files:
[
{
"alpha_fraction": 0.6967545747756958,
"alphanum_fraction": 0.727180540561676,
"avg_line_length": 63.733333587646484,
"blob_id": "8802e8b08979ab4294677ca9e9469de04fc795aa",
"content_id": "77e4ed953859688dde10ae20efac4c4653a846db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 15,
"path": "/q1.py",
"repo_name": "bevin13/dbassign3",
"src_encoding": "UTF-8",
"text": "import sqlite3\r\nimport pandas as pd\r\ncon = sqlite3.connect('database.sqlite')\r\ncur = con.cursor()\r\n\r\nprint(\"Names of both the Home Teams and Away Teams in each match played in 2015 and FTHG = 5 : \")\r\nprint(con.execute(\"Select HomeTeam,AwayTeam from Matches where Season ='2015' and FTHG ='5'\").fetchall())\r\nprint(\"Details of the matches where Arsenal is the Home Team and FTA is “A” : \")\r\nprint(con.execute(\"Select * from Matches where HomeTeam = 'Arsenal' and FTR = 'A'\").fetchall())\r\nprint(\"Matches from the 2012 season till the 2015 season where Away Team is Bayern Munich and FTHG > 2 : \")\r\nprint(con.execute(\"Select HomeTeam,AwayTeam,FTR from Matches where Season BETWEEN 2012 AND 2015 and AwayTeam = 'Bayern Munich' and FTHG > 2\").fetchall())\r\nprint(\"Matches where the Home Team name begins with “A” and Away Team name begins with “M” : \")\r\nprint(con.execute(\"Select HomeTeam,AwayTeam,FTR from Matches where HomeTeam LIKE 'A%' and AwayTeam LIKE 'M%'\").fetchall())\r\n\r\ncon.close()\r\n"
}
]
| Field | Value |
|---|---|
| repo_name | vetletm/hacking-portal |
| repo_url | https://github.com/vetletm/hacking-portal |
| snapshot_id | 9bc4bb1e83c13ef0614697d0733dba5878454ce2 |
| revision_id | e6d617c26c0ab216fce182afeb2d8d2a408e14f1 |
| directory_id | fa9a2e9dfbc10b162ba15ee647116a03151250cc |
| branch_name | refs/heads/master |
| visit_date | 2020-04-04T19:26:55.926440 |
| revision_date | 2018-11-19T12:07:17 |
| committer_date | 2018-11-19T12:07:17 |
| github_id | 156,206,385 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_created_at | 2018-11-05T11:24:27 |
| gha_updated_at | 2018-11-19T09:02:04 |
| gha_pushed_at | 2018-11-19T09:51:23 |
| gha_language | Go |

files:
[
{
"alpha_fraction": 0.6843156814575195,
"alphanum_fraction": 0.6933066844940186,
"avg_line_length": 23.414634704589844,
"blob_id": "17d8664071a9ea711181ea20359394f56833c740",
"content_id": "f76a9ed54efa26bc83150220852e246b546f5f61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 41,
"path": "/build/inventory",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n#\n# the inventory from Terraform Inventory does not give the hosts their floating IPs,\n# so we wrap the script and create our own inventory\n#\n\nimport re\nimport json\nimport subprocess\n\n# get json inventory from local terraform state file\n# (by default: .terraform/terraform.tfstate)\np = subprocess.Popen(\n\t['terraform-inventory', '-list'],\n\tstdout=subprocess.PIPE,\n\tstderr=subprocess.PIPE)\nout, err = p.communicate()\n\n# get a inventory json list from the output\ninventory = json.loads(out.strip().decode())\n\n# prepare a new inventory\nnew_inventory = {\n\t'all': {'hosts': []},\n\t'docker': [],\n\t'kali': [],\n}\n\n# iterate through the IPs and add them to the inventory\nfor host in ('docker', 'kali'):\n\ti = 1\n\tfor ip in inventory[host]:\n\t\tif ip.startswith('10.212.'):\n\t\t\tnew_inventory[host].append(ip)\n\t\t\tnew_inventory['all']['hosts'].append(ip)\n\t\t\tnew_inventory['{}{}'.format(host, i)] = [ip]\n\t\t\ti += 1\n\n# dump the inventory as a json string\nprint(json.dumps(new_inventory, indent=4))\n"
},
{
"alpha_fraction": 0.6585510969161987,
"alphanum_fraction": 0.6627078652381897,
"avg_line_length": 24.515151977539062,
"blob_id": "d0cfb1ba603bf1fd8a0786539b53f043308c9060",
"content_id": "9020784544a6dd30e6d1cb7cb859b1885f929685",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1684,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 66,
"path": "/templates/login.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Login HTML template\nconst Login = `\n{{define \"title\"}}Login{{end}}\n\n{{define \"navigation\"}}{{end}}\n\n{{define \"body\"}}\n<div class='container w-50'>\n\t<header class='m-5'>\n\t\t<h1>Hacking Portal</h1>\n\t</header>\n\n\t<div class='col-md-5'>\n\t\t<form id='form' onsubmit='javascript:;'>\n\t\t\t<p>Use your NTNU credentials</p>\n\t\t\t<div class='form-group'>\n\t\t\t\t<input class='form-control' id='username' placeholder='Username' name='username' autofocus>\n\t\t\t</div>\n\t\t\t<div class='form-group'>\n\t\t\t\t<input class='form-control' id='password' placeholder='Password' name='password' type='password'>\n\t\t\t</div>\n\t\t\t<button type='submit' class='btn btn-sm btn-success float-right'>Login</button>\n\t\t</form>\n\t</div>\n</div>\n{{end}}\n\n{{define \"scripts\"}}\n<script type=\"text/javascript\">\n\t$('button[type=\"submit\"]').click(function(e){\n\t\te.preventDefault();\n\n\t\tvar username = document.getElementById('username');\n\t\tvar password = document.getElementById('password');\n\n\t\tif(username.value == \"\"){\n\t\t\tusername.setCustomValidity(\"Use your NTNU username\");\n\t\t\tusername.reportValidity();\n\t\t\treturn;\n\t\t} else\n\t\t\tusername.setCustomValidity(\"\");\n\n\t\tif(password.value == \"\"){\n\t\t\tpassword.setCustomValidity(\"This is required\");\n\t\t\tpassword.reportValidity();\n\t\t\treturn;\n\t\t} else\n\t\t\tpassword.setCustomValidity(\"\");\n\n\t\t$.ajax({\n\t\t\ttype: 'POST',\n\t\t\turl: '/login',\n\t\t\tdata: JSON.stringify({username: username.value, password: password.value}),\n\t\t\tcontentType: 'application/json; charset=UTF-8'\n\t\t}).done(function(){\n\t\t\twindow.location.pathname = '/groups'\n\t\t}).fail(function(){\n\t\t\tpassword.setCustomValidity(\"Username or password was incorrect\");\n\t\t\tpassword.reportValidity();\n\t\t});\n\t});\n</script>\n{{end}}\n`\n"
},
{
"alpha_fraction": 0.5561797618865967,
"alphanum_fraction": 0.779026210308075,
"avg_line_length": 37.14285659790039,
"blob_id": "9e461f1d5b90aaafa0a52bb27bdc5e85403f9d7a",
"content_id": "cc94693feb1949f9aa748996ea00d34258d210a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go Module",
"length_bytes": 534,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 14,
"path": "/go.mod",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "module hacking-portal\n\nrequire (\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8\n\tgithub.com/go-chi/chi v3.3.3+incompatible\n\tgithub.com/google/uuid v1.0.0\n\tgithub.com/gophercloud/gophercloud v0.0.0-20181114204705-3a7818a07cfc\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgithub.com/stretchr/testify v1.2.2\n\tgopkg.in/asn1-ber.v1 v1.0.0-20170511165959-379148ca0225 // indirect\n\tgopkg.in/ldap.v2 v2.5.1\n\tgopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect\n)\n"
},
{
"alpha_fraction": 0.5994647741317749,
"alphanum_fraction": 0.6097234487533569,
"avg_line_length": 24.191011428833008,
"blob_id": "69d55cb8e34cbb23ad78a301b5a2a491850fd524",
"content_id": "b3758d27f75c49f6e3018686774352b1157a7673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2242,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 89,
"path": "/templates/groups.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Groups HTML template\nconst Groups = `\n{{define \"title\"}}Groups{{end}}\n\n{{define \"body\"}}\n<div class='container w-50'>\n\t<header class='m-5'>\n\t\t<h1>Groups</h1>\n\t\t<p>You are not currently in a group, to use the portal please join one.</p>\n\t</header>\n\n\t{{range .Groups}}\n\t<div class='card mb-3' data-group-id='{{.ID}}'>\n\t\t<div class='card-header'>\n\t\t\t<h5 class='float-left'>Group {{.ID}}</h5>\n\t\t\t{{if .Full}}\n\t\t\t<button type='button' class='btn btn-secondary btn-sm float-right join disabled'>Full</button>\n\t\t\t{{else}}\n\t\t\t<button type='button' class='btn btn-primary btn-sm float-right join'>Join</button>\n\t\t\t{{end}}\n\t\t</div>\n\t\t<div class='card-body p-2'>\n\t\t\t{{if .Members}}\n\t\t\t\t<h6 class='card-text m-1'>Members:</h6>\n\t\t\t\t<ul>\n\t\t\t\t{{range .Members}}\n\t\t\t\t\t{{if .Name}}\n\t\t\t\t\t<li class='card-text m-1'>{{.Name}}</li>\n\t\t\t\t\t{{else}}\n\t\t\t\t\t<li class='card-text m-1'>{{.ID}}</li>\n\t\t\t\t\t{{end}}\n\t\t\t\t{{end}}\n\t\t\t\t</ul>\n\t\t\t{{else}}\n\t\t\t\t<h6 class='card-text m-1'>No members</h6>\n\t\t\t{{end}}\n\t\t</div>\n\t</div>\n\t{{end}}\n</div>\n{{end}}\n\n{{define \"scripts\"}}\n<script type=\"text/javascript\">\n\t$('button.join').click(function(){\n\t\tvar button = $(this);\n\t\tvar groupID = button.parent().parent().data('group-id');\n\n\t\t// prevent clicking other buttons\n\t\tif($(this).hasClass('disabled'))\n\t\t\treturn;\n\t\t$('button.join').addClass('disabled');\n\n\t\t// reset button color\n\t\tbutton.removeClass('btn-danger');\n\t\tbutton.addClass('btn-secondary');\n\n\t\t// add some visual response that we're joining a group\n\t\tbutton.text('Joining');\n\t\tbutton.append('<i class=\"fa fa-spinner fa-spin ml-2\"></i>');\n\n\t\t// attempt to join group\n\t\t$.ajax({\n\t\t\ttype: 'POST',\n\t\t\turl: '/groups/join',\n\t\t\tdata: JSON.stringify({groupID: groupID}),\n\t\t\tcontentType: 'application/json; charset=UTF-8'\n\t\t}).done(function(){\n\t\t\t// redirect to group page\n\t\t\tsetTimeout(function(){ // sleep for demo purposes\n\t\t\t\twindow.location.pathname = '/group';\n\t\t\t}, 2500);\n\t\t}).fail(function(){\n\t\t\t// update the button text\n\t\t\tbutton.text('Join');\n\n\t\t\t// give visual indicator that it failed\n\t\t\tbutton.addClass('btn-danger');\n\t\t\tbutton.removeClass('btn-secondary');\n\t\t}).always(function(){\n\t\t\t// unlock the buttons\n\t\t\t$('.join:not(.btn-secondary)').removeClass('disabled');\n\t\t});\n\t});\n</script>\n{{end}}\n`\n"
},
{
"alpha_fraction": 0.6739926934242249,
"alphanum_fraction": 0.6739926934242249,
"avg_line_length": 26.299999237060547,
"blob_id": "47a18488424f19d7332d098a81a53766e5ed2c8b",
"content_id": "315dc777d2dd2143439a7733aac0db7f8ddfee8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 10,
"path": "/models/machine.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package models\n\n// Machine stores information about server in OpenStack\ntype Machine struct {\n\tName string `bson:\"name\"`\n\tUUID string `bson:\"uuid\"`\n\tGroupID int `bson:\"groupID\"`\n\tGroupIndex int `bson:\"groupIndex\"`\n\tAddress string `bson:\"address\"`\n}\n"
},
{
"alpha_fraction": 0.7045454382896423,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 15.5,
"blob_id": "b283c08e9d0a7f443df92202cb527745467a0d58",
"content_id": "75e08034e4dc50ce4c9cbc61e32986d835f80502",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/models/group.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package models\n\n// Group stores information about a given group\ntype Group struct {\n\tID int\n\tFull bool\n\tMembers []Student\n}\n"
},
{
"alpha_fraction": 0.6399155259132385,
"alphanum_fraction": 0.6504752039909363,
"avg_line_length": 34.074073791503906,
"blob_id": "e810f01aa128227103905e73ce1892f81e05fe56",
"content_id": "52649f44f3f4fa717faff532ea220c90c5365ed4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 27,
"path": "/templates/navigation.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Navigation HTML template\nconst Navigation = `\n{{define \"navigation\"}}\n<nav class='navbar navbar-fixed-top bg-secondary'>\n\t<div class='container-fluid'>\n\t\t<div class='navbar-header w-100'>\n\t\t\t<img class='float-left' src='/static/images/ntnu-logo.svg'/>\n\t\t\t<h4 class='float-left pl-3 text-white'>Hacking Portal</h4>\n\t\t\t<div class='float-right pr-3 dropdown' style='cursor: pointer;'>\n\t\t\t\t<a class='dropdown-toggle' role='button' id='user-menu' data-toggle='dropdown' aria-haspopup='true' aria-expanded='false'>\n\t\t\t\t\t<img class='rounded' src='/static/images/stock-avatar.png'/>\n\t\t\t\t</a>\n\t\t\t\t<div class='dropdown-menu' aria-labelledby='user-menu'>\n\t\t\t\t\t<h5 class='dropdown-header'>{{.User.Name}}</h5>\n\t\t\t\t\t{{if ne .User.GroupID 0}}\n\t\t\t\t\t<a class='dropdown-item leave' href='/group/leave'>Leave Group</a>\n\t\t\t\t\t{{end}}\n\t\t\t\t\t<a class='dropdown-item' href='/logout'>Log out</a>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>\n\t</div>\n</nav>\n{{end}}\n`\n"
},
{
"alpha_fraction": 0.7116730809211731,
"alphanum_fraction": 0.7141566872596741,
"avg_line_length": 27.947711944580078,
"blob_id": "c8b129120f5373657f8b80f01f61c3e17d9838dc",
"content_id": "e352e799bde58e9f4bed2b8c4b6584d443bcf4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 4429,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 153,
"path": "/routes/groups.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"hacking-portal/db\"\n\t\"hacking-portal/models\"\n\t\"hacking-portal/templates\"\n\n\t\"github.com/go-chi/chi\"\n)\n\n// GroupsEndpoint interfaces for all the methods are expected to be provided.\ntype GroupsEndpoint struct {\n\tStudents db.StudentStorage\n}\n\ntype groupsPageData struct {\n\tUser models.Student\n\tGroups []models.Group\n}\n\n// GetGroups renders a view of all student groups\nfunc (storage *GroupsEndpoint) GetGroups(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// get the actual sessionUser object from the username\n\tsessionUser, err := storage.Students.FindByID(session.Username)\n\tif err != nil {\n\t\t// sessionUser doesn't exist yet, we'll have to create it\n\t\t// this will happen on first visit\n\t\tsessionUser = models.Student{ID: session.Username, Name: session.DisplayName}\n\n\t\terr = storage.Students.Upsert(sessionUser)\n\t\tif err != nil {\n\t\t\t// something went horribly wrong\n\t\t\thttp.Error(w, \"Unable to initiate user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif sessionUser.GroupID != 0 {\n\t\t// the user already has a group, redirect em\n\t\thttp.Redirect(w, r, \"/group\", http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t// prepare page data\n\tpageData := groupsPageData{User: sessionUser}\n\n\t// get the groups\n\tgroups, err := storage.Students.FindGroups()\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to get groups\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// maps are intentionally randomized in order, so we have to get an ordered slice of it\n\tvar groupKeys []int\n\tfor key := range groups {\n\t\tgroupKeys = append(groupKeys, key)\n\t}\n\tsort.Ints(groupKeys)\n\n\t// iterate over each group and fill in the page data\n\tfor _, groupID := range groupKeys {\n\t\tnumMembers := groups[groupID]\n\n\t\t// get all group members\n\t\tif groupMembers, err := storage.Students.FindByGroup(groupID); err != nil {\n\t\t\thttp.Error(w, \"Unable to parse groups\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\t// append the group data and members to the page data\n\t\t\tpageData.Groups = append(pageData.Groups, models.Group{\n\t\t\t\tID: groupID,\n\t\t\t\tFull: numMembers == 3, // hardcode much\n\t\t\t\tMembers: groupMembers,\n\t\t\t})\n\t\t}\n\t}\n\n\t// append empty group at the end so people can join an empty one\n\tnextGroupID := len(pageData.Groups) + 1\n\tpageData.Groups = append(pageData.Groups, models.Group{\n\t\tID: nextGroupID,\n\t})\n\n\t// prepare and ensure validity of template files\n\ttpl := template.Must(template.New(\"layout\").Parse(templates.Layout + templates.Navigation + templates.Groups))\n\n\t// render the templates with data\n\ttpl.ExecuteTemplate(w, \"layout\", pageData)\n}\n\n// PostJoinGroup handles group join requests\nfunc (storage *GroupsEndpoint) PostJoinGroup(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// get the actual sessionUser object from the username\n\tsessionUser, err := storage.Students.FindByID(session.Username)\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid user session\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif sessionUser.GroupID != 0 {\n\t\thttp.Error(w, \"User already in a group\", 
http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar payload map[string]interface{}\n\n\t// attempt to decode and validate the body contents, then get the student information\n\tif err := json.NewDecoder(r.Body).Decode(&payload); err != nil {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t} else if groupID, ok := payload[\"groupID\"]; !ok {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t} else if reflect.TypeOf(groupID).Kind() != reflect.Float64 {\n\t\thttp.Error(w, \"Invalid group ID\", http.StatusBadRequest)\n\t} else if groupID.(float64) <= 0 {\n\t\thttp.Error(w, \"Invalid group ID\", http.StatusBadRequest)\n\t} else {\n\t\tsessionUser.GroupID = int(groupID.(float64))\n\n\t\t// attempt to update the sessionUser's group ID\n\t\tif err := storage.Students.Upsert(sessionUser); err != nil {\n\t\t\thttp.Error(w, \"Unable to join group\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\t// render a successful message\n\t\t\tfmt.Fprint(w, \"OK\")\n\t\t}\n\t}\n}\n\n// GroupsRouter sets up routing for the group enrollment view\nfunc GroupsRouter() chi.Router {\n\tep := GroupsEndpoint{new(db.StudentDatabase)}\n\n\tr := chi.NewRouter()\n\tr.Get(\"/\", ep.GetGroups)\n\tr.Post(\"/join\", ep.PostJoinGroup)\n\n\treturn r\n}\n"
},
{
"alpha_fraction": 0.7379767894744873,
"alphanum_fraction": 0.7495853900909424,
"avg_line_length": 23.1200008392334,
"blob_id": "c000e2d069b23a023a302b26b42ab7699d051ce2",
"content_id": "99f7c6ea40452a84124781f38e0d71ede1bbaafb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 25,
"path": "/Dockerfile",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "FROM golang:1.11 AS builder\n\n# set working directory\nWORKDIR /build\n\n# copy our entire structure\nCOPY . ./\n\n# build the app (statically)\nRUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o /portal .\n\n# create the image from scratch\nFROM scratch\n\n# copy the app\nCOPY --from=builder /portal /usr/bin/local/portal\n\n# copy the static files\nCOPY --from=builder /build/static/ /var/www/static/\n\n# copy root certificates\nCOPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt\n\n# set entrypoint (our application)\nENTRYPOINT [\"/usr/bin/local/portal\"]\n"
},
{
"alpha_fraction": 0.6903073191642761,
"alphanum_fraction": 0.701789915561676,
"avg_line_length": 25.4375,
"blob_id": "0ead2aa510464264a979ca4f2873c238de890a6c",
"content_id": "82a519e7f47ecaf7dfe5f4a7254c4b1eeff8a4a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2961,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 112,
"path": "/routes/admin_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"hacking-portal/models\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/go-chi/chi\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestAdminDashboard(t *testing.T) {\n\tcookie := mockSession(\"test\", true)\n\n\t// create a request to pass to the handler\n\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\treq.AddCookie(&cookie)\n\n\t// create a response recorder to record the response from the handler\n\tres := httptest.NewRecorder()\n\n\t// prepare the endpoint with mocked storage\n\tep := AdminEndpoint{\n\t\tMachines: new(mockMachineStorage),\n\t\tStudents: new(mockStudentStorage),\n\t}\n\n\t// serve the handler\n\thandler := http.HandlerFunc(ep.GetDashboard)\n\thandler.ServeHTTP(res, req)\n\n\t// test the status\n\trequire.Equal(t, http.StatusOK, res.Code, \"handler returned wrong status code\")\n}\n\nfunc TestPostMachineAssign(t *testing.T) {\n\tmdb := new(mockMachineStorage)\n\tmdb.Upsert(models.Machine{\n\t\tName: \"test1\",\n\t\tUUID: \"1111\",\n\t\tGroupID: 1,\n\t\tAddress: \"1.1.1.1\",\n\t})\n\n\ttestData := []struct {\n\t\tbody string\n\t\tcode int\n\t}{\n\t\t{body: ``, code: http.StatusBadRequest},\n\t\t{body: `{\"machineUUID\":\"0000\"}`, code: http.StatusBadRequest},\n\t\t{body: `{\"groupID\":0}`, code: http.StatusBadRequest},\n\t\t{body: `{\"machineUUID\":\"1111\",\"groupID\":1}`, code: http.StatusOK},\n\t\t{body: `{\"machineUUID\":\"1111\",\"groupID\":-1}`, code: http.StatusInternalServerError},\n\t\t{body: `{\"machineUUID\":\"0000\",\"groupID\":1}`, code: http.StatusNotFound},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"POST\", \"/\", bytes.NewBuffer([]byte(data.body)))\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := AdminEndpoint{\n\t\t\tMachines: mdb,\n\t\t\tStudents: new(mockStudentStorage),\n\t\t}\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.PostMachineAssign)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestAdminMachineRestart(t *testing.T) {\n\t// create a request to pass to the handler\n\treq := httptest.NewRequest(\"POST\", \"/\", nil)\n\n\t// create a response recorder to record the response from the handler\n\tres := httptest.NewRecorder()\n\n\t// preprare a new context\n\tctx := chi.NewRouteContext()\n\tctx.URLParams.Add(\"uuid\", \"0000\")\n\treq = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, ctx))\n\n\t// prepare the endpoint with mocked storage\n\tep := AdminEndpoint{\n\t\tMachines: new(mockMachineStorage),\n\t\tStudents: new(mockStudentStorage),\n\t}\n\n\t// serve the handler\n\thandler := http.HandlerFunc(ep.PostMachineRestart)\n\thandler.ServeHTTP(res, req)\n\n\t// test the status\n\trequire.Equal(t, http.StatusNotFound, res.Code, \"handler returned wrong status code\")\n}\n\nfunc TestAdminRouter(t *testing.T) {\n\tvar r *chi.Mux\n\tassert.IsType(t, r, AdminRouter())\n}\n"
},
{
"alpha_fraction": 0.7131751179695129,
"alphanum_fraction": 0.7139934301376343,
"avg_line_length": 27.09195327758789,
"blob_id": "6d5d9402e421d79a24afabdb8def3a35eb52de52",
"content_id": "13a77f61ea508b4643d11d8d04203e75660d74ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2444,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 87,
"path": "/db/student.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"hacking-portal/models\"\n\n\t\"github.com/globalsign/mgo\"\n\t\"github.com/globalsign/mgo/bson\"\n)\n\n// students collection structure:\n// { id, alias, name, groupID }\n\n// StudentStorage is an interface describing the methods of the StudentDatabase struct\ntype StudentStorage interface {\n\tFindAll() ([]models.Student, error)\n\tFindByID(string) (models.Student, error)\n\tFindByName(string) (models.Student, error)\n\tFindByGroup(int) ([]models.Student, error)\n\tFindGroups() (map[int]int, error)\n\tUpsert(models.Student) error\n}\n\n// StudentDatabase is an implementation of the storage for all Student-related methods\ntype StudentDatabase struct{}\n\n// FindAll returns an array of all the students\nfunc (StudentDatabase) FindAll() ([]models.Student, error) {\n\tvar students []models.Student\n\terr := db.C(\"students\").Find(nil).All(&students)\n\treturn students, err\n}\n\n// FindByID returns a single student by ID (username)\nfunc (StudentDatabase) FindByID(id string) (models.Student, error) {\n\tvar student models.Student\n\terr := db.C(\"students\").Find(bson.M{\"id\": id}).One(&student)\n\treturn student, err\n}\n\n// FindByName returns a single student by name\nfunc (StudentDatabase) FindByName(name string) (models.Student, error) {\n\tvar student models.Student\n\terr := db.C(\"students\").Find(bson.M{\"name\": name}).One(&student)\n\treturn student, err\n}\n\n// FindByGroup finds all students in a certain group\nfunc (StudentDatabase) FindByGroup(groupID int) ([]models.Student, error) {\n\tvar students []models.Student\n\terr := db.C(\"students\").Find(bson.M{\"groupID\": groupID}).All(&students)\n\treturn students, err\n}\n\n// FindGroups returns a map of all group IDs with the number of members\nfunc (StudentDatabase) FindGroups() (map[int]int, error) {\n\tgroupIDs := map[int]int{}\n\tvar students []models.Student\n\n\t// get all students from the database\n\tif err := db.C(\"students\").Find(nil).All(&students); err != nil {\n\t\treturn groupIDs, err\n\t}\n\n\t// populate array with unique group IDs\n\tfor _, student := range students {\n\t\tgroupID := student.GroupID\n\t\tif groupID != 0 {\n\t\t\tif _, isset := groupIDs[groupID]; !isset {\n\t\t\t\tgroupIDs[groupID] = 1\n\t\t\t} else {\n\t\t\t\tgroupIDs[groupID]++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn groupIDs, nil\n}\n\n// Upsert adds/updates the student to the database\nfunc (StudentDatabase) Upsert(student models.Student) error {\n\t_, err := db.C(\"students\").Find(bson.M{\"id\": student.ID}).Apply(mgo.Change{\n\t\tUpdate: student,\n\t\tUpsert: true,\n\t}, nil)\n\n\treturn err\n}\n"
},
{
"alpha_fraction": 0.6923728585243225,
"alphanum_fraction": 0.7029660940170288,
"avg_line_length": 26.126436233520508,
"blob_id": "1af237b2abb442513b954c4a88e222072c310801",
"content_id": "bdce457544f8504015ddcf30dbec29f02fb1e866",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2360,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 87,
"path": "/db/student_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"testing\"\n\n\t\"hacking-portal/models\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestStudentUpsert(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\tvar err error\n\t// attempt to insert a few students, asserting the output\n\terr = tdb.Upsert(models.Student{\"one\", \"One One\", 10})\n\trequire.Nil(t, err, \"failed to insert student 1\")\n\terr = tdb.Upsert(models.Student{\"two\", \"Two Two\", 0})\n\trequire.Nil(t, err, \"failed to insert student 2\")\n\terr = tdb.Upsert(models.Student{\"three\", \"Three Three\", 10})\n\trequire.Nil(t, err, \"failed to insert student 3\")\n}\n\nfunc TestStudentFindAll(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\t// attempt to find students\n\tstudents, err := tdb.FindAll()\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get students\")\n\trequire.Len(t, students, 3) // this runs after upsert, so there should be 3\n}\n\nfunc TestStudentFindByID(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\t// attempt to find student by ID\n\tstudent, err := tdb.FindByID(\"one\") // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get student\")\n\trequire.EqualValues(t, \"one\", student.ID)\n\trequire.EqualValues(t, 10, student.GroupID)\n\trequire.EqualValues(t, \"One One\", student.Name)\n}\n\nfunc TestStudentFindByName(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\t// attempt to find student by name\n\tstudent, err := tdb.FindByName(\"Three Three\") // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get student\")\n\trequire.EqualValues(t, \"three\", student.ID)\n\trequire.EqualValues(t, 10, student.GroupID)\n\trequire.EqualValues(t, \"Three Three\", student.Name)\n}\n\nfunc TestStudentFindByGroup(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\t// attempt to find students by group ID\n\tstudents, err := tdb.FindByGroup(10) // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get students\")\n\trequire.Len(t, students, 2) // this runs after upsert, so there should be 2\n}\n\nfunc TestStudentFindGroups(t *testing.T) {\n\t// new database type\n\ttdb := StudentDatabase{}\n\n\t// attempt to find students by group ID\n\tgroupIDs, err := tdb.FindGroups()\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get group IDs\")\n\trequire.Len(t, groupIDs, 1) // student 1 and 3 are in group 10, student 2 doesn't have group (0)\n}\n"
},
{
"alpha_fraction": 0.6807106733322144,
"alphanum_fraction": 0.6890863180160522,
"avg_line_length": 25.266666412353516,
"blob_id": "c29effbf5f2db7a31e155fa84fa4656c7153efb0",
"content_id": "4c85a38c184b1a74e238fe70127b65f5b1e1d728",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 3940,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 150,
"path": "/routes/group_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"context\"\n\t\"hacking-portal/models\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/go-chi/chi\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestGroupDashboard(t *testing.T) {\n\tsdb := new(mockStudentStorage)\n\tsdb.Upsert(models.Student{\"actual\", \"Actual User\", 1})\n\n\tmdb := new(mockMachineStorage)\n\tmdb.Upsert(models.Machine{\n\t\tName: \"test1\",\n\t\tUUID: \"1111\",\n\t\tGroupID: 1,\n\t\tAddress: \"1.1.1.1\",\n\t})\n\n\ttestData := []struct {\n\t\tcookie http.Cookie\n\t\tcode int\n\t}{\n\t\t{cookie: mockSession(\"test\", false), code: http.StatusTemporaryRedirect},\n\t\t{cookie: mockSession(\"actual\", true), code: http.StatusOK},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := GroupEndpoint{\n\t\t\tMachines: mdb,\n\t\t\tStudents: sdb,\n\t\t}\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.GetDashboard)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestGroupMachineRestart(t *testing.T) {\n\tmdb := new(mockMachineStorage)\n\tmdb.Upsert(models.Machine{\n\t\tName: \"test1\",\n\t\tUUID: \"1111\",\n\t\tGroupID: 2,\n\t\tAddress: \"1.1.1.1\",\n\t})\n\n\tsdb := new(mockStudentStorage)\n\tsdb.Upsert(models.Student{\"ungrouped\", \"Ungrouped User\", 0})\n\tsdb.Upsert(models.Student{\"grouped\", \"Grouped User\", 1})\n\n\ttestData := []struct {\n\t\tuuid string\n\t\tcode int\n\t\tcookie http.Cookie\n\t}{\n\t\t{uuid: \"\", code: http.StatusBadRequest, cookie: mockSession(\"invalid\", false)},\n\t\t{uuid: \"\", code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{uuid: \"0000\", code: http.StatusBadRequest, cookie: mockSession(\"grouped\", true)},\n\t\t{uuid: \"1111\", code: http.StatusBadRequest, cookie: mockSession(\"grouped\", true)},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"POST\", \"/\", nil)\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := GroupEndpoint{\n\t\t\tStudents: sdb,\n\t\t\tMachines: mdb,\n\t\t}\n\n\t\t// prepare context\n\t\tctx := chi.NewRouteContext()\n\t\tctx.URLParams.Add(\"uuid\", data.uuid)\n\t\treq = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, ctx))\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.PostMachineRestart)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestGetLeaveGroup(t *testing.T) {\n\tsdb := new(mockStudentStorage)\n\tsdb.Upsert(models.Student{\"ungrouped\", \"Ungrouped User\", 0})\n\tsdb.Upsert(models.Student{\"grouped\", \"Grouped User\", 1})\n\n\ttestData := []struct {\n\t\tcode int\n\t\tcookie http.Cookie\n\t}{\n\t\t{code: http.StatusBadRequest, cookie: mockSession(\"invalid\", false)},\n\t\t{code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{code: 
http.StatusTemporaryRedirect, cookie: mockSession(\"grouped\", true)},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"POST\", \"/\", nil)\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := GroupEndpoint{\n\t\t\tStudents: sdb,\n\t\t\tMachines: new(mockMachineStorage),\n\t\t}\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.GetLeaveGroup)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestGroupRouter(t *testing.T) {\n\tvar r *chi.Mux\n\tassert.IsType(t, r, GroupRouter())\n}\n"
},
{
"alpha_fraction": 0.7544204592704773,
"alphanum_fraction": 0.7583497166633606,
"avg_line_length": 22.136363983154297,
"blob_id": "be0f35a04c6954dfe3c4dcfd7f8b9047d059d242",
"content_id": "1c0671f08aa892017a5d44f6e220aaf6be5a6713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 509,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 22,
"path": "/build/README.md",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "### Terraform\n\nUsed to create the VMs, their networks, floating IPs etc on OpenStack. \nUsage:\n```bash\n# source OpenStack RC v3 file\nsource openstack.rc\n\n# deploy with Terraform\nterraform init\nterraform apply\n```\n\n### Ansible\n\nUsed to install the software on all the machines, including Docker, MongoDB and the Go application, latter two inside a Docker Swarm.\n\nBefore running, make sure to populate the credentials in `group_vars/docker1.yml`. \nUsage:\n```bash\nansible-playbook -i inventory playbook.yml\n```\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7979910969734192,
"avg_line_length": 66.19999694824219,
"blob_id": "ad97d539c49817ad72e694f09541d9eeec50bd54",
"content_id": "6dd6cb3874462a2da69991fe169097ebb0196e27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2689,
"license_type": "no_license",
"max_line_length": 461,
"num_lines": 40,
"path": "/README.md",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "# Hacking Portal\n\nThis project deploys a web interface to manage and assign Kali VMs in a lab environment used for the Ethical Hacking project in the IMT3004 course at NTNU Gjøvik. It allows an admin (lecturer) to assign up to three Kali VMs to each group of students, and it allows students to restart their assigned VMs without having access to the OpenStack environment in where they are hosted. This was originally a part of the PEMA bachelor assignment for the class of '16.\n\nThe original scope was to authenticate using LDAP and NTNU's own authentication infrastructure, integrate with OpenStack for VM assignments and management, store intermediate information in a MongoDB database, all of which hosted in a Docker Swarm on VMs in NTNU's own OpenStack environment. The scope also included a tasks/answers interface, as well as sessions stored in the database, but time didn't allow it.\n\nTechnologies used:\n- Go\n- MongoDB\n- OpenStack\n- Docker (Swarm)\n- Terraform (orchestrating the test deployment)\n- Ansible (provision the test deployment)\n\nDifficulties:\n- We had some issues with MongoDB replicas in Docker Swarm, so we ended up running a single instance\n\t- Documentation on this was not easy to come by\n- We also had some issues (initially) with the OpenStack package for Go\n\t- Documentation was outdated and the package was very complex, but it was the only option\n- Adding tests for OpenStack and LDAP libraries deemed to be unfeasible\n\t- We settled for testing our own original code\n- The OpenStack package was a pain to understand and navigate documentation for\n\nWhat could be improved:\n- Further developing the application to include more of the specs from the original assignment (PEMA)\n\t- Tasks/answers is a highly wanted feature\n- Storing sessions in the database to allow load balancing\n- Sessions could be handled with a 3rd-party library, such as [gorilla/sessions](https://github.com/gorilla/sessions)\n- Handing the creation of Kali machines in-house to allow management and distribution of private keys\n- Metalinting\n\t- The remaining issues with gometalinter is a difference in opinion\n\nWe managed to get a working example with a good enough UI/UX using the Bootstrap CSS framework, enough to impress the IMT3004 lecturers that suggested the project topic.\n\nWe estimate having used 30-40 hours _each_ during the development of this project, way beyond the 20 hour minimum.\nWe learned the value of describing an aptly sized scope beforehand (which we slightly failed), and the value of good documentation (which we struggled with finding).\n\n## Usage\n\nDeploy with Terraform and Ansible (see the `/build` directory), then visit the public IP of any of the docker VMs.\n"
},
{
"alpha_fraction": 0.6676492094993591,
"alphanum_fraction": 0.6705968976020813,
"avg_line_length": 20.887096405029297,
"blob_id": "014eeff4d86adee4e628654b8b518a3fa195bd4f",
"content_id": "7a96b495be316aa9d9b03e8f2821be6d6a867a8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1357,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 62,
"path": "/main.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package main\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"hacking-portal/db\"\n\t\"hacking-portal/openstack\"\n\t\"hacking-portal/routes\"\n\n\t\"github.com/go-chi/chi\"\n)\n\nfunc main() {\n\t// initialize the database connection\n\tdb.Init(\n\t\tos.Getenv(\"DB_URL\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASS\"))\n\n\t// initialize session routing\n\troutes.InitAuthentication(\n\t\tos.Getenv(\"LDAP_ADDR\"),\n\t\tos.Getenv(\"LDAP_DC\"),\n\t\tos.Getenv(\"COURSE_CODE\"),\n\t\tos.Getenv(\"ADMINS\"))\n\n\t// initialize openstack connection\n\topenstack.Init()\n\n\t// set up routing\n\tr := chi.NewRouter()\n\tr.Use(routes.SessionHandler)\n\tr.Get(\"/login\", routes.GetLogin)\n\tr.Post(\"/login\", routes.PostLogin)\n\tr.Get(\"/logout\", routes.GetLogout)\n\n\t// let the remaining sub-routes handle themselves\n\tr.Mount(\"/groups\", routes.GroupsRouter())\n\tr.Mount(\"/group\", routes.GroupRouter())\n\tr.Mount(\"/admin\", routes.AdminRouter())\n\n\t// serve static files directly\n\tfs := http.FileServer(http.Dir(\"/var/www/static/\"))\n\tr.Handle(\"/static/*\", http.StripPrefix(\"/static/\", fs))\n\n\t// set default route\n\tr.Get(\"/*\", routes.RedirectLogin)\n\n\t// attempt to get the port from the environment\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\t// set a default port since it wasn't provided\n\t\tport = \"8080\"\n\t}\n\n\t// start webserver\n\tlog.Printf(\"Serving on port %s...\\n\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, r))\n}\n"
},
{
"alpha_fraction": 0.6873857378959656,
"alphanum_fraction": 0.6873857378959656,
"avg_line_length": 16.645160675048828,
"blob_id": "8450509ad8c974ff7160fe3995cb5a8b40663837",
"content_id": "ec1af85fa09b0162eed8cbe5fa2eb8e98a635708",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 31,
"path": "/db/db_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/globalsign/mgo/dbtest\"\n)\n\nfunc TestMain(m *testing.M) {\n\t// temp directory to store test database\n\ttempDir, _ := ioutil.TempDir(\"\", \"testing\")\n\n\t// start the test database server and get a session\n\tvar server dbtest.DBServer\n\tserver.SetPath(tempDir)\n\tsession := server.Session()\n\n\t// set the database variable to the database in a session\n\tdb = session.DB(\"testing\")\n\n\t// run the tests\n\tret := m.Run()\n\n\t// cleanup\n\tdb.DropDatabase()\n\tsession.Close()\n\tserver.Stop()\n\tos.Exit(ret)\n}\n"
},
{
"alpha_fraction": 0.724994421005249,
"alphanum_fraction": 0.7258894443511963,
"avg_line_length": 29.4013614654541,
"blob_id": "147050557d8179525997c358453ff48904ff879c",
"content_id": "a9e3d246776567f6ec60eb06341908aea01795d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 4469,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 147,
"path": "/routes/admin.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"net/http\"\n\t\"sort\"\n\n\t\"hacking-portal/db\"\n\t\"hacking-portal/models\"\n\t\"hacking-portal/openstack\"\n\t\"hacking-portal/templates\"\n\n\t\"github.com/go-chi/chi\"\n)\n\n// AdminEndpoint is an implementation of the endpoint for all Admin-related methods.\n// Database interfaces for all the methods are expected to be provided.\ntype AdminEndpoint struct {\n\tMachines db.MachineStorage\n\tStudents db.StudentStorage\n}\n\ntype adminPageData struct {\n\tUser models.Student\n\tMachines []models.Machine\n\tGroups []models.Group\n}\n\n// GetDashboard renders a view of the administration interface\nfunc (storage *AdminEndpoint) GetDashboard(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// get the actual sessionUser object from the username\n\tsessionUser, err := storage.Students.FindByID(session.Username)\n\tif err != nil {\n\t\t// sessionUser doesn't exist yet, we'll have to create it\n\t\t// this will happen on first visit\n\t\tsessionUser = models.Student{ID: session.Username, Name: session.DisplayName}\n\n\t\terr = storage.Students.Upsert(sessionUser)\n\t\tif err != nil {\n\t\t\t// something went horribly wrong\n\t\t\thttp.Error(w, \"Unable to initiate user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// prepare page data\n\tpageData := adminPageData{User: sessionUser}\n\n\t// get the groups\n\tgroups, err := storage.Students.FindGroups()\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to get groups\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// maps are intentionally randomized in order, so we have to get an ordered slice of it\n\tvar groupKeys []int\n\tfor key := range groups {\n\t\tgroupKeys = append(groupKeys, key)\n\t}\n\tsort.Ints(groupKeys)\n\n\t// iterate over each group and fill in the page data\n\tfor _, groupID := range groupKeys {\n\t\t// append the group data and members to the page data\n\t\tpageData.Groups = append(pageData.Groups, models.Group{ID: groupID})\n\t}\n\n\t// get the machines from the database\n\tif pageData.Machines, err = storage.Machines.FindAll(); err != nil {\n\t\thttp.Error(w, \"unable to grab machines\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// prepare and ensure validity of template files\n\ttpl := template.Must(template.New(\"layout\").Parse(templates.Layout + templates.Navigation + templates.Admin))\n\n\t// render the templates with data\n\ttpl.ExecuteTemplate(w, \"layout\", pageData)\n}\n\n// PostMachineRestart handles machine restart requests\nfunc (storage *AdminEndpoint) PostMachineRestart(w http.ResponseWriter, r *http.Request) {\n\t// get machine UUID from URL path\n\tuuid := chi.URLParam(r, \"machineUUID\")\n\n\t// - lists machines and their assigned groups\n\tif _, err := storage.Machines.FindByID(uuid); err != nil {\n\t\thttp.Error(w, \"Couldn't get machines from db\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Attempt to reboot the server\n\tif openstack.Reboot(uuid) != nil {\n\t\thttp.Error(w, \"Failed to reboot machine\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"OK\")\n}\n\n// PostMachineAssign handles machine group assignment requests\nfunc (storage *AdminEndpoint) PostMachineAssign(w http.ResponseWriter, r *http.Request) {\n\tvar payload map[string]interface{}\n\n\t// attempt to decode and validate the body contents, then get the machine 
information\n\tif err := json.NewDecoder(r.Body).Decode(&payload); err != nil {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t} else if groupID, ok := payload[\"groupID\"]; !ok {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t} else if machineUUID, ok := payload[\"machineUUID\"]; !ok {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t} else if machine, err := storage.Machines.FindByID(machineUUID.(string)); err != nil {\n\t\thttp.Error(w, \"Could not find machine\", http.StatusNotFound)\n\t} else {\n\t\t// Set new group id\n\t\tmachine.GroupID = int(groupID.(float64))\n\n\t\t// attempt to update the machine in database\n\t\tif storage.Machines.Upsert(machine) != nil {\n\t\t\thttp.Error(w, \"Could not update machine\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\tfmt.Fprint(w, \"OK\")\n\t\t}\n\t}\n}\n\n// AdminRouter sets up routing for the administration web interface\nfunc AdminRouter() chi.Router {\n\tep := AdminEndpoint{\n\t\tMachines: new(db.MachineDatabase),\n\t\tStudents: new(db.StudentDatabase),\n\t}\n\n\tr := chi.NewRouter()\n\tr.Get(\"/\", ep.GetDashboard)\n\tr.Post(\"/restart/{machineUUID:[A-Za-z0-9-]+}\", ep.PostMachineRestart)\n\tr.Post(\"/assign\", ep.PostMachineAssign)\n\n\treturn r\n}\n"
},
{
"alpha_fraction": 0.6582781672477722,
"alphanum_fraction": 0.6609271764755249,
"avg_line_length": 25.964284896850586,
"blob_id": "3dd43447f392d19beefdd82656f1563dc2834cb5",
"content_id": "4ab965de25d58439da6761b9cdcea049f37517ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 28,
"path": "/templates/layout.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Layout HTML template\nconst Layout = `\n{{define \"layout\"}}\n<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset='utf-8'>\n\t\t<meta name='viewport' content='width=device-width, initial-scale=1, shrink-to-fit=no'>\n\t\t<title>{{template \"title\" .}}</title>\n\n\t\t<link rel='stylesheet' href='/static/css/libs/bootstrap.min.css'>\n\t\t<link rel='stylesheet' href='/static/css/libs/fontawesome.all.css'>\n\t\t<link rel='stylesheet' href='/static/css/tweaks.css'>\n\t</head>\n\t<body>\n\t\t{{template \"navigation\" .}}\n\t\t{{template \"body\" .}}\n\n\t\t<script src='/static/js/libs/jquery.min.js'></script>\n\t\t<script src='/static/js/libs/popper.min.js'></script>\n\t\t<script src='/static/js/libs/bootstrap.min.js'></script>\n\t\t{{template \"scripts\"}}\n\t</body>\n</html>\n{{end}}\n`\n"
},
{
"alpha_fraction": 0.6139650940895081,
"alphanum_fraction": 0.6219451427459717,
"avg_line_length": 26.46575355529785,
"blob_id": "29b502c47995468b7f0d0cfd4278de2c74806207",
"content_id": "37476137f0e53899b9af6aa582403463210d298b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2005,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 73,
"path": "/templates/group.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Group HTML template\nconst Group = `\n{{define \"title\"}}Group {{.User.GroupID}}{{end}}\n\n{{define \"body\"}}\n{{$machines := .Machines}}\n<div class='container w-50'>\n\t<header class='m-5'>\n\t\t<h1>Group {{.User.GroupID}}</h1>\n\t</header>\n\n\t<div class='d-flex flex-column'>\n\t\t<div class='row-md-4'>\n\t\t\t<h3>Kali Machines</h3>\n\t\t\t{{if .Machines}}\n\t\t\t<div class='list-group pb-2'>\n\t\t\t{{range $i, $machine := $machines}}\n\t\t\t\t<div class='list-group-item clearfix'>\n\t\t\t\t\t<span class='float-left'>Kali {{inc $i}} <a class='d-inline pl-3' href='#'>{{$machine.Address}}</a></span>\n\t\t\t\t\t<span class='float-right' data-kali-uuid='{{$machine.UUID}}'>\n\t\t\t\t\t\t<a class='btn btn-sm btn-outline-default border restart' href='#' data-toggle='tooltip' title='Restart'>\n\t\t\t\t\t\t\t<span class='fas fa-redo' aria-hidden='true'></span>\n\t\t\t\t\t\t</a>\n\t\t\t\t\t</span>\n\t\t\t\t</div>\n\t\t\t{{end}}\n\t\t\t</div>\n\t\t\t{{end}}\n\t\t</div>\n\t</div>\n</div>\n{{end}}\n\n{{define \"scripts\"}}\n<script type=\"text/javascript\">\n\t$('[data-toggle=\"tooltip\"]').tooltip() // enable tooltips\n\t$('a.restart').click(function(){\n\t\tif(confirm('This will forcefully restart the machine, losing all unsaved progress. Are you sure you want to do this?')){\n\t\t\tvar button = $(this);\n\t\t\tvar machineUUID = button.parent().data('kali-uuid');\n\n\t\t\t// prevent spamming\n\t\t\tif(button.hasClass('disabled'))\n\t\t\t\treturn;\n\t\t\tbutton.addClass('disabled');\n\n\t\t\t// reset button color\n\t\t\tbutton.removeClass('btn-outline-danger');\n\t\t\tbutton.addClass('btn-outline-default');\n\n\t\t\t// attempt to restart the machine\n\t\t\t$.ajax({\n\t\t\t\ttype: 'POST',\n\t\t\t\turl: '/group/restart/' + machineUUID\n\t\t\t}).fail(function(){\n\t\t\t\t// give visual indicator that it failed\n\t\t\t\tbutton.addClass('btn-outline-danger');\n\t\t\t\tbutton.removeClass('btn-outline-default');\n\t\t\t});\n\n\t\t\t// lock and spin the button for one minute\n\t\t\tbutton.children().addClass('fa-spin');\n\t\t\tsetTimeout(function(){\n\t\t\t\tbutton.removeClass('disabled');\n\t\t\t\tbutton.children().removeClass('fa-spin');\n\t\t\t}, 60 * 1000);\n\t\t}\n\t});\n</script>\n{{end}}\n`\n"
},
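The `{{inc $i}}` call in the Group template above is not a built-in template action: it only parses because routes/group.go (later in this dump) registers an `inc` helper through `template.FuncMap` before calling `Parse`. A small self-contained sketch of that dependency (the demo data is hypothetical):

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	// Parsing a template that references "inc" fails unless the helper
	// is registered first via Funcs.
	funcMap := template.FuncMap{
		"inc": func(i int) int { return i + 1 },
	}

	tpl := template.Must(template.New("demo").Funcs(funcMap).Parse(
		"{{range $i, $addr := .}}Kali {{inc $i}}: {{$addr}}\n{{end}}"))

	// Prints 1-based "Kali 1: ..." lines, matching the numbering the
	// Group template shows for machines.
	tpl.Execute(os.Stdout, []string{"192.0.2.10", "192.0.2.11"})
}
```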
{
"alpha_fraction": 0.6815642714500427,
"alphanum_fraction": 0.6815642714500427,
"avg_line_length": 21.375,
"blob_id": "665690213e846555553326a2c4a0b911753fe92a",
"content_id": "d1122b752c4c5282ffc87b06b84a568b104d51ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/models/student.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package models\n\n// Student stores information about the student\ntype Student struct {\n\tID string `bson:\"id\"`\n\tName string `bson:\"name\"`\n\tGroupID int `bson:\"groupID\"`\n}\n"
},
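The `bson` tags on `Student` decide the field names mgo persists, so a stored document looks like `{"id": ..., "name": ..., "groupID": ...}`. A hedged sketch of writing one with the same driver the repo uses; the database and `students` collection names are assumptions, since only the `machines` collection is visible in this dump:

```go
package main

import (
	"log"

	"hacking-portal/models"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

func main() {
	session, err := mgo.Dial("localhost") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// "hackingportal" and "students" are assumed names.
	students := session.DB("hackingportal").C("students")

	// The bson tags on models.Student make this document
	// {"id": "ab1234", "name": "Test Student", "groupID": 2}.
	_, err = students.Upsert(
		bson.M{"id": "ab1234"},
		models.Student{ID: "ab1234", Name: "Test Student", GroupID: 2},
	)
	if err != nil {
		log.Fatal(err)
	}
}
```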
{
"alpha_fraction": 0.6028469800949097,
"alphanum_fraction": 0.6083036661148071,
"avg_line_length": 28.47552490234375,
"blob_id": "460a7762d0c3ed8c6999e8539366f8893d06d06d",
"content_id": "dc39b67c0b0ee9c493f631a7512034260b4cdc7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 4215,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 143,
"path": "/templates/admin.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package templates\n\n// Admin HTML template\nconst Admin = `\n{{define \"title\"}}Administration{{end}}\n\n{{define \"body\"}}\n{{$machines := .Machines}}\n{{$groups := .Groups}}\n<div class='container w-50'>\n\t<header class='m-5'>\n\t\t<h1>Administration</h1>\n\t</header>\n\n\t<div class='d-flex flex-column'>\n\t\t<div class='row-md-4'>\n\t\t\t<h3>Assign Kali machines to groups</h3>\n\t\t\t{{if .Machines}}\n\t\t\t<div class='list-group pb-2'>\n\t\t\t\t{{range $machine := $machines}}\n\t\t\t\t<div class='list-group-item clearfix' id='{{$machine.UUID}}'>\n\t\t\t\t\t<span class='float-left'>{{$machine.Name}}</span>\n\t\t\t\t\t<a class='float-left pl-3' href='#'>{{$machine.Address}}</a>\n\t\t\t\t\t<span class='float-left pl-3'>-></span>\n\t\t\t\t\t<div class='float-left pl-4 dropdown'>\n\t\t\t\t\t\t<button class='btn btn-sm btn-light dropdown-toggle' data-toggle='dropdown' aria-haspopup='true' aria-expanded='false'>\n\t\t\t\t\t\t\t{{if eq $machine.GroupID 0}}\n\t\t\t\t\t\t\tNone\n\t\t\t\t\t\t\t{{else}}\n\t\t\t\t\t\t\tGroup {{$machine.GroupID}}\n\t\t\t\t\t\t\t{{end}}\n\t\t\t\t\t\t</button>\n\t\t\t\t\t\t<div class='dropdown-menu groups'>\n\t\t\t\t\t\t\t{{if eq $machine.GroupID 0}}\n\t\t\t\t\t\t\t<a class='dropdown-item disabled' href='#' data-id='0'>None</a>\n\t\t\t\t\t\t\t{{else}}\n\t\t\t\t\t\t\t<a class='dropdown-item' href='#' data-id='0'>None</a>\n\t\t\t\t\t\t\t{{end}}\n\t\t\t\t\t\t\t{{range $group := $groups}}\n\t\t\t\t\t\t\t{{if eq $machine.GroupID $group.ID}}\n\t\t\t\t\t\t\t<a class='dropdown-item disabled' href='#' data-id='{{$group.ID}}'>Group {{$group.ID}}</a>\n\t\t\t\t\t\t\t{{else}}\n\t\t\t\t\t\t\t<a class='dropdown-item' href='#' data-id='{{$group.ID}}'>Group {{$group.ID}}</a>\n\t\t\t\t\t\t\t{{end}}\n\t\t\t\t\t\t\t{{end}}\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<span class='float-right' data-kali-index='{{.GroupIndex}}'>\n\t\t\t\t\t\t<a class='btn btn-sm btn-outline-default border restart' href='#' data-toggle='tooltip' title='Restart'>\n\t\t\t\t\t\t\t<span class='fas fa-redo' aria-hidden='true'></span>\n\t\t\t\t\t\t</a>\n\t\t\t\t\t</span>\n\t\t\t\t</div>\n\t\t\t\t{{end}}\n\t\t\t</div>\n\t\t\t{{end}}\n\t\t</div>\n\t</div>\n</div>\n{{end}}\n\n{{define \"scripts\"}}\n<script type=\"text/javascript\">\n\t$('[data-toggle=\"tooltip\"]').tooltip() // enable tooltips\n\t$('.dropdown-menu.groups a').click(function(){\n\t\tvar uuid = $(this).parent().parent().parent().attr('id');\n\t\tvar button = $('#' + uuid + ' .dropdown-toggle');\n\t\tvar groupID = $(this).data('id');\n\t\tvar groupName = $(this).text();\n\t\tvar groupNameOld = button.text();\n\n\t\t// prevent spamming\n\t\tif(button.hasClass('disabled'))\n\t\t\treturn;\n\t\tbutton.addClass('disabled');\n\n\t\t// reset button color\n\t\tbutton.removeClass('btn-outline-danger');\n\t\tbutton.addClass('btn-light');\n\n\t\t// replace button text with spinner\n\t\tbutton.html('<i class=\"fa fa-spinner fa-spin\"></i>');\n\n\t\t// attempt to store the change\n\t\t$.ajax({\n\t\t\ttype: 'POST',\n\t\t\turl: '/admin/assign',\n\t\t\tdata: JSON.stringify({groupID: groupID, machineUUID: uuid}),\n\t\t\tcontentType: 'application/json; charset=UTF-8'\n\t\t}).done(function(){\n\t\t\t// update the button text\n\t\t\tbutton.text(groupName);\n\n\t\t\t// enable the menu buttons\n\t\t\t$(this).siblings().removeClass('disabled');\n\t\t\t$(this).addClass('disabled');\n\t\t}).fail(function(){\n\t\t\t// update the button text\n\t\t\tbutton.text(groupNameOld);\n\n\t\t\t// give visual indicator that it 
failed\n\t\t\tbutton.addClass('btn-outline-danger');\n\t\t\tbutton.removeClass('btn-light');\n\t\t}).always(function(){\n\t\t\t// unlock the button\n\t\t\tbutton.removeClass('disabled');\n\t\t});\n\t});\n\t$('a.restart').click(function(){\n\t\tif(confirm('This will forcefully restart the machine, losing all unsaved progress. Are you sure you want to do this?')){\n\t\t\tvar button = $(this);\n\t\t\tvar uuid = $(this).parent().parent().attr('id');\n\n\t\t\t// prevent spamming\n\t\t\tif(button.hasClass('disabled'))\n\t\t\t\treturn;\n\t\t\tbutton.addClass('disabled');\n\n\t\t\t// reset button color\n\t\t\tbutton.removeClass('btn-outline-danger');\n\t\t\tbutton.addClass('btn-outline-default');\n\n\t\t\t// attempt to restart the machine\n\t\t\t$.ajax({\n\t\t\t\ttype: 'POST',\n\t\t\t\turl: '/admin/restart/' + uuid\n\t\t\t}).fail(function(){\n\t\t\t\t// give visual indicator that it failed\n\t\t\t\tbutton.addClass('btn-outline-danger');\n\t\t\t\tbutton.removeClass('btn-outline-default');\n\t\t\t});\n\n\t\t\t// lock and spin the button for one minute\n\t\t\tbutton.children().addClass('fa-spin');\n\t\t\tsetTimeout(function(){\n\t\t\t\tbutton.removeClass('disabled');\n\t\t\t\tbutton.children().removeClass('fa-spin');\n\t\t\t}, 60 * 1000);\n\t\t}\n\t});\n</script>\n{{end}}\n`\n"
},
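The assign handler this admin page posts to is not included in this section. A sketch of what decoding its JSON body could look like, with struct tags inferred from the `$.ajax` payload above (`{groupID, machineUUID}`); the function and type names are hypothetical:

```go
package routes

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// assignRequest mirrors the JSON the admin page sends to /admin/assign.
type assignRequest struct {
	GroupID     int    `json:"groupID"`
	MachineUUID string `json:"machineUUID"`
}

func postAssignSketch(w http.ResponseWriter, r *http.Request) {
	var req assignRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	// ...look up the machine by req.MachineUUID, set its GroupID,
	// and upsert it through db.MachineStorage...
	fmt.Fprint(w, "OK")
}
```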
{
"alpha_fraction": 0.6688741445541382,
"alphanum_fraction": 0.6708609461784363,
"avg_line_length": 19.6849308013916,
"blob_id": "b73bd2d6e11b6dfa460689981eccf55c9a8405f1",
"content_id": "8c9ad69a74f89079d42e8793b27ea5c4186ad940",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 73,
"path": "/routes/storage_student_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"errors\"\n\t\"hacking-portal/models\"\n)\n\ntype mockStudentStorage struct {\n\tdata map[string]models.Student\n}\n\nfunc (s mockStudentStorage) FindAll() ([]models.Student, error) {\n\tstudents := make([]models.Student, 0, len(s.data))\n\tfor _, student := range s.data {\n\t\tstudents = append(students, student)\n\t}\n\treturn students, nil\n}\n\nfunc (s mockStudentStorage) FindByID(id string) (models.Student, error) {\n\tvar st models.Student\n\tif student, ok := s.data[id]; ok {\n\t\treturn student, nil\n\t}\n\treturn st, errors.New(\"\")\n}\n\nfunc (s mockStudentStorage) FindByName(name string) (models.Student, error) {\n\tvar st models.Student\n\tfor _, student := range s.data {\n\t\tif student.Name == name {\n\t\t\treturn student, nil\n\t\t}\n\t}\n\n\treturn st, errors.New(\"\")\n}\n\nfunc (s mockStudentStorage) FindByGroup(groupID int) ([]models.Student, error) {\n\tvar students []models.Student\n\tfor _, student := range s.data {\n\t\tif student.GroupID == groupID {\n\t\t\tstudents = append(students, student)\n\t\t}\n\t}\n\n\treturn students, nil\n}\n\nfunc (s mockStudentStorage) FindGroups() (map[int]int, error) {\n\tgroupIDs := map[int]int{}\n\tfor _, student := range s.data {\n\t\tgroupID := student.GroupID\n\t\tif groupID != 0 {\n\t\t\tif _, isset := groupIDs[groupID]; !isset {\n\t\t\t\tgroupIDs[groupID] = 1\n\t\t\t} else {\n\t\t\t\tgroupIDs[groupID]++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn groupIDs, nil\n}\n\nfunc (s *mockStudentStorage) Upsert(student models.Student) error {\n\tif s.data == nil {\n\t\ts.data = map[string]models.Student{}\n\t}\n\n\ts.data[student.ID] = student\n\treturn nil\n}\n"
},
{
"alpha_fraction": 0.7240170240402222,
"alphanum_fraction": 0.725770115852356,
"avg_line_length": 30.690475463867188,
"blob_id": "c14ba57118a74a142fd24dbd104be307b4d2e01e",
"content_id": "0a1d28db3986b1d3feee54a350394d74b1258b12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 3993,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 126,
"path": "/routes/group.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"fmt\"\n\t\"html/template\"\n\t\"net/http\"\n\n\t\"hacking-portal/db\"\n\t\"hacking-portal/models\"\n\t\"hacking-portal/openstack\"\n\t\"hacking-portal/templates\"\n\n\t\"github.com/go-chi/chi\"\n)\n\n// GroupEndpoint is an implementation of the endpoint for all Group-related methods.\n// Database interfaces for all the methods are expected to be provided.\ntype GroupEndpoint struct {\n\tMachines db.MachineStorage\n\tStudents db.StudentStorage\n}\n\ntype groupPageData struct {\n\tUser models.Student\n\tMachines []models.Machine\n}\n\n// GetDashboard renders a view of the group interface\nfunc (storage *GroupEndpoint) GetDashboard(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// get the actual sessionUser object from the username\n\tsessionUser, err := storage.Students.FindByID(session.Username)\n\tif err != nil || sessionUser.GroupID == 0 {\n\t\t// sessionUser doesn't exist or has no group affiliation, redirect em'\n\t\thttp.Redirect(w, r, \"/groups\", http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t// prepare page data\n\tpageData := groupPageData{User: sessionUser}\n\n\t// get the machines\n\tif pageData.Machines, err = storage.Machines.FindByGroup(sessionUser.GroupID); err != nil {\n\t\thttp.Error(w, \"unable to get machines\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// add some handy functions to the template engine\n\tfuncMap := template.FuncMap{\n\t\t\"inc\": func(i int) int {\n\t\t\treturn i + 1\n\t\t},\n\t}\n\n\t// prepare and ensure validity of template files\n\ttpl := template.Must(template.New(\"layout\").Funcs(funcMap).Parse(templates.Layout + templates.Navigation + templates.Group))\n\n\t// render the templates with data\n\ttpl.ExecuteTemplate(w, \"layout\", pageData)\n}\n\n// PostMachineRestart handles a group's machine restart requests\nfunc (storage *GroupEndpoint) PostMachineRestart(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// get uuid from URL path\n\tuuid := chi.URLParam(r, \"machineUUID\")\n\n\t// Compare requested machine's group id to user's group id, and reboot\n\tif sessionUser, err := storage.Students.FindByID(session.Username); err != nil {\n\t\thttp.Error(w, \"Invalid user session\", http.StatusBadRequest)\n\t} else if sessionUser.GroupID == 0 {\n\t\thttp.Error(w, \"Invalid user session\", http.StatusBadRequest)\n\t} else if machine, err := storage.Machines.FindByID(uuid); err != nil {\n\t\thttp.Error(w, \"Invalid machine\", http.StatusBadRequest)\n\t} else if machine.GroupID != sessionUser.GroupID {\n\t\thttp.Error(w, \"Invalid machine\", http.StatusBadRequest)\n\t} else if openstack.Reboot(uuid) != nil {\n\t\thttp.Error(w, \"Could not reboot machine\", http.StatusInternalServerError)\n\t} else {\n\t\tfmt.Fprint(w, \"OK\")\n\t}\n}\n\n// GetLeaveGroup handles group leave requests\nfunc (storage *GroupEndpoint) GetLeaveGroup(w http.ResponseWriter, r *http.Request) {\n\t// get the user from the session\n\tcookie, _ := r.Cookie(\"session_token\")\n\tsession := sessions[cookie.Value]\n\n\t// attempt to get the student information, validating it\n\tif student, err := storage.Students.FindByID(session.Username); err != nil {\n\t\thttp.Error(w, \"Unable to get student data\", http.StatusBadRequest)\n\t} else if student.GroupID == 0 {\n\t\thttp.Error(w, \"Student is not in a 
group\", http.StatusBadRequest)\n\t} else {\n\t\tstudent.GroupID = 0\n\n\t\tif err := storage.Students.Upsert(student); err != nil {\n\t\t\thttp.Error(w, \"Unable to leave group\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\t// redirect to the groups view\n\t\t\thttp.Redirect(w, r, \"/groups\", http.StatusTemporaryRedirect)\n\t\t}\n\t}\n}\n\n// GroupRouter sets up routing for the group dashboard view\nfunc GroupRouter() chi.Router {\n\tep := GroupEndpoint{\n\t\tMachines: new(db.MachineDatabase),\n\t\tStudents: new(db.StudentDatabase),\n\t}\n\n\tr := chi.NewRouter()\n\tr.Get(\"/\", ep.GetDashboard)\n\tr.Post(\"/restart/{machineUUID:[A-Za-z0-9-]+}\", ep.PostMachineRestart)\n\tr.Get(\"/leave\", ep.GetLeaveGroup)\n\n\treturn r\n}\n"
},
{
"alpha_fraction": 0.6790909171104431,
"alphanum_fraction": 0.6790909171104431,
"avg_line_length": 22.913043975830078,
"blob_id": "9bbf5afd87808259e1f51ac25455f0f4881fa94c",
"content_id": "7a93679d925104f60bc012a8759671960e6d6931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1100,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 46,
"path": "/db/db.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"log\"\n\n\t\"github.com/globalsign/mgo\"\n)\n\nvar db *mgo.Database\n\n// Init initializes a new database session to a MongoDB instance at a given path.\n// The database session is used throughout the package, so the database must be initialized for the rest of the package to work correctly.\nfunc Init(path, name, user, pass string) {\n\t// bail out if any of the environment variables were missing\n\tif path == \"\" {\n\t\tlog.Fatal(\"Missing env DB_URL\")\n\t}\n\tif name == \"\" {\n\t\tlog.Fatal(\"Missing env DB_NAME\")\n\t}\n\tif user == \"\" {\n\t\tlog.Fatal(\"Missing env DB_USER\")\n\t}\n\tif pass == \"\" {\n\t\tlog.Fatal(\"Missing env DB_PASS\")\n\t}\n\n\t// connect to the mongo database with the given credentials\n\tsession, err := mgo.DialWithInfo(&mgo.DialInfo{\n\t\tAddrs: []string{path},\n\t\tDatabase: name,\n\t\tUsername: user,\n\t\tPassword: pass,\n\t})\n\n\tif err != nil {\n\t\t// we failed to connect, bail out!\n\t\tlog.Fatal(\"Failed to connect to database\")\n\t} else {\n\t\t// log a successful connection\n\t\tlog.Println(\"Database connected\")\n\t}\n\n\t// store the database session so we're able to use it later\n\tdb = session.DB(name)\n}\n"
},
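The four environment variable names below are grounded in `Init`'s own error messages. A minimal bootstrap sketch from a main function (the repo's actual main.go is not part of this section):

```go
package main

import (
	"os"

	"hacking-portal/db"
)

func main() {
	// Init calls log.Fatal on any missing value, so all four must be set.
	db.Init(
		os.Getenv("DB_URL"),
		os.Getenv("DB_NAME"),
		os.Getenv("DB_USER"),
		os.Getenv("DB_PASS"),
	)
}
```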
{
"alpha_fraction": 0.6812307834625244,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 21.56944465637207,
"blob_id": "06ca84c8036f11e3c7fae3a22e1f099aa361775d",
"content_id": "a53b944a52ebd8f649e68e8a7b54c5c4ebe51690",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1625,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 72,
"path": "/db/machine_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"testing\"\n\n\t\"hacking-portal/models\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestMachineUpsert(t *testing.T) {\n\t// new database type\n\ttdb := MachineDatabase{}\n\n\t// attempt to insert\n\terr := tdb.Upsert(models.Machine{\n\t\tUUID: \"1234\",\n\t\tName: \"foo\",\n\t\tGroupID: 1,\n\t})\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to insert machine\")\n}\n\nfunc TestMachineFindAll(t *testing.T) {\n\t// new database type\n\ttdb := MachineDatabase{}\n\n\t// attempt to find all machines\n\tmachines, err := tdb.FindAll()\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get machines\")\n\trequire.Len(t, machines, 1) // this runs after upsert, so there should be 1\n}\n\nfunc TestMachineFindByID(t *testing.T) {\n\t// new database type\n\ttdb := MachineDatabase{}\n\n\t// attempt to find machine by ID\n\tmachine, err := tdb.FindByID(\"1234\") // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get single machine\")\n\trequire.EqualValues(t, \"foo\", machine.Name)\n}\n\nfunc TestMachineFindByName(t *testing.T) {\n\t// new database type\n\ttdb := MachineDatabase{}\n\n\t// attempt to find machine by ID\n\tmachine, err := tdb.FindByName(\"foo\") // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get single machine\")\n\trequire.EqualValues(t, \"1234\", machine.UUID)\n}\n\nfunc TestMachineFindByGroup(t *testing.T) {\n\t// new database type\n\ttdb := MachineDatabase{}\n\n\t// attempt to find machines by group\n\tmachines, err := tdb.FindByGroup(1) // from the Upsert test\n\n\t// assert output\n\trequire.Nil(t, err, \"failed to get machines\")\n\trequire.Len(t, machines, 1) // this runs after upsert, so there should be 1\n}\n"
},
{
"alpha_fraction": 0.6805664896965027,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 19.5,
"blob_id": "e178a65c73ddde8657ff42dc50b075cc2980683c",
"content_id": "e753e7fbb7f56e9266c00cc36a01649f84906f1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1271,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 62,
"path": "/routes/storage_machine_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"errors\"\n\t\"hacking-portal/models\"\n)\n\ntype mockMachineStorage struct {\n\tdata map[string]models.Machine\n}\n\nfunc (s mockMachineStorage) FindAll() ([]models.Machine, error) {\n\tmachines := make([]models.Machine, 0, len(s.data))\n\tfor _, machine := range s.data {\n\t\tmachines = append(machines, machine)\n\t}\n\treturn machines, nil\n}\n\nfunc (s *mockMachineStorage) FindByID(uuid string) (models.Machine, error) {\n\tvar m models.Machine\n\tif machine, ok := s.data[uuid]; ok {\n\t\treturn machine, nil\n\t}\n\treturn m, errors.New(\"\")\n}\n\nfunc (s *mockMachineStorage) FindByName(name string) (models.Machine, error) {\n\tvar m models.Machine\n\tfor _, machine := range s.data {\n\t\tif machine.Name == name {\n\t\t\treturn machine, nil\n\t\t}\n\t}\n\treturn m, errors.New(\"\")\n}\n\nfunc (s mockMachineStorage) FindByGroup(groupID int) ([]models.Machine, error) {\n\tvar machines []models.Machine\n\tfor _, machine := range s.data {\n\t\tif machine.GroupID == groupID {\n\t\t\tmachines = append(machines, machine)\n\t\t}\n\t}\n\n\treturn machines, nil\n}\n\nfunc (s *mockMachineStorage) Upsert(machine models.Machine) error {\n\tif s.data == nil {\n\t\ts.data = map[string]models.Machine{}\n\t}\n\n\tif machine.GroupID == -1 {\n\t\treturn errors.New(\"\")\n\t}\n\n\tmachine.GroupID = 0\n\ts.data[machine.UUID] = machine\n\n\treturn nil\n}\n"
},
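Because `GroupEndpoint` holds the `db.MachineStorage` and `db.StudentStorage` interfaces rather than concrete databases, these in-memory mocks can be injected directly. A hypothetical test in the same style as the groups_test.go record that follows:

```go
package routes

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"hacking-portal/models"

	"github.com/stretchr/testify/require"
)

func TestGetDashboardSketch(t *testing.T) {
	// inject in-memory mocks instead of real Mongo-backed storage
	mdb := new(mockMachineStorage)
	sdb := new(mockStudentStorage)
	sdb.Upsert(models.Student{ID: "grouped", Name: "Grouped User", GroupID: 1})

	ep := GroupEndpoint{Machines: mdb, Students: sdb}

	// a valid session cookie, using the mockSession helper from sessions_test.go
	req := httptest.NewRequest("GET", "/", nil)
	cookie := mockSession("grouped", true)
	req.AddCookie(&cookie)

	res := httptest.NewRecorder()
	http.HandlerFunc(ep.GetDashboard).ServeHTTP(res, req)

	require.Equal(t, http.StatusOK, res.Code)
}
```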
{
"alpha_fraction": 0.6889556646347046,
"alphanum_fraction": 0.6923365592956543,
"avg_line_length": 28.910112380981445,
"blob_id": "22fe591b533f43886d79bce171b29ca8bb9f0b76",
"content_id": "7e6e2894bb79d3f0d686273ffc590a7b04a31d3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2662,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 89,
"path": "/routes/groups_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"bytes\"\n\t\"hacking-portal/models\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/go-chi/chi\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestGetGroups(t *testing.T) {\n\tsdb := new(mockStudentStorage)\n\tsdb.Upsert(models.Student{\"actual\", \"Actual User\", 1})\n\n\ttestData := []struct {\n\t\tcookie http.Cookie\n\t\tcode int\n\t}{\n\t\t{cookie: mockSession(\"test\", false), code: http.StatusOK},\n\t\t{cookie: mockSession(\"actual\", true), code: http.StatusTemporaryRedirect},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := GroupsEndpoint{sdb}\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.GetGroups)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestPostJoinGroup(t *testing.T) {\n\tsdb := new(mockStudentStorage)\n\tsdb.Upsert(models.Student{\"ungrouped\", \"Ungrouped User\", 0})\n\tsdb.Upsert(models.Student{\"grouped\", \"Grouped User\", 1})\n\n\ttestData := []struct {\n\t\tbody string\n\t\tcode int\n\t\tcookie http.Cookie\n\t}{\n\t\t{body: ``, code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{body: `{\"foo\":0}`, code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{body: `{\"groupID\":1}`, code: http.StatusBadRequest, cookie: mockSession(\"invalid\", false)},\n\t\t{body: `{\"groupID\":1}`, code: http.StatusBadRequest, cookie: mockSession(\"grouped\", true)},\n\t\t{body: `{\"groupID\":\"1\"}`, code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{body: `{\"groupID\":0}`, code: http.StatusBadRequest, cookie: mockSession(\"ungrouped\", true)},\n\t\t{body: `{\"groupID\":1}`, code: http.StatusOK, cookie: mockSession(\"ungrouped\", true)},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"POST\", \"/\", bytes.NewBuffer([]byte(data.body)))\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// prepare the endpoint with mocked storage\n\t\tep := GroupsEndpoint{sdb}\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(ep.PostJoinGroup)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestGroupsRouter(t *testing.T) {\n\tvar r *chi.Mux\n\tassert.IsType(t, r, GroupsRouter())\n}\n"
},
{
"alpha_fraction": 0.7356194853782654,
"alphanum_fraction": 0.7356194853782654,
"avg_line_length": 28.639345169067383,
"blob_id": "4f5b1752bd5d77354765c1c92394a80fd0ae9810",
"content_id": "f4d4b3f8bf3eb1134d6b3cf20f558424a879c20b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 1808,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 61,
"path": "/db/machine.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package db\n\nimport (\n\t\"hacking-portal/models\"\n\n\t\"github.com/globalsign/mgo\"\n\t\"github.com/globalsign/mgo/bson\"\n)\n\n// machines collection structure:\n// { uuid, name, groupID }\n\n// MachineStorage is an interface describing the methods of the MachineDatabase struct\ntype MachineStorage interface {\n\tFindAll() ([]models.Machine, error)\n\tFindByID(string) (models.Machine, error)\n\tFindByName(string) (models.Machine, error)\n\tFindByGroup(int) ([]models.Machine, error)\n\tUpsert(models.Machine) error\n}\n\n// MachineDatabase is an implementation of the storage for all Machine-related methods\ntype MachineDatabase struct{}\n\n// FindAll returns an array of all the machines\nfunc (MachineDatabase) FindAll() ([]models.Machine, error) {\n\tvar machines []models.Machine\n\terr := db.C(\"machines\").Find(nil).All(&machines)\n\treturn machines, err\n}\n\n// FindByID returns a single machine by ID\nfunc (MachineDatabase) FindByID(uuid string) (models.Machine, error) {\n\tvar machine models.Machine\n\terr := db.C(\"machines\").Find(bson.M{\"uuid\": uuid}).One(&machine)\n\treturn machine, err\n}\n\n// FindByName returns a single machine by name\nfunc (MachineDatabase) FindByName(name string) (models.Machine, error) {\n\tvar machine models.Machine\n\terr := db.C(\"machines\").Find(bson.M{\"name\": name}).One(&machine)\n\treturn machine, err\n}\n\n// FindByGroup finds all machines in a certain group\nfunc (MachineDatabase) FindByGroup(groupID int) ([]models.Machine, error) {\n\tvar machines []models.Machine\n\terr := db.C(\"machines\").Find(bson.M{\"groupID\": groupID}).All(&machines)\n\treturn machines, err\n}\n\n// Upsert adds/updates the machine to the database\nfunc (MachineDatabase) Upsert(machine models.Machine) error {\n\t_, err := db.C(\"machines\").Find(bson.M{\"name\": machine.Name}).Apply(mgo.Change{\n\t\tUpdate: machine,\n\t\tUpsert: true,\n\t}, nil)\n\n\treturn err\n}\n"
},
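Since every `MachineDatabase` method has a value receiver, the zero value already satisfies `MachineStorage`, which is the seam the route tests exploit. The compile-time assertion below is a standard Go idiom; the surrounding main is a hypothetical usage sketch:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"hacking-portal/db"
)

// compile-time proof that the concrete database satisfies the interface
var _ db.MachineStorage = db.MachineDatabase{}

func main() {
	db.Init(os.Getenv("DB_URL"), os.Getenv("DB_NAME"), os.Getenv("DB_USER"), os.Getenv("DB_PASS"))

	// code written against the interface works with either the real
	// database or a mock
	var machines db.MachineStorage = db.MachineDatabase{}
	group1, err := machines.FindByGroup(1)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range group1 {
		fmt.Println(m.UUID, m.Name, m.Address)
	}
}
```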
{
"alpha_fraction": 0.6975036859512329,
"alphanum_fraction": 0.6980910301208496,
"avg_line_length": 25.6015625,
"blob_id": "baff5b914ccc37f4f834073ec213bb2c875950a7",
"content_id": "b981a660b4a487c5c76a399d3126f72968261888",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 3405,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 128,
"path": "/openstack/openstack.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package openstack\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"hacking-portal/db\"\n\n\t\"github.com/gophercloud/gophercloud\"\n\t\"github.com/gophercloud/gophercloud/openstack\"\n\t\"github.com/gophercloud/gophercloud/openstack/compute/v2/servers\"\n)\n\n// For connecting to openstack\nvar client *gophercloud.ServiceClient\n\n// Reboot takes server UUID and attempts to reboot it\nfunc Reboot(uuid string) error {\n\tmachines := new(db.MachineDatabase)\n\t// Check if uuid is in database\n\t_, err := machines.FindByID(uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Attempt to reboot the server and return the error\n\terr = servers.Reboot(client, uuid, servers.RebootOpts{Type: servers.SoftReboot}).ExtractErr()\n\tif err != nil {\n\t\tlog.Println(\"Server\", uuid, \"failed to reboot\")\n\t\treturn err\n\t}\n\n\tlog.Println(\"Server\", uuid, \"was rebooted\")\n\treturn err\n}\n\n// Status takes server UUID and checks its status\nfunc Status(uuid string) (string, error) {\n\tmachines := new(db.MachineDatabase)\n\t// Check if uuid is in database\n\t_, err := machines.FindByID(uuid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Get the server object\n\tserver, err := servers.Get(client, uuid).Extract()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Print the status and return\n\tlog.Println(\"Server\", server.ID, \"is\", server.Status)\n\treturn server.Status, err\n}\n\n// getFloating find the floating IP and returns it\nfunc getFloating(server servers.Server) string {\n\t// Iterate through Addresses until floating is found\n\tfor _, networkAddresses := range server.Addresses {\n\t\tfor _, element := range networkAddresses.([]interface{}) {\n\t\t\taddress := element.(map[string]interface{})\n\n\t\t\tif address[\"OS-EXT-IPS:type\"] == \"floating\" {\n\t\t\t\treturn address[\"addr\"].(string)\n\t\t\t}\n\t\t}\n\t}\n\t// If nothing was found\n\treturn \"\"\n}\n\n// Init attempts to setup a connection\nfunc Init() {\n\tmachines := new(db.MachineDatabase)\n\n\t// source options from environment\n\tauthOpts, err := openstack.AuthOptionsFromEnv()\n\tif err != nil {\n\t\tlog.Fatal(\"Attempted to set authoptions, error: \", err)\n\t}\n\tauthOpts.DomainName = os.Getenv(\"OS_USER_DOMAIN_NAME\")\n\n\t// authenticate with the OpenStack API\n\tprovider, err := openstack.AuthenticatedClient(authOpts)\n\tif err != nil {\n\t\tlog.Fatal(\"Attempted to set provider, error: \", err)\n\t}\n\n\t// grab a new compute client\n\tif client, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{\n\t\tRegion: os.Getenv(\"OS_REGION_NAME\"),\n\t}); err != nil {\n\t\tlog.Fatal(\"Failed to initialize OpenStack client\", err)\n\t}\n\n\t// grab a list of servers, which is paginated\n\tallPages, err := servers.List(client, servers.ListOpts{}).AllPages()\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get server list from OpenStack\", err)\n\t}\n\n\t// get all the servers from the paginated list\n\tallServers, err := servers.ExtractServers(allPages)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get all servers from OpenStack\", err)\n\t}\n\n\t// iterate through all servers and attempt to put them into the database\n\tfor _, server := range allServers {\n\t\tif strings.HasPrefix(strings.ToLower(server.Name), \"kali\") {\n\t\t\t// machine found, update in database\n\t\t\tmachine, err := machines.FindByName(server.Name)\n\t\t\tif err != nil {\n\t\t\t\t// machine doesn't exist, let's add it\n\t\t\t\tmachine.Name = server.Name\n\t\t\t}\n\n\t\t\tmachine.UUID = server.ID\n\t\t\tmachine.Address 
= getFloating(server)\n\n\t\t\tif err := machines.Upsert(machine); err != nil {\n\t\t\t\tlog.Fatal(\"Attempted to insert new machine into db, error:\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
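Call ordering matters here: `openstack.Init` immediately queries and upserts through `db.MachineDatabase`, so the database package must be initialized first. A hedged bootstrap sketch (credentials and UUID are placeholders):

```go
package main

import (
	"log"

	"hacking-portal/db"
	"hacking-portal/openstack"
)

func main() {
	// db.Init must run first, since openstack.Init touches the database.
	db.Init("mongo.example.org", "hackingportal", "user", "pass") // placeholders

	// Requires gophercloud's OS_* environment variables to be set
	// (OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and so on), plus the
	// OS_REGION_NAME and OS_USER_DOMAIN_NAME read explicitly above.
	openstack.Init()

	uuid := "00000000-0000-0000-0000-000000000000" // placeholder UUID
	if status, err := openstack.Status(uuid); err != nil {
		log.Println("status lookup failed:", err)
	} else {
		log.Println("status:", status)
	}
}
```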
{
"alpha_fraction": 0.6774327754974365,
"alphanum_fraction": 0.6793734431266785,
"avg_line_length": 23.45423698425293,
"blob_id": "b18733e06ef5b84cbe9555c7706843cf26ef6d8a",
"content_id": "f108954715a04452c8a72805d97f17f5ce0535de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 7214,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 295,
"path": "/routes/sessions.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"log\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"hacking-portal/templates\"\n\n\t\"github.com/google/uuid\"\n\t\"gopkg.in/ldap.v2\"\n)\n\n// Session stores user, token, and expiration\ntype Session struct {\n\tUsername string\n\tDisplayName string\n\tAccessLevel accessLevel\n\tExpires time.Time\n}\n\n// Credentials stores username and password\ntype Credentials struct {\n\tPassword string `json:\"password\"`\n\tUsername string `json:\"username\"`\n}\n\ntype accessLevel int\n\nconst (\n\tnoAccess accessLevel = 0\n\tstudentAccess accessLevel = 1\n\tadminAccess accessLevel = 2\n)\n\nvar (\n\tsessions map[string]Session\n\tadminUsers map[string]bool\n\tldapAddress string\n\tldapDC string\n\tcourseFilter string\n)\n\nfunc getLDAPConnection() (*ldap.Conn, error) {\n\t// return a secure LDAP connection\n\tconn, err := ldap.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", ldapAddress, 389))\n\tif err != nil {\n\t\treturn conn, err\n\t}\n\n\treturn conn, conn.StartTLS(&tls.Config{InsecureSkipVerify: true})\n}\n\nfunc authenticateUser(creds Credentials) error {\n\t// get an LDAP connection\n\tconn, err := getLDAPConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// attempt to bind the credentials to LDAP, returning the possible error\n\treturn conn.Bind(fmt.Sprintf(\"uid=%s,ou=people,%s\", creds.Username, ldapDC), creds.Password)\n}\n\nfunc verifyAccess(username string) (accessLevel, error) {\n\t// return early if the user is a pre-defined admin\n\tif _, exists := adminUsers[username]; exists {\n\t\treturn adminAccess, nil\n\t}\n\n\t// get an LDAP connection\n\tconn, err := getLDAPConnection()\n\tif err != nil {\n\t\treturn noAccess, err\n\t}\n\n\t// get the course group information\n\tresults, err := conn.Search(ldap.NewSearchRequest(\n\t\t\"ou=groups,\"+ldapDC,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tcourseFilter, []string{}, nil,\n\t))\n\tif err != nil {\n\t\treturn noAccess, err\n\t}\n\n\t// parse the results and find the user\n\tfor _, entry := range results.Entries {\n\t\tfor _, member := range entry.GetAttributeValues(\"memberUid\") {\n\t\t\tif member == username {\n\t\t\t\treturn studentAccess, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// user is not part of the course\n\treturn noAccess, nil\n}\n\nfunc getUserDisplayName(username string) (string, error) {\n\t// get an LDAP connection\n\tconn, err := getLDAPConnection()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// search for user info\n\tresults, err := conn.Search(ldap.NewSearchRequest(\n\t\t\"ou=people,\"+ldapDC,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tfmt.Sprintf(\"(uid=%s)\", username), []string{}, nil,\n\t))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// find and return the user's full name\n\tfor _, entry := range results.Entries {\n\t\tname := entry.GetAttributeValue(\"displayName\")\n\t\tif name != \"\" {\n\t\t\treturn name, nil\n\t\t}\n\t}\n\n\t// fall back to the username\n\treturn username, nil\n}\n\n// InitAuthentication sets required values for LDAP connection\nfunc InitAuthentication(addr, dn, courseCode, admins string) {\n\tif addr == \"\" {\n\t\tlog.Fatal(\"LDAP address must be provided\")\n\t}\n\tif dn == \"\" {\n\t\tlog.Fatal(\"LDAP domain name must be provided\")\n\t}\n\tif courseCode == \"\" {\n\t\tlog.Fatal(\"CourseCode must be provided\")\n\t}\n\tif admins == \"\" {\n\t\tlog.Fatal(\"Admin list must be provided\")\n\t}\n\n\tadminUsers = 
make(map[string]bool)\n\tsessions = make(map[string]Session)\n\n\tfor _, admin := range strings.Split(admins, \" \") {\n\t\tadminUsers[admin] = true\n\t}\n\n\tldapAddress = addr\n\tldapDC = dn\n\tcourseFilter = fmt.Sprintf(\"(cn=fs_%s_1)\", courseCode)\n}\n\n// SessionHandler checks for a session and handles it accordingly\nfunc SessionHandler(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.HasPrefix(r.URL.Path, \"/admin\") || strings.HasPrefix(r.URL.Path, \"/group\") {\n\t\t\tcookie, err := r.Cookie(\"session_token\")\n\t\t\tif err != nil {\n\t\t\t\t// no session\n\t\t\t\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t} else if _, exists := sessions[cookie.Value]; !exists {\n\t\t\t\t// we are not aware of the user's cookie, force em to log out\n\t\t\t\thttp.Redirect(w, r, \"/logout\", http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t} else if sessions[cookie.Value].Expires.Before(time.Now()) {\n\t\t\t\t// session has expired, log the user out\n\t\t\t\thttp.Redirect(w, r, \"/logout\", http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar path string\n\t\t\tsession := sessions[cookie.Value]\n\t\t\tswitch session.AccessLevel {\n\t\t\tcase adminAccess:\n\t\t\t\tpath = \"/admin\"\n\t\t\tcase studentAccess:\n\t\t\t\tpath = \"/group\"\n\t\t\t}\n\n\t\t\tif path != \"\" {\n\t\t\t\t// refresh the cookie\n\t\t\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\t\t\tName: \"session_token\",\n\t\t\t\t\tValue: cookie.Value,\n\t\t\t\t\tPath: \"/\",\n\t\t\t\t\tExpires: time.Now().Add(1 * time.Hour),\n\t\t\t\t})\n\n\t\t\t\tif strings.HasPrefix(r.URL.Path, path) {\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Redirect(w, r, path, http.StatusTemporaryRedirect)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n\t\t\t}\n\t\t} else {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t}\n\t})\n}\n\n// GetLogin routes invalid users to the login page\nfunc GetLogin(w http.ResponseWriter, r *http.Request) {\n\t// prepare and ensure validity of template files\n\ttpl := template.Must(template.New(\"layout\").Parse(templates.Layout + templates.Login))\n\n\t// render the templates\n\ttpl.ExecuteTemplate(w, \"layout\", nil)\n}\n\n// PostLogin validates the user and creates session\nfunc PostLogin(w http.ResponseWriter, r *http.Request) {\n\tvar creds Credentials\n\tif err := json.NewDecoder(r.Body).Decode(&creds); err != nil {\n\t\t// If the structure of the body is wrong, return an HTTP error\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := authenticateUser(creds); err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"invalid credentials\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlevel, err := verifyAccess(creds.Username)\n\tif err != nil || level == noAccess {\n\t\thttp.Error(w, \"inaccessible\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tname, err := getUserDisplayName(creds.Username)\n\tif err != nil {\n\t\thttp.Error(w, \"failed to get user name\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsessionToken := uuid.New().String()\n\texpiration := time.Now().Add(1 * time.Hour)\n\n\tsessions[sessionToken] = Session{\n\t\tUsername: creds.Username,\n\t\tDisplayName: name,\n\t\tAccessLevel: level,\n\t\tExpires: expiration,\n\t}\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"session_token\",\n\t\tValue: sessionToken,\n\t\tPath: \"/\",\n\t\tExpires: expiration,\n\t})\n\n\tfmt.Fprint(w, 
\"OK\")\n}\n\n// GetLogout handles user logouts\nfunc GetLogout(w http.ResponseWriter, r *http.Request) {\n\tcookie, err := r.Cookie(\"session_token\")\n\tif err != nil || cookie.Value == \"\" {\n\t\t// cookie already doesn't exist, just redirect\n\t\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t// remove the session from our token storage\n\tdelete(sessions, cookie.Value)\n\n\t// create a new, dead cookie\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"session_token\",\n\t\tValue: \"\",\n\t\tPath: \"/\",\n\t\tExpires: time.Now(),\n\t})\n\n\t// redirec to the login page\n\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n}\n\n// RedirectLogin sends the user to the login page\nfunc RedirectLogin(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n}\n"
},
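Nothing in this section shows the main wiring, so here is a hypothetical sketch of how `InitAuthentication`, the login handlers, and `SessionHandler` could be assembled with chi. The environment variable names are invented; the admin list being space-separated follows from the `strings.Split(admins, " ")` above:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"hacking-portal/routes"

	"github.com/go-chi/chi"
)

func main() {
	// all four values are required; InitAuthentication log.Fatals otherwise
	routes.InitAuthentication(
		os.Getenv("LDAP_ADDR"),   // an LDAP host reachable on port 389
		os.Getenv("LDAP_DC"),     // e.g. "dc=example,dc=org"
		os.Getenv("COURSE_CODE"), // expanded into the "(cn=fs_<code>_1)" group filter
		os.Getenv("ADMINS"),      // space-separated admin usernames
	)

	r := chi.NewRouter()
	r.Use(routes.SessionHandler) // guards the /admin and /group prefixes
	r.Get("/", routes.RedirectLogin)
	r.Get("/login", routes.GetLogin)
	r.Post("/login", routes.PostLogin)
	r.Get("/logout", routes.GetLogout)
	r.Mount("/group", routes.GroupRouter())

	log.Fatal(http.ListenAndServe(":8080", r))
}
```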
{
"alpha_fraction": 0.6980234384536743,
"alphanum_fraction": 0.6980234384536743,
"avg_line_length": 23.836362838745117,
"blob_id": "1fddbba64d0b1aa5b07e04dfc799a0e6b8d43159",
"content_id": "595766f2edc2abefe5b49098f814f2b414bdbbec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 2732,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 110,
"path": "/routes/sessions_test.go",
"repo_name": "vetletm/hacking-portal",
"src_encoding": "UTF-8",
"text": "package routes\n\nimport (\n\t\"bytes\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc mockSession(user string, valid bool) http.Cookie {\n\tif sessions == nil {\n\t\tsessions = make(map[string]Session)\n\t}\n\n\tif !valid {\n\t\treturn http.Cookie{\n\t\t\tName: \"session_token\",\n\t\t\tValue: user,\n\t\t\tExpires: time.Now(),\n\t\t}\n\t}\n\n\texpiration := time.Now().Add(time.Minute)\n\tcookie := http.Cookie{\n\t\tName: \"session_token\",\n\t\tValue: user,\n\t\tExpires: expiration,\n\t}\n\n\tsessions[user] = Session{\n\t\tUsername: user,\n\t\tExpires: expiration,\n\t}\n\n\treturn cookie\n}\n\nfunc TestGetLogin(t *testing.T) {\n\t// create a request to pass to the handler\n\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\n\t// create a response recorder to record the response from the handler\n\tres := httptest.NewRecorder()\n\n\t// serve the handler\n\thandler := http.HandlerFunc(GetLogin)\n\thandler.ServeHTTP(res, req)\n\n\t// test the status\n\trequire.Equal(t, http.StatusOK, res.Code, \"handler returned wrong status code\")\n}\n\nfunc TestPostLogin(t *testing.T) {\n\t// create a request to pass to the handler\n\treq := httptest.NewRequest(\"POST\", \"/\", bytes.NewBuffer([]byte(\"\")))\n\n\t// create a response recorder to record the response from the handler\n\tres := httptest.NewRecorder()\n\n\t// serve the handler\n\thandler := http.HandlerFunc(PostLogin)\n\thandler.ServeHTTP(res, req)\n\n\t// test the status\n\trequire.Equal(t, http.StatusBadRequest, res.Code, \"handler returned wrong status code\")\n}\n\nfunc TestGetLogout(t *testing.T) {\n\ttestData := []struct {\n\t\tcookie http.Cookie\n\t\tcode int\n\t}{\n\t\t{cookie: http.Cookie{Name: \"invalid\", Value: \"\", Expires: time.Now()}, code: http.StatusTemporaryRedirect},\n\t\t{cookie: http.Cookie{Name: \"session_token\", Value: \"test\", Expires: time.Now().Add(time.Minute)}, code: http.StatusTemporaryRedirect},\n\t}\n\n\tfor _, data := range testData {\n\t\t// create a request to pass to the handler\n\t\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\t\treq.AddCookie(&data.cookie)\n\n\t\t// create a response recorder to record the response from the handler\n\t\tres := httptest.NewRecorder()\n\n\t\t// serve the handler\n\t\thandler := http.HandlerFunc(GetLogout)\n\t\thandler.ServeHTTP(res, req)\n\n\t\t// test the status\n\t\trequire.Equal(t, data.code, res.Code, \"handler returned wrong status code\")\n\t}\n}\n\nfunc TestRedirectLogin(t *testing.T) {\n\t// create a request to pass to the handler\n\treq := httptest.NewRequest(\"GET\", \"/\", nil)\n\n\t// create a response recorder to record the response from the handler\n\tres := httptest.NewRecorder()\n\n\t// serve the handler\n\thandler := http.HandlerFunc(RedirectLogin)\n\thandler.ServeHTTP(res, req)\n\n\t// test the status\n\trequire.Equal(t, http.StatusTemporaryRedirect, res.Code, \"handler returned wrong status code\")\n}\n"
}
] | 32 |
VictoriaGrillo-132/PiProject-132
|
https://github.com/VictoriaGrillo-132/PiProject-132
|
41249e6f88f263dbe2611e5e43fc67f546eb12db
|
5929baf312640196096f383b4c7e17a0a9ce5d95
|
f16e834d47fa48ce23714aed606822f8b3103a40
|
refs/heads/main
| 2023-03-09T10:46:39.712286 | 2021-02-26T04:54:44 | 2021-02-26T04:54:44 | 322,356,022 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5807960033416748,
"alphanum_fraction": 0.5995004773139954,
"avg_line_length": 40.82222366333008,
"blob_id": "f852915792f3279b9b3a1c188e8b62b02f08562f",
"content_id": "aa039d2fa0d483dda919684cc3c94c155e08bf27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18819,
"license_type": "no_license",
"max_line_length": 465,
"num_lines": 450,
"path": "/main.py",
"repo_name": "VictoriaGrillo-132/PiProject-132",
"src_encoding": "UTF-8",
"text": "#####################################################################################\n# Team Name: Team Java\n# Group Members: Austen Belmonte, Josue Gaona, Victoria Grillo\n# Description: Pi-Project. Study game that uses variations of different style games.\n#####################################################################################\n\nfrom tkinter import * \nimport RPi.GPIO as GPIO\nfrom random import randint\nfrom time import sleep\nLetters=(\"Times New Roman\", 14)\nFONT = \"Times New Roman\"\n\n#Setting up the leds and buttons\nGPIO.setmode(GPIO.BCM)\nled1 = 18 #red LED\nled2 = 17 #blue LED\nbutton1 = 26 #with red LED\nbutton2 = 13 #with blue LED\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(led2, GPIO.OUT)\nGPIO.setup(button1, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\nGPIO.setup(button2, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n\n\n#This could possibly useless and be moved into the JepdyBoard class\nclass ValsandAns():\n questions = [[\"How is Abstraction defined as?\",\"Multiple inheritance is _______?\",\"A Queue is a _____.\",\"A Dictionary can be defined as?\",\"Zero in a state means it is what?\"],\n [\"True or False: A pyhton list size is not fixed.\",\"True or False: A Stack is a FIFO Structure.\",\"What does R in Flip Flop stand for?\",\"_ _ _ _ _ _ _-This word means it selects a register.\",\"_ _ _ _ _ _ _ (space) _ _ _ _ _ _ _-Is the name of an equation in computer science.\"], \n [\"What does S in Flip Flop stand for?\",\"An abbreviation for multiplexer is?\",\"What letter holds the state in a flip flop until signed changes it?\",\"A Flip Flop is used to store what two numbers? (seperate by comma: ex:2,4)\",\"What is 5 in binary?\"]]\n def __init__(self, points):\n self.points = points\n\n @property\n def points(self):\n return self._points\n\n @points.setter\n def points(self, value):\n self._points = value\n \n def Questions(self, location):\n JepdyBoard.Que.config(state=NORMAL)\n JepdyBoard.Que.delete(\"1.0\", END)\n #print question to question box\n JepdyBoard.Que.insert(END, ValsandAns.questions[location[0]][location[1]])\n JepdyBoard.Que.config(state=DISABLED)\n \n def ChoosePerson(self):\n global led1, led2, button1, button2\n if GPIO.input(button1) == GPIO.HIGH:\n GPIO.output(led1, GPIO.HIGH)\n sleep(.5)\n GPIO.output(led1, GPIO.LOW)\n return 1\n elif GPIO.input(button2) == GPIO.HIGH:\n GPIO.output(led2, GPIO.HIGH)\n sleep(.5)\n GPIO.output(led2, GPIO.LOW)\n return 2\n\nclass Player:\n def __init__(self, name):\n self.name = name\n self._points = 0\n\n @property\n def points(self):\n return self._points\n \n @points.setter\n def points(self, value):\n self._points = value\n PlayersFrame.updatePlayerLabels(self)\n\n @property\n def active(self):\n return self._active\n \n @active.setter\n def active(self, value):\n self._active = value\n PlayersFrame.updatePlayerLabels(self)\n\nclass PlayerFrame(Frame): \n def __init__(self, parent):\n Frame.__init__(self, parent)\n PlayerFrame.Player1Name = Label(self, font=(FONT, 24))\n PlayerFrame.Player1Name.bind(\"<Button-1>\", self.NameChange)\n PlayerFrame.Player2Name = Label(self, font=(FONT, 24))\n PlayerFrame.Player2Name.bind(\"<Button-1>\", self.NameChange)\n\n PlayerFrame.Player1Points = Label(self, font=(FONT, 24))\n PlayerFrame.Player2Points = Label(self, font=(FONT, 24))\n\n PlayerFrame.Player1Name.grid(row=0, column=0, sticky=NSEW, padx=(15,0), pady=(10,0))\n PlayerFrame.Player2Name.grid(row=0, column=5, sticky=NSEW, padx=(0,15), pady=(10,0))\n PlayerFrame.Player1Points.grid(row=1, column=0, sticky=NSEW, 
padx=0, pady=0)\n PlayerFrame.Player2Points.grid(row=1, column=5, sticky=NSEW, padx=0, pady=0)\n\n PlayerFrame.infoLabel = Label(self, text=\"Ready!\", font=(FONT, 28), wraplength=560)\n PlayerFrame.infoLabel.grid(row=3, column=4, sticky=NSEW, pady=0)\n PlayerFrame.infoLabel.after(800, lambda: PlayerFrame.infoLabel.configure(text=\"Ready! Set!\"))\n PlayerFrame.infoLabel.after(1600, lambda: PlayerFrame.infoLabel.configure(text=\"Ready! Set! Study!\"))\n self.grid_columnconfigure(0, weight=0)\n self.grid_columnconfigure(4, weight=1)\n self.pack(anchor=N, expand=1, fill=X)\n self.activePlayer = 0\n\n @property\n def activePlayer(self):\n return self._activePlayer\n\n @activePlayer.setter\n def activePlayer(self, value):\n self._activePlayer = value\n self.updatePlayerLabels(Players[value])\n\n def updatePlayerLabels(self, player): \n PlayerFrame.Player1Name.configure(text=Players[0].name, foreground=[\"Gold2\", \"Black\"][self.activePlayer])\n PlayerFrame.Player2Name.configure(text=Players[1].name, foreground=[\"Black\", \"Gold2\"][self.activePlayer])\n PlayerFrame.Player1Points.configure(text=\"$ {}\".format(Players[0].points), foreground=[\"Gold2\", \"Black\"][self.activePlayer])\n PlayerFrame.Player2Points.configure(text=\"$ {}\".format(Players[1].points), foreground=[\"Black\", \"Gold2\"][self.activePlayer])\n\n def addPoints(self, pts):\n Players[self.activePlayer].points += pts\n \n def Switch(self):\n #swap active players\n self.activePlayer = abs(self.activePlayer-1)\n\n def NameChange(self, e):\n print(\"It dosen't work\")\n\nclass TypeQuestion(Frame):\n #nothing is right\n right_ans=[\"True\",\"False\",\"Reset\",\"Decoder\",\"Boolean Algebra\"]\n def __init__(self, parent, question, location):\n Frame.__init__(self, parent)\n\n TypeQuestion.the_que=question\n TypeQuestion.the__answer=location[1]\n self.location = location\n\n #Question Box\n TypeQuestion.Que = Text(self, bg=\"light grey\", height=3, font=Letters)\n TypeQuestion.Que.insert(\"1.0\", question)\n TypeQuestion.Que.pack(anchor=N, fill=X)\n TypeQuestion.Que.config(state=DISABLED)\n\n #Entry for Answering\n TypeQuestion.Ans = Entry(self, font=Letters)\n TypeQuestion.Ans.bind(\"<Return>\", self.process)\n TypeQuestion.Ans.pack(anchor=S, fill=X, side=BOTTOM)\n TypeQuestion.Ans.focus()\n\n #Back Button\n TypeQuestion.back = Button(self, text=\"Give up\", command=lambda Exit=\"give up\": self.Leave(Exit))\n TypeQuestion.back.pack(anchor=SW, expand=1)\n\n def process(self, event):\n response = TypeQuestion.Ans.get()\n response = response.lower()\n\n if response == TypeQuestion.right_ans[TypeQuestion.the__answer].lower():\n self.Right()\n else:\n self.Wrong()\n TypeQuestion.Ans.delete(0, END) \n\n def Right(self):\n PlayersFrame.addPoints(600)\n print(self.location)\n GBoard.DisableBtn(self.location)\n self.Leave(\"Right\")\n\n def Wrong(self):\n PlayersFrame.addPoints(-600)\n PlayersFrame.Switch()\n PlayerFrame.infoLabel.configure(text=\"Wrong!\", foreground=\"red\", font=(FONT, 38))\n\n def Leave(self, Exit):\n if Exit == \"give up\":\n PlayerFrame.infoLabel.configure(text=\"Correct Answer: {}\".format(TypeQuestion.right_ans[TypeQuestion.the__answer]), foreground=\"red\", font=(FONT, 28))\n GBoard.DisableBtn(self.location)\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n else:\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n PlayerFrame.infoLabel.configure(text=\"Correct!\", foreground=\"green\", font=(FONT, 38))\n\nclass TypeMulti(Frame):\n right_ans=[\"Ability to ignore details of parts of a 
system\",\"Class inherits two or more superclasses\",\"FIFO\",\"Associative arrays\",\"Low\",\"Set\",\"MUX\", \"Q\", \"0 or 1\", \"0101\"]\n wrong_ans=[[\"Increase the system by expanding the data\",\"Ability to focus on one part of a system\",\"Idea of multiple methods in a class\"],[\"Class inherits only one superclass\",\"self contained component at a program\",\"To links b/w separate units of a program\"],[\"LIFO\",\"LILO\",\"FILO\"],[\"Disassociative arrays\",\"Maps values to keys\",\"Where keys can be changed\"],[\"High\",\"0\",\"1\"],[\"Stop\",\"Start\",\"Step\"],[\"MLT\",\"M\",\"MTX\"],[\"S\",\"R\",\"F\"],[\"1 or 2\",\"3 or 4\",\"2 or 4\"]]\n def __init__(self, parent, question, location):\n Frame.__init__(self, parent)\n\n TypeMulti.the_que=question\n #location of Jeporady button that we came from\n self.location = location\n\n #Question Box\n TypeMulti.Que = Text(self, bg=\"light grey\", width=70, height=3, font=Letters)\n TypeMulti.Que.insert(\"1.0\", question)\n TypeMulti.Que.grid(row=0, column=1, columnspan=2,sticky=N)\n TypeMulti.Que.config(state=DISABLED)\n\n #Answer Choices\n the_ans_row = randint(1,2)\n the_ans_col = randint(1,2)\n wrongs = 2\n for row_index in range(1,3):\n Grid.rowconfigure(self, row_index, weight=1)\n #number of columns\n for col_index in range(1,3):\n Grid.columnconfigure(self, col_index, weight=1)\n if row_index == the_ans_row and col_index == the_ans_col:\n btn = Button(self, width=20, command=lambda Right=1: TypeMulti.Right(self), text=\"{}\".format(TypeMulti.right_ans[location[1]]), font=Letters)\n else:\n btn = Button(self, width=20, command=lambda Wrong=1: TypeMulti.Wrong(self), text=\"{}\".format(TypeMulti.wrong_ans[location[1]][wrongs]), font=Letters)\n wrongs -= 1\n\n if row_index == 1:\n btn.grid(row=row_index, column=col_index, sticky=NSEW, padx=20, pady=20)\n else:\n btn.grid(row=row_index, column=col_index, sticky=NSEW, padx=20, pady=20)\n\n #Back Button\n TypeMulti.back = Button(self, text=\"Give up\", command=lambda Exit=\"give up\": TypeMulti.Leave(self, Exit))\n TypeMulti.back.grid(row = 3, column=0, sticky=N+W+S)\n self.pack(expand=1, fill=BOTH)\n\n def Right(self):\n PlayersFrame.addPoints(300)\n # disable the button we came from\n GBoard.DisableBtn(self.location)\n self.Leave(\"Right\")\n \n \n def Wrong(self):\n PlayersFrame.addPoints(-300)\n PlayersFrame.Switch()\n #TODO better way to show that you are wrong.\n PlayerFrame.infoLabel.configure(text=\"Wrong!\", foreground=\"red\")\n\n\n def Leave(self, Exit):\n if Exit == \"give up\":\n PlayerFrame.infoLabel.configure(text=\"Correct Answer: {}\".format(TypeMulti.right_ans[self.location[1]]), foreground=\"red\")\n GBoard.DisableBtn(self.location)\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n else:\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n PlayerFrame.infoLabel.configure(text=\"Correct!\", foreground=\"green\")\n\n#Just here for when we come up with 3rd game\nclass TypeGuess(Frame):\n chances = 3\n right_ans=[\"set\",\"mux\",\"q\",\"0,1\",\"0101\"]\n qwert = [[\"q\",\"w\",\"e\",\"r\",\"t\",\"y\",\"u\",\"i\",\"o\",\"p\"],[\"a\",\"s\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\",\"],[\"z\",\"x\",\"c\",\"v\",\"b\",\"n\",\"m\",\"0\",\"1\"]]\n def __init__(self, parent, question, location):\n self.akeys = []\n Frame.__init__(self, parent)\n self.location = location\n TypeGuess.the_answer=TypeGuess.right_ans[location[1]]\n qwerty=TypeGuess.qwert\n TypeGuess.question = question\n \n #Question Box\n TypeGuess.blanks = \"_\" * len(TypeGuess.the_answer)\n 
TypeGuess.Que = Text(self, bg=\"light grey\", height=4, font=Letters, wrap=WORD)\n TypeGuess.Que.insert(\"1.0\", question + \"\\n\" + TypeGuess.blanks)\n TypeGuess.Que.grid(row=0, column=3, columnspan=4, sticky=N)\n TypeGuess.Que.config(state=DISABLED)\n\n #Buttons for Letters\n for row_ind in range(1, len(qwerty)+1):\n Grid.rowconfigure(self, row_ind, weight=1)\n #individual keys\n for keys in range(len(qwerty[row_ind-1])):\n Grid.columnconfigure(self, keys, weight=1)\n key = Button(self, height=30, width=30, font=(FONT, 14), command=lambda letter=(row_ind-1,keys): TypeGuess.process(self, letter), text=\"{}\".format(qwerty[row_ind-1][keys]))\n self.akeys.append(key)\n if row_ind == 1:\n key.grid(row=row_ind, column=keys, sticky=NSEW, pady=(20, 10))\n elif row_ind == 3:\n key.grid(row=row_ind, column=keys+1, sticky=NSEW, pady=10)\n else:\n key.grid(row=row_ind, column=keys, sticky=NSEW, pady=10)\n\n #Back Button\n TypeGuess.back = Button(self, text=\"Give Up\", command=lambda Exit=\"give up\": TypeGuess.Leave(self, Exit))\n TypeGuess.back.grid(row = 4, column=0, sticky=N+W+S)\n \n # Creating main label\n TypeGuess.display_used = StringVar()\n TypeGuess.display = Label(self, textvariable=TypeGuess.display_used, font=Letters)\n TypeGuess.display.grid(row=0, column=0, sticky=N+W+S+E, columnspan=2)\n TypeGuess.display_used.set(\"Used Letters:\\n\")\n\n TypeGuess.display_strikes = StringVar()\n TypeGuess.display_again = Label(self, textvariable=TypeGuess.display_strikes, font=Letters)\n TypeGuess.display_again.grid(row=0, column=7, sticky=N+W+S+E, columnspan=2)\n TypeGuess.display_strikes.set(\"Strikes:\\n\")\n\n def process(self, event):\n TypeGuess.Que.config(state=NORMAL)\n chara = TypeGuess.qwert[event[0]][event[1]]\n Used = TypeGuess.display_used.get()\n Used += chara\n TypeGuess.display_used.set(Used)\n\n if chara in TypeGuess.the_answer:\n blanklist = list(TypeGuess.blanks)\n print(blanklist)\n indexes = [i for i, letter in enumerate(TypeGuess.the_answer) if letter == chara]\n print(indexes)\n for _ in indexes:\n blanklist[_] = (chara)\n\n \n TypeGuess.blanks = \"\".join(blanklist)\n TypeGuess.Que.delete(\"1.0\", END)\n TypeGuess.Que.insert(\"1.0\", TypeGuess.question + \"\\n\" + TypeGuess.blanks)\n if \"_\" not in TypeGuess.blanks:\n self.Guessed()\n else:\n Strikes = TypeGuess.display_strikes.get()\n Strikes += \"X\"\n TypeGuess.display_strikes.set(Strikes)\n TypeGuess.chances -= 1\n\n if TypeGuess.chances < 0:\n #PlayersFrame.addPoints(-900)\n PlayersFrame.Switch()\n TypeGuess.display_strikes.set(\"Strikes:\\n\")\n TypeGuess.chances = 3\n \n TypeGuess.Que.config(state=DISABLED)\n self.DisableKeys(event)\n \n def DisableKeys(self, event):\n if event[0] == 2:\n self.akeys[(event[0]*10) + (event[1]-1)].configure(state=\"disabled\") \n else:\n self.akeys[(event[0]*10) + event[1]].configure(state=\"disabled\")\n \n \n\n def Guessed(self):\n GBoard.DisableBtn(self.location)\n PlayersFrame.addPoints(900)\n self.Leave(\"Guessed\")\n\n def Leave(self, Exit):\n if Exit == \"give up\":\n PlayerFrame.infoLabel.configure(text=\"Correct Answer: {}\".format(TypeGuess.right_ans[self.location[1]]), foreground=\"red\")\n GBoard.DisableBtn(self.location)\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n else:\n self.pack_forget()\n GBoard.pack(expand=1, fill=BOTH)\n PlayerFrame.infoLabel.configure(text=\"Correct!\", foreground=\"green\")\n\nclass JepdyBoard(Frame):\n #cp=ValsandAns.ChoosePerson()\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.btns = []\n\n def Setup(self):\n 
#Create jeoparady board buttons\n for row_index in range(2,5):\n Grid.rowconfigure(self, row_index, weight=1)\n #number of columns\n for col_index in range(5):\n Grid.columnconfigure(self, col_index, weight=1)\n btn = Button(self, command=lambda location=((row_index-2),col_index): JepdyBoard.BtnClick(self, location), text=\"${}\".format(300*(row_index-1)), font=Letters)\n self.btns.append(btn)\n if row_index == 2:\n btn.grid(row=row_index, column=col_index, sticky=N+S+E+W, padx=10, pady=(20, 10))\n else:\n btn.grid(row=row_index, column=col_index, sticky=N+S+E+W, padx=10, pady=10)\n self.pack(anchor=N, expand=1, fill=BOTH)\n\n def DisableBtn(self, location):\n self.btns[(location[0] * 5) + location[1]].configure(state=\"disabled\")\n\n def BtnClick(self, location):\n #location is (row,column)\n #print(location)\n #Remove the jeoparady board\n self.pack_forget()\n if location[0] == 1:\n QFrame = TypeQuestion(window, ValsandAns.questions[location[0]][location[1]], location)\n PlayerFrame.infoLabel.configure(text=\"\")\n QFrame.pack(expand=1, fill=BOTH)\n QFrame.after(300, GBoard.waitplayer)\n \n elif location[0] == 0:\n MFrame = TypeMulti(window, ValsandAns.questions[location[0]][location[1]], location)\n PlayerFrame.infoLabel.configure(text=\"\")\n #MFrame.pack(expand=1, fill=BOTH)\n MFrame.after(300, GBoard.waitplayer)\n\n elif location[0] == 2:\n GFrame = TypeGuess(window, ValsandAns.questions[location[0]][location[1]], location)\n PlayerFrame.infoLabel.configure(text=\"\")\n GFrame.pack(expand=1, fill=BOTH)\n GFrame.after(300, GBoard.waitplayer)\n\n def flashLED(self):\n player = PlayersFrame.activePlayer\n if (player == 0):\n GPIO.output(led1, GPIO.HIGH)\n GPIO.output(led2, GPIO.LOW)\n elif (player == 1):\n GPIO.output(led2, GPIO.HIGH)\n GPIO.output(led1, GPIO.LOW)\n self.after(50, self.flashLED)\n\n def waitplayer(self):\n #where the player waits till someone \"buzzes\" in to answer\n while(True):\n if GPIO.input(button1) == GPIO.HIGH:\n PlayersFrame.activePlayer = 0\n self.after(10, self.flashLED)\n return\n \n elif GPIO.input(button2) == GPIO.HIGH:\n PlayersFrame.activePlayer = 1\n self.after(10, self.flashLED)\n return\n \nwindow = Tk()\nwindow.title(\"Ready Set Study!\")\nWIDTH = 800\nHEIGHT = 420\nwindow.geometry(\"{}x{}\".format(WIDTH, HEIGHT))\n#Make players frame and get their name somehow\nGBoard = JepdyBoard(window)\nPlayers = [Player(\"Player 1\"), Player(\"Player 2\")]\nPlayersFrame = PlayerFrame(window)\nGBoard.Setup()\nwindow.mainloop()\nGPIO.cleanup()"
},
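The button wiring in the Tkinter file above leans on a classic closure idiom: each `command=lambda letter=(row_ind-1, keys): ...` binds the loop variables through a default argument so every key remembers its own coordinates. A minimal standalone sketch of why the default argument is needed (names here are illustrative, not from the repo):

# Each callback built without a default argument sees the loop variable's final value.
def make_callbacks_buggy():
    return [lambda: i for i in range(3)]

# Binding i as a default argument freezes the current value for each callback.
def make_callbacks_fixed():
    return [lambda i=i: i for i in range(3)]

print([f() for f in make_callbacks_buggy()])  # [2, 2, 2]
print([f() for f in make_callbacks_fixed()])  # [0, 1, 2]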
{
"alpha_fraction": 0.7250000238418579,
"alphanum_fraction": 0.762499988079071,
"avg_line_length": 19.125,
"blob_id": "7f6c5833b22c0f9995b4fdb596b950ad6f8016c1",
"content_id": "42e1ae364f6cc270c30cd50e9aee7d578f527d55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 8,
"path": "/README.md",
"repo_name": "VictoriaGrillo-132/PiProject-132",
"src_encoding": "UTF-8",
"text": "# PiProject-132\nPi Project-Winter CSC 132\n\n## Things to do\n- Make Title Screen\n- Choose font\n- add backgrounds for everything\n- Add correct and incorent noises"
}
] | 2 |
rootsergio/hashbot
|
https://github.com/rootsergio/hashbot
|
d1fcf2ba38c7698b664920388c7d1e758cbc6da1
|
4a91035e1bc84deff6d35fe9292e5d678b33008f
|
3fdf0ce22b0e38d8974a8b91ab4799c24800cf3b
|
refs/heads/main
| 2023-03-14T23:13:18.776052 | 2021-03-23T14:16:57 | 2021-03-23T14:16:57 | 323,444,412 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7957559823989868,
"alphanum_fraction": 0.7957559823989868,
"avg_line_length": 28.076923370361328,
"blob_id": "718fea4451e278ca834b5e635caec7d6d64da8fd",
"content_id": "d77386d9128afa207132ad8d3eac05fc03950b25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 13,
"path": "/bot.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "import logging\nfrom aiogram import Bot, Dispatcher\nfrom config.config import settings\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=settings.BOT_TOKEN)\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n\nasync def send_message(message, chat_id=None):\n await bot.send_message(chat_id=chat_id, text=message)"
},
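bot.py's send_message helper is a thin async wrapper around aiogram's Bot.send_message. A hedged usage sketch follows; it assumes config.config provides a valid BOT_TOKEN (so the import succeeds), and the chat id is a placeholder:

import asyncio
from bot import send_message  # bot.py above; importing it constructs the Bot

async def notify():
    # 123456789 is a placeholder chat id, not a real one from the repo
    await send_message("1 hash recovered", chat_id=123456789)

# asyncio.run(notify())  # left commented: it would perform a live Telegram API call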
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 20.428571701049805,
"blob_id": "cb2467b27e45704e7eb4cc4fb975931159014470",
"content_id": "e17e7a00b93fbdadf6c207fa33c5fcf399febe57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 7,
"path": "/main.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from aiogram import executor\nfrom bot import dp\nfrom handlers import *\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)\n"
},
{
"alpha_fraction": 0.6538108587265015,
"alphanum_fraction": 0.6538108587265015,
"avg_line_length": 30.941177368164062,
"blob_id": "c0a69113599ddecec498c214a379c1eee9b8985f",
"content_id": "78b5249ceff20e1c584b3256472ff32b5d74d479",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 34,
"path": "/response_to_user.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from database import DatabaseTlgBot, DatabaseHashtopolis\nfrom aiogram import Bot\nfrom config.config import settings\nimport asyncio\n\nTOKEN = settings.BOT_TOKEN\nbot = Bot(TOKEN)\n\ndb_hashtopolis = DatabaseHashtopolis()\ndb_tlg = DatabaseTlgBot()\n\n\ndef get_found_password():\n found_passwords = db_hashtopolis.get_cracked_hashes()\n chat_id_list = {rec.get('chat_id') for rec in found_passwords}\n # print(chat_id_list)\n passwords_for_transmission = {}\n for chat_id in chat_id_list:\n passwords_for_transmission[chat_id] = {rec.get('hash'): rec.get('plaintext') for rec in found_passwords\n if rec.get('chat_id') == chat_id}\n return passwords_for_transmission\n\n\nasync def sending_user_data():\n found_password = get_found_password()\n message = ''\n for chat_id, values in found_password.items():\n for hash, plaintext in values.items():\n message += f\"{hash} {plaintext}\\n\"\n await bot.send_message(chat_id=chat_id, text=message)\n\n\nif __name__ == '__main__':\n asyncio.run(sending_user_data())\n\n\n\n"
},
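get_found_password above groups flat DB rows by chat_id with a set plus a dict comprehension per chat; the same grouping can be done in a single pass with collections.defaultdict. A self-contained sketch on hypothetical rows:

from collections import defaultdict

rows = [
    {"chat_id": 1, "hash": "a1", "plaintext": "pw1"},
    {"chat_id": 2, "hash": "b2", "plaintext": "pw2"},
    {"chat_id": 1, "hash": "c3", "plaintext": "pw3"},
]

grouped = defaultdict(dict)
for rec in rows:
    grouped[rec["chat_id"]][rec["hash"]] = rec["plaintext"]

print(dict(grouped))  # {1: {'a1': 'pw1', 'c3': 'pw3'}, 2: {'b2': 'pw2'}}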
{
"alpha_fraction": 0.7234042286872864,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 51.22222137451172,
"blob_id": "0a8c7408c56b45c311fba34286a10d968eb5e128",
"content_id": "06fe37272ac930d0f6ab13e229d2a57f5c444970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 644,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 9,
"path": "/handlers/default_handler.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\nfrom bot import dp\n\n\[email protected]_handler(content_types=types.ContentTypes.ANY)\nasync def all_other_messages(message: types.Message):\n await message.answer(\"Бот используется для восстановления пароля из хэш суммы.\\n\"\n \"Данный бот предназначен исключительно для восстановления собственных забытых и утерянных \"\n \"паролей. Для начала работы можете использовать команды\\n/start\\n/help\\n/decrypt\")\n"
},
{
"alpha_fraction": 0.5734042525291443,
"alphanum_fraction": 0.6046542525291443,
"avg_line_length": 39.64864730834961,
"blob_id": "e4f3ab3485da73917ddf44bb12020fe3df3410c8",
"content_id": "a4f67ded4321144ae4a9d78826ba7092a27b3e2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8054,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 185,
"path": "/tools.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "import re\nfrom api_hashtopolis import HashtopolisUserApi\nfrom database import DatabaseTlgBot, DatabaseHashtopolis\n\nID_SALTED_ALGORITHM = [20, 2811, 120]\nID_WIFI_ALGORITHM = [2500]\n\nAVAILABLE_ALGORITHMS = {\n 0: {\n 'name': 'MD5',\n 'check': re.compile(r\"^([a-fA-F\\d]{32})$\")\n },\n 3200: {\n 'name': 'bcrypt,blowfish(openbsd)',\n 'check': re.compile(r\"^(\\$2[ayb]\\$.{56})$\")\n },\n 1000: {\n 'name': 'NTLM',\n 'check': re.compile(r\"^([a-fA-F\\d]{32})$\")\n },\n 2500: {\n 'id': 'WPA/WPA2',\n 'check': re.compile(r\"^(\\$2[ayb]\\$.{56})$\")\n },\n 10000: {\n 'name': 'Django (PBKDF2-SHA256)',\n 'check': re.compile(r\"^(pbkdf2_sha256\\$\\d{5}\\$.{56}=)$\")\n },\n 20: {\n 'name': 'md5($salt.$pass)',\n 'check': re.compile(r\"^([a-fA-F\\d]{32}):([a-zA-Z\\d]{32})$\")\n # 'check': re.compile(r\"^([a-fA-F\\d]{32}):([a-zA-Z\\d]+)\") # попадает любое кол-во символов соли\n },\n 500: {\n 'name': 'md5crypt, MD5(Unix), FreeBSD MD5, Cisco-IOS MD5 2',\n 'check': re.compile(r\"^(\\$1\\$\\S{8}\\$\\S{22})$\")\n },\n 2811: {\n 'name': 'IPB2+, MyBB1.2+',\n 'check': re.compile(r\"^([a-zA-Z\\d]{32}:[a-zA-Z\\d]{8})$\")\n },\n 300: {\n 'name': 'MySQL4.1/MySQL5+',\n 'check': re.compile(r\"^([a-zA-Z\\d]{40})$\")\n },\n 100: {\n 'name': 'SHA1',\n 'check': re.compile(r\"^([a-zA-Z\\d]{40})$\")\n },\n 400: {\n 'name': 'phpass, MD5(Wordpress), MD5(Joomla), MD5(phpBB3)',\n 'check': re.compile(r\"^(\\$H\\$9\\S{30})$\")\n },\n 120: {\n 'name': 'sha1($salt.$pass)',\n 'check': re.compile(r\"^([a-zA-Z\\d]{40}:[a-zA-Z\\d]{32})$\")\n },\n}\n\n\ndef get_algorithms_from_hash_list(hash_list):\n algorithms = list()\n for hash in hash_list:\n algorithm = [algorithm for algorithm, value in AVAILABLE_ALGORITHMS.items() if value.get('check').search(hash)]\n if algorithm and algorithm not in algorithms:\n algorithms.append(algorithm)\n if len(algorithms) > 1:\n return None\n if algorithms:\n return algorithms[0]\n return None\n\n\ndef check_hashes_against_the_algorithm(hashes_list, algorithm_id):\n \"\"\"\n Функция проверяет хэш на соответствие заданому алгоритму\n :param hashes_list:\n :param algorithm_id:\n :return:\n \"\"\"\n re_compile = AVAILABLE_ALGORITHMS.get(algorithm_id).get('check')\n correct_hashes = list()\n incorrect_hashes = list()\n verified_hashes = dict()\n for _hash in hashes_list:\n if re_compile.search(_hash):\n correct_hashes.append(_hash)\n else:\n incorrect_hashes.append(_hash)\n verified_hashes['correct'] = correct_hashes\n verified_hashes['incorrect'] = incorrect_hashes\n return verified_hashes\n\n\ndef get_run_supertask_id(hash_list_id: int) -> int:\n htapi = HashtopolisUserApi()\n tasks = htapi.listTasks()\n for task in tasks.get('tasks'):\n if task.get('hashlistId') == hash_list_id:\n return task.get('supertaskId')\n\n\ndef create_task(hash_list_name: str, hash_type_id: int, hashes: list, supertask_id: int, chat_id: int) -> bool:\n \"\"\"\n Создание задачи в Hashtopolis\n :param hash_list_name: имя создаваемого хэшлиста\n :param hash_type_id: id алгоритма\n :param hashes: список хэшей\n :param supertask_id: id supertask\n :param chat_id: id чата\n :return: True or False\n \"\"\"\n # Проверка на существование паролей в БД Hashtopolis для переданных хешей\n db_hashtopolis = DatabaseHashtopolis()\n db_tlgbot = DatabaseTlgBot()\n result = db_hashtopolis.check_hashes_in_available(hashes)\n if result: # Если в БД уже есть такие хэши\n # Смотрим, есть для них пароли\n cracked_hashes = {hash.get('hash') for hash in result if hash.get('plaintext')}\n if cracked_hashes: # Если пароли есть, создаём таск 
и добавляем ID хэшей в БД\n taskwrapper_id = db_tlgbot.get_taskwrapper_max_id()\n db_tlgbot.add_task(taskwrapper_id=taskwrapper_id, chat_id=chat_id, hashlist_id=None,\n supertask_id=supertask_id, priority=0)\n for hash in cracked_hashes:\n hash_id = db_hashtopolis.get_hash_id(hash=hash, is_cracked=1)\n db_tlgbot.add_hash(hashes_id=hash_id, taskwrapper_id=taskwrapper_id, is_cracked=1)\n hashes.remove(hash)\n # Проверяем наличие хэшей, для которых в БД нет пароля\n uncracked_hashes = {hash.get('hash') for hash in result if hash.get('hash') not in cracked_hashes}\n if uncracked_hashes:\n taskwrapper_id = db_tlgbot.get_taskwrapper_max_id()\n db_tlgbot.add_task(taskwrapper_id=taskwrapper_id, chat_id=chat_id, hashlist_id=None,\n supertask_id=supertask_id, priority=0, completed=True)\n # Если для хэша выполнены все таски, соответствующие супертаску, полученоому от пользователя\n for hash in uncracked_hashes:\n count_unfulfilled_tasks = db_hashtopolis.get_the_count_of_unfulfilled_tasks(hash=hash, supertask_id=supertask_id)\n if count_unfulfilled_tasks.get('COUNT(t.taskId)') == 0:\n # Добавляем этот хэш в таск как не взломанный\n hash_id = db_hashtopolis.get_hash_id(hash=hash)\n db_tlgbot.add_hash(hashes_id=hash_id, taskwrapper_id=taskwrapper_id, is_cracked=0)\n hashes.remove(hash)\n if not hashes:\n return True\n is_salted = False\n if hash_type_id in ID_SALTED_ALGORITHM:\n is_salted = True\n format = 0\n if hash_type_id in ID_WIFI_ALGORITHM:\n format = 1\n htapi = HashtopolisUserApi()\n response = htapi.create_hash_list(hash_list_name=hash_list_name, is_salted=is_salted, format=format,\n hash_type_id=hash_type_id, hashes=bytes('\\n'.join(hashes).encode('UTF-8')))\n if not response or response.get('response') == 'ERROR':\n return False\n hash_list_id = response.get('hashlistId')\n if not htapi.run_super_task(hash_list_id=hash_list_id, super_task_id=supertask_id):\n return False\n taskwrapper_id = get_run_supertask_id(hash_list_id=hash_list_id)\n if not taskwrapper_id:\n return False\n max_priority = db_tlgbot.get_last_priority()\n if not max_priority:\n # Если в таблице задач нет задачи с установленным приоритетом, берём максимальный приоритет среди активных задач\n response = htapi.listTasks()\n max_priority = response.get('tasks')[0].get('priority')\n response = htapi.set_supertask_priority(task_wrapper_id=taskwrapper_id, super_task_priority=max_priority - 1)\n if not response or response.get('response') == 'ERROR':\n return False\n db_tlgbot.add_task(chat_id=chat_id, hashlist_id=hash_list_id, supertask_id=supertask_id,\n taskwrapper_id=taskwrapper_id, priority=max_priority - 1)\n db_hashtopolis = DatabaseHashtopolis()\n hashes_id = db_hashtopolis.get_hash_id(hashlist_id=hash_list_id)\n db_tlgbot.add_hash(taskwrapper_id=taskwrapper_id, hashes_id=hashes_id)\n return True\n\n\nif __name__ == \"__main__\":\n pass\n # hashlist = ['004823ba55c79cd95a331b1283d8cbfc',\n # '0096f31cafba65a4719b644fdda7d885',\n # '00f3907872e28ec3cbd7608f3efc728f',\n # '0126e02d0a062ae96bb9f6053d26ef17',\n # '01a88086180795d2cc9a2d9ea348521d', ]\n # check_password_for_hash(hashlist)\n # print(check_hashes_against_the_algorithm(hashes, 0))\n"
},
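check_hashes_against_the_algorithm in tools.py is pure regex matching against the patterns in AVAILABLE_ALGORITHMS. A self-contained sketch using the MD5 pattern from that table (the sample hash is the MD5 of the empty string, shown for illustration only):

import re

md5_re = re.compile(r"^([a-fA-F\d]{32})$")  # pattern for algorithm id 0 (MD5)
candidates = ["d41d8cd98f00b204e9800998ecf8427e", "not-a-hash"]

correct = [h for h in candidates if md5_re.search(h)]
incorrect = [h for h in candidates if not md5_re.search(h)]
print(correct)    # ['d41d8cd98f00b204e9800998ecf8427e']
print(incorrect)  # ['not-a-hash']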
{
"alpha_fraction": 0.7599244117736816,
"alphanum_fraction": 0.7599244117736816,
"avg_line_length": 30.176469802856445,
"blob_id": "e2d03792815ee14fdd6879e9ebb903d374e7a044",
"content_id": "2cb2dcd11fd69bcc7bab6e15613a2d2a9bc238d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 17,
"path": "/handlers/general_commands.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\nfrom bot import dp\n\n\[email protected]_handler(commands=['test'])\nasync def cmd_start(message: types.Message):\n await message.answer(f\"Информация о пользователе{message.chat.values}\")\n\n\[email protected]_handler(commands=['start'])\nasync def cmd_start(message: types.Message):\n await message.answer(\"Для начала работы используйте команду /decrypt\")\n\n\[email protected]_handler(commands=['help'])\nasync def cmd_start(message: types.Message):\n await message.answer(\"Для начала работы используйте команду /decrypt\")"
},
{
"alpha_fraction": 0.4982185363769531,
"alphanum_fraction": 0.5166270732879639,
"avg_line_length": 30.924171447753906,
"blob_id": "e6f8b60e0937f985ac4220ba3b0994e9359b1cbb",
"content_id": "9d74897de7790b981b2cbda9ecdf96bde7f38c65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6736,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 211,
"path": "/api_hashtopolis.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "import requests\nimport base64\nfrom config.config import settings\n\nAPI_URL = settings.API_URL\nAPI_KEY = settings.API_KEY\n\n\nclass HashtopolisUserApi:\n\n @staticmethod\n def _request(data):\n \"\"\"\n Return response\n :param data: API data\n :return: None or response.json()\n \"\"\"\n try:\n response = requests.post(API_URL, json=data)\n except (requests.RequestException, requests.ConnectionError, requests.HTTPError, requests.URLRequired) as err:\n print(err)\n return None\n if response.json().get('response') == 'ERROR':\n print(response.json().get('message'))\n return None\n return response.json()\n\n def set_active(self, agent_id: int, active: bool):\n \"\"\"\n Set active or inactive agent\n :param agent_id: agent id\n :param active: True or False\n :return: response or None\n response example:\n {'section': 'agent', 'request': 'setActive', 'response': 'OK'}\n \"\"\"\n data = {\n \"section\": \"agent\",\n \"request\": \"setActive\",\n \"active\": active,\n \"agentId\": agent_id,\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n def task_info(self, task_id):\n \"\"\"\n Return task info\n :param task_id: task id\n :return: response or None\n response example:\n {'section': 'task', 'request': 'getTask', 'response': 'OK', 'taskId': 33119, 'name': '4_ulm_001_my.rule',\n 'attack': '#HL# 4_ULM_001.txt -r my.rule -w3', 'chunksize': 600, 'color': '', 'benchmarkType': 'speed',\n 'statusTimer': 5, 'priority': 0, 'isCpuOnly': False, 'isSmall': False, 'skipKeyspace': 0, 'keyspace': 1868227,\n 'dispatched': 1868227, 'hashlistId': 4334, 'imageUrl': 'http://45.12.19.80/api/taskimg.php?task=33119',\n 'usePreprocessor': False, 'preprocessorId': 0, 'preprocessorCommand': '',\n 'files': [{'fileId': 4, 'filename': '4_ULM_001.txt', 'size': 18237746},\n {'fileId': 11, 'filename': 'my.rule', 'size': 42}],\n 'speed': 0, 'searched': 1868227, 'chunkIds': [50830], 'agents': [], 'isComplete': True, 'workPossible': True}\n \"\"\"\n data = {\n \"section\": \"task\",\n \"request\": \"getTask\",\n \"taskId\": task_id,\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n def create_hash_list(self, hash_list_name, is_salted: bool, format: int, hash_type_id: int, hashes: bytes):\n \"\"\"\n Create hash list\n :param hash_list_name: hash list name\n :param is_salted: Is salted: True or False\n :param format: Format hash: 0 or 1\n :param hash_type_id: Hash type\n :param hashes: Hashes\n :return: response or None\n response example:\n {\"section\": \"hashlist\",\n \"request\": \"createHashlist\",\n \"response\": \"OK\",\n \"hashlistId\": 101}\n \"\"\"\n hashes = base64.b64encode(hashes)\n hashes = hashes.decode('UTF-8')\n data = {\n \"section\": \"hashlist\",\n \"request\": \"createHashlist\",\n \"name\": hash_list_name,\n \"isSalted\": is_salted,\n \"isSecret\": True,\n \"isHexSalt\": False,\n \"separator\": \":\",\n \"format\": format,\n \"hashtypeId\": hash_type_id,\n \"accessGroupId\": 1,\n \"data\": hashes,\n \"useBrain\": False,\n \"brainFeatures\": 0,\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n def run_super_task(self, hash_list_id, super_task_id):\n \"\"\"\n Create a super task out of a configured preconfigured task collection\n :param hash_list_id: hash list id\n :param super_task_id: super task id\n :return: response or None\n response example:\n {\"section\": \"task\",\n \"request\": \"runSupertask\",\n \"response\": \"OK\"}\n \"\"\"\n data = {\n \"section\": \"task\",\n \"request\": \"runSupertask\",\n \"hashlistId\": hash_list_id,\n \"supertaskId\": 
super_task_id,\n \"crackerVersionId\": 1,\n \"accessKey\": API_KEY,\n }\n return self._request(data)\n\n def set_supertask_priority(self, task_wrapper_id, super_task_priority):\n \"\"\"\n Set the priority for a supertask.\n :param task_wrapper_id: task wrapper id\n :param super_task_priority: super task priority\n :return: response or None\n response example:\n {\"section\": \"task\",\n \"request\": \"setSupertaskPriority\",\n \"response\": \"OK\"}\n \"\"\"\n data = {\n \"section\": \"task\",\n \"request\": \"setSupertaskPriority\",\n \"supertaskId\": task_wrapper_id,\n \"supertaskPriority\": super_task_priority,\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n def listTasks(self):\n \"\"\"\n List all tasks on the server. There are two task types:\n 0 Normal Task\n 1 Supertask\n response example:\n {\n \"section\": \"task\",\n \"request\": \"listTasks\",\n \"response\": \"OK\",\n \"tasks\": [\n {\n \"taskId\": 7587,\n \"name\": \"test 2\",\n \"type\": 0,\n \"hashlistId\": 1,\n \"priority\": 5\n },\n {\n \"supertaskId\": 33,\n \"name\": \"Increment ?a\",\n \"type\": 1,\n \"hashlistId\": 1,\n \"priority\": 3\n }\n ]\n }\n :return:\n \"\"\"\n data = {\n \"section\": \"task\",\n \"request\": \"listTasks\",\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n def deleteHashlist(self, hashlist_id):\n \"\"\"\n Delete a hashlist and all according hashes.\n This will remove a hashlist from the superhashlists it is member of.\n :return:\n response example:\n {\n \"section\": \"hashlist\",\n \"request\": \"deleteHashlist\",\n \"response\": \"OK\"\n }\n \"\"\"\n data = {\n \"section\": \"hashlist\",\n \"request\": \"deleteHashlist\",\n \"hashlistId\": hashlist_id,\n \"accessKey\": API_KEY\n }\n return self._request(data)\n\n\nif __name__ == '__main__':\n ht = HashtopolisUserApi()\n response = ht.deleteHashlist(6494)\n print(response)\n # for task in response.get('tasks'):\n # priority = task.get('priority')\n # if priority > max_priority:\n # max_priority = priority\n # print(max_priority)\n # response = ht.set_supertask_priority(super_task_id=318, super_task_priority=8666)\n"
},
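The createHashlist request above base64-encodes the newline-joined hash list before POSTing it. A hedged sketch of assembling that payload; the list name and access key are placeholders, and nothing is actually sent:

import base64

hashes = ["d41d8cd98f00b204e9800998ecf8427e", "0cc175b9c0f1b6a831c399e269772661"]
payload = base64.b64encode("\n".join(hashes).encode("UTF-8")).decode("UTF-8")

data = {
    "section": "hashlist",
    "request": "createHashlist",
    "name": "example-list",       # illustrative name
    "isSalted": False,
    "isSecret": True,
    "isHexSalt": False,
    "separator": ":",
    "format": 0,
    "hashtypeId": 0,              # 0 = MD5 in this bot's algorithm table
    "accessGroupId": 1,
    "data": payload,
    "useBrain": False,
    "brainFeatures": 0,
    "accessKey": "YOUR_API_KEY",  # placeholder
}
print(data["data"][:24], "...")   # the encoded blob that would be POSTed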
{
"alpha_fraction": 0.8021978139877319,
"alphanum_fraction": 0.8021978139877319,
"avg_line_length": 29.33333396911621,
"blob_id": "b69268bd38614e00213948be5a6f73036ba09d56",
"content_id": "daa4ca2959820b560ce20849e16348be51e0ecb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 3,
"path": "/handlers/__init__.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from . import general_commands\nfrom . import hashes_commands\nfrom . import default_handler\n"
},
{
"alpha_fraction": 0.64462810754776,
"alphanum_fraction": 0.7190082669258118,
"avg_line_length": 23.399999618530273,
"blob_id": "37cd1878f86c9dbc20da9326a5cd02b24d88eb8d",
"content_id": "3bb6c813d3a5ac8ea7d526578cd491020ba6dbcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 5,
"path": "/config/settings.toml",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "# API hashtopolis\nAPI_URL = 'http://78.47.97.23/api/user.php'\n\n# WPA converter file name\nWPA_CONVERTER = 'cap2hccapx.exe'"
},
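config/config.py itself is not part of this dump; given the dynaconf dependency pinned in pyproject.toml, a plausible minimal sketch of how settings.toml could be exposed as the `settings` object the other modules import (file names and secret layout are assumptions):

from dynaconf import Dynaconf

# .secrets.toml would carry BOT_TOKEN, DB_* credentials and API_KEY
settings = Dynaconf(
    settings_files=["config/settings.toml", "config/.secrets.toml"],
)

print(settings.API_URL)  # -> http://78.47.97.23/api/user.php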
{
"alpha_fraction": 0.5995134115219116,
"alphanum_fraction": 0.6246272325515747,
"avg_line_length": 41.33222579956055,
"blob_id": "93cf64edcf20c3cd11fbb4c3a54995f1949b72f3",
"content_id": "3cc6b85d7d1dbbffbec0dcfd250ea0915ac4b49c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13423,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 301,
"path": "/database.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "# from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Boolean, Float, ForeignKey, func, desc\nfrom config.config import settings\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport pymysql.cursors\n\n\n# engine = create_engine('sqlite:///:memory:', echo=True)\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = 'users'\n\n chat_id = Column(Integer, primary_key=True, autoincrement=False)\n first_name = Column(String(50))\n last_name = Column(String(50))\n username = Column(String(50))\n language_code = Column(String(10))\n tasks_limit = Column(Integer, default=10) # Ограничение кол-ва принимаемых одновременно в работу хэшей\n\n def __repr__(self):\n return f\"<User(id={self.id}, fullname='{self.first_name} {self.last_name}', nickname={self.username})>\"\n\n\nclass Wallet(Base):\n __tablename__ = 'wallet'\n\n id = Column(Integer, primary_key=True)\n chat_id = Column(Integer, ForeignKey('users.chat_id'))\n btc = Column(Float, default=0)\n usd = Column(Float, default=0)\n rub = Column(Float, default=0)\n\n\nclass Task(Base):\n __tablename__ = 'tasks'\n\n taskwrapper_id = Column(Integer, primary_key=True, autoincrement=False)\n chat_id = Column(Integer, ForeignKey('users.chat_id'))\n hashlist_id = Column(Integer) # аналогичен значению поля hashtopolis.Hashlist(hashlistId)\n supertask_id = Column(Integer) # аналогичен значению поля hashtopolis.Supertask(supertaskId)\n completed = Column(Boolean) # Признак того, что выполнение задачи завершено\n priority = Column(Integer)\n is_send = Column(Boolean, default=False) # Признак того, что данные по задаче отправлены пользователю в чат\n\nclass Hashe(Base):\n __tablename__ = 'hashes'\n id = Column(Integer, primary_key=True)\n hash_id = Column(Integer, unique=False) # аналогичен значению поля hashtopolis.Hash(hashId)\n taskwrapper_id = Column(Integer, ForeignKey('tasks.taskwrapper_id'))\n is_cracked = Column(Boolean, default=False) # признак того, что пароль найден\n is_send = Column(Boolean, default=False) # Признак того, что данные по хэшу отправлены пользователю в чат\n\n\nclass Supertask(Base):\n __tablename__ = 'supertask'\n\n id = Column(Integer, primary_key=True, autoincrement=False) # аналогичен значению поля hashtopolis.Supertask(supertaskId)\n name = Column(String(100), nullable=False) # аналогичен значению поля hashtopolis.Supertask(supertaskName)\n price = Column(Float) # стоимость за хэш\n\n\nclass DatabaseTlgBot:\n __instance = None\n\n def __init__(self):\n if self.__initialized:\n return\n self.__initialized = True\n self.session = None\n self.engine = None\n self.connect()\n\n def __new__(cls):\n if cls.__instance is None:\n cls.__instance = super(DatabaseTlgBot, cls).__new__(cls)\n cls.__instance.__initialized = False\n return cls.__instance\n\n def _create_tables(self, table):\n Base.metadata.create_all(self.engine, tables=table)\n\n def connect(self, host=settings.DB_HOST, user=settings.DB_USER,\n password=settings.DB_PASSWORD, db_name=settings.DB_NAME):\n if not self.session:\n self.engine = create_engine(f'mysql+pymysql://{user}:{password}@{host}/{db_name}')\n Session = sessionmaker(bind=self.engine)\n self.session = Session()\n return self.session\n\n def close(self):\n self.session.close()\n self.close_engine()\n\n def close_engine(self):\n self.engine.dispose()\n\n def check_user_exist(self, chat_id):\n return self.session.query(User.chat_id).filter(User.chat_id == 
chat_id).scalar()\n\n def create_user(self, chat_id, first_name, last_name, username, language_code, tasks_limit=10):\n if self.check_user_exist(chat_id=chat_id):\n return\n user = User(chat_id=chat_id, first_name=first_name, last_name=last_name, username=username,\n language_code=language_code, tasks_limit=tasks_limit)\n wallet = Wallet(chat_id=chat_id)\n self.session.add(user)\n self.session.commit()\n self.session.add(wallet)\n self.session.commit()\n\n def get_count_active_task_for_user(self, chat_id):\n return self.session.query(Task).filter(Task.chat_id == chat_id).filter(Task.completed is False).count()\n\n def get_supertasks_info(self):\n return self.session.query(Supertask).all()\n\n def get_last_priority(self) -> int:\n return self.session.query(func.min(Task.priority)).filter(Task.completed == 0).one()[0]\n\n def get_taskwrapper_max_id(self):\n max_id = self.session.query(func.max(Task.taskwrapper_id)).one()[0]\n if max_id > 1000000:\n return max_id + 1\n if not max_id:\n return 1000000\n return max_id + 1000000\n\n def get_active_tasks(self):\n return self.session.query(Task).filter(Task.is_send == False).order_by(desc(Task.priority)).all()\n\n def allowed_accept_tasks(self, chat_id):\n task_limit_user = self.session.query(User.tasks_limit).filter(User.chat_id == chat_id).scalar()\n if task_limit_user:\n if self.get_count_active_task_for_user(chat_id) < task_limit_user:\n return True\n return False\n\n def add_task(self, taskwrapper_id, chat_id, hashlist_id, supertask_id, priority, completed=False):\n task = Task(taskwrapper_id=taskwrapper_id, chat_id=chat_id, hashlist_id=hashlist_id,\n supertask_id=supertask_id, priority=priority, completed=completed)\n self.session.add(task)\n self.session.commit()\n self.session.refresh(task)\n\n def add_hash(self, taskwrapper_id: int, hashes_id: list, is_cracked: int = 0):\n for hash_id in hashes_id:\n hash = Hashe(taskwrapper_id=taskwrapper_id, hash_id=hash_id, is_cracked=is_cracked)\n self.session.add(hash)\n self.session.commit()\n\n\nclass DatabaseHashtopolis:\n def __init__(self):\n self.connection = pymysql.connect(host=settings.DB_HOST, user=settings.DB_USER, password=settings.DB_PASSWORD,\n database=settings.DB_NAME_HASHTOPOLIS, cursorclass=pymysql.cursors.DictCursor)\n\n def get_hash_id(self, hashlist_id: int = None, hash: str = None, is_cracked = None) -> list:\n \"\"\"\n Возвращает hashId из Hashtopolis.Hash. В зависимости от полученного аргумента возвращает hashId всех хэшей,\n принадлежащих одному hashlist (аргумент hashlist_id) или\n hashId одного хэша, полученного в виде строки (аргумент hash). 
Если получен аргумент is_cracked, то возращает\n те хэши, для которых есть пароль\n :param hashlist_id: id hashlist\n :param hash: hash\n :return: Список hashId\n \"\"\"\n with self.connection.cursor() as cursor:\n if is_cracked:\n arg = \" AND isCracked = 1\"\n else:\n arg = ''\n if hashlist_id:\n sql = \"SELECT `hashId` FROM `Hash` WHERE `hashlistId`=%s %s\"\n sql += arg\n cursor.execute(sql, (hashlist_id, arg))\n if hash:\n sql = f\"SELECT `hashId` FROM `Hash` WHERE `hash`='{hash}' {arg} LIMIT 1\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return [i.get('hashId') for i in result]\n\n def get_hashlist_id(self, hashes_id: set):\n \"\"\"\n Возвращает hashlistId для списка hashId\n :param hashes_id:\n :return:\n \"\"\"\n with self.connection.cursor() as cursor:\n sql = f\"SELECT hashlistId FROM Hash WHERE hashId IN ({', '.join(str(h) for h in hashes_id)});\"\n cursor.execute(sql)\n return cursor.fetchall()\n\n def get_taskwrapper_id(self, hashlists_id: list):\n \"\"\"\n Возвращает taskwrapper_id, который выполнялись по переданным hashlists_id\n :param hashlist_id:\n :return:\n \"\"\"\n with self.connection.cursor() as cursor:\n sql = f\"SELECT tw.taskWrapperId FROM TaskWrapper tw WHERE tw.hashlistId \" \\\n f\"IN ({', '.join(str(h) for h in hashlists_id)}); \"\n cursor.execute(sql)\n return cursor.fetchall()\n\n def get_supertask_id(self, taskwrappers_id):\n \"\"\"\n Возвращает supertask_id, по которым выполнялись taskwrappers_id\n :param taskwrappers_id:\n :return:\n \"\"\"\n with self.connection.cursor() as cursor:\n sql = f\"SELECT s.supertaskId FROM TaskWrapper tw JOIN Supertask s ON tw.taskWrapperName = s.supertaskName\" \\\n f\" WHERE tw.taskWrapperId IN ({', '.join(str(h) for h in taskwrappers_id)});\"\n cursor.execute(sql)\n return cursor.fetchall()\n\n def get_the_count_of_unfulfilled_tasks(self, hash: str, supertask_id: int):\n with self.connection.cursor() as cursor:\n sql = f\"SELECT COUNT(t.taskId) FROM Hash h \" \\\n f\"LEFT JOIN TaskWrapper tw ON h.hashlistId = tw.hashlistId \" \\\n f\"LEFT JOIN Supertask s ON tw.taskWrapperName = s.supertaskName \" \\\n f\"LEFT JOIN Task t ON tw.taskWrapperId = t.taskWrapperId \" \\\n f\"LEFT JOIN Chunk c ON t.taskId = c.taskId \" \\\n f\"WHERE h.hash = '{hash}' AND (c.state IS NULL OR c.state <> 4) AND s.supertaskId = {supertask_id};\"\n cursor.execute(sql)\n return cursor.fetchall()\n\n def get_cracked_hashes(self):\n with self.connection.cursor() as cursor:\n sql = f\"SELECT DISTINCT(h.hash), h.plaintext, t.chat_id FROM hashtopolis.Hash h \" \\\n f\"JOIN tlgbot.hashes h1 ON h.hashId = h1.hash_id \" \\\n f\"JOIN tlgbot.tasks t ON t.taskwrapper_id = h1.taskwrapper_id \" \\\n f\"WHERE t.is_send = 0 AND h.isCracked = 1;\"\n cursor.execute(sql)\n return cursor.fetchall()\n\n def check_cracked_hash_for_taskwrapper(self, taskwrapper_id):\n with self.connection.cursor() as cursor:\n sql = \"SELECT h.hashId, h.plaintext FROM Hash h JOIN TaskWrapper tw ON tw.hashlistId = h.hashlistId \" \\\n \"WHERE tw.taskWrapperId = %s AND h.isCracked = 1;\"\n cursor.execute(sql, (taskwrapper_id,))\n return cursor.fetchall()\n\n def check_hashes_in_available(self, hashlist: list) -> list:\n \"\"\"\n Функция проверяет хэши по базе и возвращает хэш и пароль\n :param hashlist:\n :return:\n \"\"\"\n with self.connection.cursor() as cursor:\n sql = 'SELECT hashId, hash, plaintext FROM Hash WHERE Hash IN (\"{}\");' \\\n .format('\",\"'.join(hashlist))\n cursor.execute(sql)\n return cursor.fetchall()\n\n\nif __name__ == '__main__':\n pass\n db = 
DatabaseHashtopolis()\n for i in db.get_cracked_hashes():\n print(i)\n # db_hashtopolis = DatabaseHashtopolis()\n # hashlist = ['004823ba55c79cd95a331b1283d8cbfc',\n # '0096f31cafba65a4719b644fdda7d885',\n # '00f3907872e28ec3cbd7608f3efc728f',\n # '0126e02d0a062ae96bb9f6053d26ef17',\n # '01a88086180795d2cc9a2d9ea348521d',\n # '50b10dfde48eb2b7187a381a109bec9b',\n # '001aa4071306a0fe9f01f1f80a7ca950', ]\n # hashes_id = {22019, 2316}\n # print(db_hashtopolis.get_hash_id(hash='3207c6c97efc2ae56ea6f03ce29ff667'))\n # result = db_hashtopolis.check_hashes_in_available(hashlist)\n # cracked_hashes = {hash.get('hash') for hash in result if hash.get('plaintext')}\n # uncracked_hashes = {hash.get('hashId') for hash in result if hash.get('hash') not in cracked_hashes}\n # print(uncracked_hashes)\n # print(db_hashtopolis.get_hashlist_id(uncracked_hashes))\n # hashlist = [\n # '3a813688636ff02a7284261530b5c957',\n # '4e7d4b840ce4eb316467bc23484cec50',\n # ]\n # print(db_hashtopolis.get_the_count_of_unfulfilled_tasks('3a813688636ff02a7284261530b5c957', supertask_id=25))\n # print(set(i.items()))\n # print(type(res))\n # for i in range(10):\n # create_user(chat_id=123123, first_name='123', last_name='312', username='111', language_code='ке')\n # db.connect()\n # table_object = [Supertask.__table__, User.__table__, Wallet.__table__, Task.__table__, Hashe.__table__]\n # db.drop_tables(table_object)\n # db._create_tables(table_object)\n # res = db.get_last_priority()\n # print(res)\n # if not db.check_user_exist(123):\n # print('not user')\n # db.close()\n # db.close_engine()\n # db = DatabaseHashtopolis()\n # print(db.get_hash_id(6340))\n"
},
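Several raw queries in database.py still interpolate values with f-strings; with pymysql the same lookup can bind parameters through the driver instead. A minimal sketch, assuming reachable placeholder credentials:

import pymysql.cursors

# host/user/password/database are placeholders, not values from the repo's config
connection = pymysql.connect(host="localhost", user="user", password="pass",
                             database="hashtopolis",
                             cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
    # %s placeholders are escaped by the driver, unlike f-string interpolation
    cursor.execute("SELECT hashId, plaintext FROM Hash WHERE hash = %s LIMIT 1",
                   ("d41d8cd98f00b204e9800998ecf8427e",))
    print(cursor.fetchone())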
{
"alpha_fraction": 0.7079458832740784,
"alphanum_fraction": 0.7079458832740784,
"avg_line_length": 45.39215850830078,
"blob_id": "27c19ba88bbc19388f4aec169e2bfcbf7a0625ff",
"content_id": "71710df5f113c532877babb94f36a8a41ab75b56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5290,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 102,
"path": "/handlers/hashes_commands.py",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom tools import *\nfrom bot import dp\nfrom database import DatabaseTlgBot\nfrom datetime import datetime\n\n\nclass OrderHashDecryption(StatesGroup):\n waiting_for_algorithm = State()\n waiting_for_hashes = State()\n waiting_for_supertask = State()\n start_task = State()\n waiting_for_essid = State()\n\n\ndb = DatabaseTlgBot()\n\n\n# rh = recovery hashes\[email protected]_handler(commands=\"recovery\", state=\"*\")\nasync def algorithm_request(message: types.Message):\n chat_id = message.chat.id\n first_name = message.chat.first_name\n last_name = message.chat.last_name\n username = message.chat.username\n language_code = message.from_user.language_code\n db.create_user(chat_id=chat_id, first_name=first_name, last_name=last_name, username=username,\n language_code=language_code)\n template_message = ''\n for key, value in AVAILABLE_ALGORITHMS.items():\n template_message = template_message + f\"*{key}* {value.get('name')}\\n\"\n await message.answer(f\"{template_message}\\n\\nВыберите алгоритм, указав его номер\", parse_mode=\"Markdown\")\n await OrderHashDecryption.waiting_for_algorithm.set()\n\n\[email protected]_handler(state=OrderHashDecryption.waiting_for_algorithm, content_types=types.ContentTypes.TEXT)\nasync def get_hashes(message: types.Message, state: FSMContext):\n try:\n chosen_algorithm = int(message.text)\n except ValueError as err:\n chosen_algorithm = None\n if chosen_algorithm not in AVAILABLE_ALGORITHMS.keys():\n await message.answer(\"Указан некорректный номер алгоритма, попробуйте снова\")\n return\n await state.update_data(chosen_algorithm=chosen_algorithm)\n await OrderHashDecryption.waiting_for_hashes.set()\n await message.answer(\"Укажите в сообщении хэши, соответствующие выбранному алгоритму. \"\n \"Каждый хэш должен быть с новой строки\")\n\n\[email protected]_handler(state=OrderHashDecryption.waiting_for_hashes, content_types=types.ContentTypes.TEXT)\nasync def get_supertask(message: types.Message, state: FSMContext):\n # Блок обработки полученных от пользователя хэшей\n hashes = set(message.text.split(\"\\n\"))\n user_data = await state.get_data()\n algorithm_id = user_data.get('chosen_algorithm')\n verified_hashes = check_hashes_against_the_algorithm(hashes, algorithm_id)\n correct_hashes = verified_hashes.get('correct')\n incorrect_hashes = verified_hashes.get('incorrect')\n if not correct_hashes:\n await message.answer(f\"Переданные хэши не соответствуют выбранному алгоритму:\\n{incorrect_hashes}.\"\n f\"\\nПроверьте их корректность или обратитесь к разработчику.\")\n return\n # if incorrect_hashes:\n # incorrect_hashes_str = ', '.join(incorrect_hashes)\n # await message.answer(f\"Следующие хэши не соответствуют выбранному алгоритму:\\n{incorrect_hashes_str}.\"\n # f\"\\nПроверьте их корректность\")\n # Вывод списка супертасков\n supertasks_info = db.get_supertasks_info()\n supertask_id = list()\n template_message = ''\n for supertask in supertasks_info:\n template_message = template_message + f\" {str(supertask.id)} : {supertask.name}. 
\\nСтоимость за хэш: {str(supertask.price)}\\n\\n\"\n supertask_id.append(supertask.id)\n print(template_message)\n await message.answer(f\"{template_message}\\nВыберите шаблон для восстановления пароля, указав его номер\")\n await OrderHashDecryption.waiting_for_supertask.set()\n await state.update_data(list_supertask_id=supertask_id, hashes=verified_hashes.get('correct'))\n\n\[email protected]_handler(state=OrderHashDecryption.waiting_for_supertask, content_types=types.ContentTypes.TEXT)\nasync def create_task_handler(message: types.Message, state: FSMContext):\n user_data = await state.get_data()\n supertask_id = user_data.get('list_supertask_id')\n try:\n chosen_supertask = int(message.text)\n except ValueError as err:\n chosen_supertask = None\n if chosen_supertask not in supertask_id:\n await message.answer(\"Пожалуйста, укажите шаблон, используя клавиатуру ниже\")\n return\n hash_type_id = user_data.get('chosen_algorithm')\n hashes = user_data.get('hashes')\n if create_task(hash_list_name=f\"tbot_{message.chat.id}_{datetime.now().strftime('%Y%m%d_%H%m%S')}\",\n hash_type_id=hash_type_id, hashes=hashes, supertask_id=chosen_supertask, chat_id=message.chat.id):\n await state.finish()\n await message.answer(f\"Задание принято.\")\n return\n else:\n await message.answer(f\"Ошибка при назначении задания. Обратитесь в ТП.\")\n"
},
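The /recovery flow above is a multi-step aiogram 2.x finite-state machine. A minimal standalone sketch of the same pattern; the token is a syntactically valid placeholder, and running it would still require `executor.start_polling(dp)`:

from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup

bot = Bot(token="42:PLACEHOLDER")  # placeholder token, not a real one
dp = Dispatcher(bot, storage=MemoryStorage())

class Order(StatesGroup):
    waiting_for_choice = State()

@dp.message_handler(commands="start", state="*")
async def start(message: types.Message):
    await message.answer("Pick a number")
    await Order.waiting_for_choice.set()  # move the chat into the waiting state

@dp.message_handler(state=Order.waiting_for_choice)
async def chosen(message: types.Message, state: FSMContext):
    await state.update_data(choice=message.text)  # stash the answer in FSM storage
    await state.finish()
    await message.answer(f"You picked {message.text}")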
{
"alpha_fraction": 0.565432071685791,
"alphanum_fraction": 0.6419752836227417,
"avg_line_length": 19.25,
"blob_id": "9c8977943f72c69bad379cfa83a6f8ec60c0abb6",
"content_id": "235dd5566d75c46279d6266b61f19949d5804446",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 20,
"path": "/pyproject.toml",
"repo_name": "rootsergio/hashbot",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"hashbot\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"Shevchenko Sergey <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.8.0\"\naiogram = \"^2.11.2\"\nrequests = \"^2.25.1\"\ndynaconf = \"^3.1.2\"\nSQLAlchemy = \"^1.3.22\"\nmysql-connector-python = \"^8.0.22\"\nPyMySQL = \"^1.0.2\"\n\n[tool.poetry.dev-dependencies]\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n"
}
] | 12 |
gnowledge/gstudio-docker
|
https://github.com/gnowledge/gstudio-docker
|
9b97625885057584adebb6f4ad5bc349fa6eb580
|
5d81e16240391557c891c1e440d30d2e8722b641
|
0e59aa8a1b7e967f30e774c0e28a9a44b7393f56
|
refs/heads/master
| 2021-07-09T12:37:19.803012 | 2020-07-20T06:54:20 | 2020-07-20T06:54:20 | 38,933,886 | 6 | 19 | null | 2015-07-11T17:12:30 | 2019-10-24T11:37:03 | 2020-07-20T06:54:21 |
Shell
|
[
{
"alpha_fraction": 0.7582781314849854,
"alphanum_fraction": 0.7582781314849854,
"avg_line_length": 19.066667556762695,
"blob_id": "69740b2d01bbab9623ae464ae48e0f292420ebd6",
"content_id": "d4ec4b13acc33bc504f5e23e0f6593d0bb92ab3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 15,
"path": "/scripts/patch-rollback/2.1/rollback-patch-2.1-container.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# File that will be triggered from host system\n\n# change directory to /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/ \ncd /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/\n\n# git checkout .\ngit checkout .\n\n# git stash\ngit stash\n\n# git checkout clixserver branch\ngit checkout clixserver\n\n"
},
{
"alpha_fraction": 0.6868189573287964,
"alphanum_fraction": 0.7307556867599487,
"avg_line_length": 43.4375,
"blob_id": "5994fe8819b3f3c50ec4f1cd72fcbaf3e8e0195a",
"content_id": "93a29a308653a13498bd09c5bc52be3589321ebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2845,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 64,
"path": "/scripts/code-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#This script calls various scripts inside the gstudio container for the updation of codes\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n#for filename\n\n#patch=$(basename $(tar -tf /mnt/patch-*.tar.gz | head -n 1));\n#update_patch=\"${filename%.*.*}\";\n#patch=\"patch-7a6c2ac-r5-20190221\";\n#patch=\"patch-26eaf18-r5-20190320\";\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n#code to run the script named git-offline-code-update.sh inside the container started\n\necho -e \"\\n${cyan}copying updated patch from /mnt/${patch} to /home/docker/code/ in gstudio container ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch} /home/core/code/ ;\n\necho -e \"\\n${cyan}Updating offline patch ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /home/docker/code/${patch}/code-updates/git-offline-update.sh\";\n\n#code to run the script named git-offline-code-update.sh inside the container started\n\n#code to copy user-csvs of sp99, sp100 and cc inside the container started\n\nval=\"cc\";\n\necho -e \"\\n${cyan} Copying the sp99, sp100 and cc user csvs to the user-csvs folder inside the container ${reset}\";\nsudo rsync -avPhz /home/core/user-csvs/sp/sp99_users.csv /home/core/code/user-csvs/; #copying sp99 user csvs\nsudo rsync -avPhz /home/core/user-csvs/sp/sp100_users.csv /home/core/code/user-csvs/; #copying sp100 user csvs\nsudo rsync -avPhz /home/core/user-csvs/${val}/cc_users.csv /home/core/code/user-csvs/; #copying cc user csvs\n\n#code to copy user-csvs of sp99, sp100 and cc inside the container ended\n\n# Code To change the permissions of user-csvs folder\necho -e \"\\n${cyan} Changing the permissions of /home/core/user-csvs folder\"\nsudo chown root:root /home/core/user-csvs ;\nsudo chmod +xr /home/core/user-csvs ;\n\n#code to run the script named python-files-exec.sh inside the container started\n\necho -e \"\\n${cyan}Executing the python files ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /home/docker/code/${patch}/code-updates/python-files-exec.sh\"; \n\n#code to run the script named python-files-exec.sh inside the container ended\n\n\n#code to copy backup-old-server-data.sh and Execute-get_all_users_activity_timestamp_csvs.sh to /home/core\n\necho -e \"\\n${cyan}Copying the scripts for old server data backup and getting all user activity timestamp csvs to /home/core\";\nsudo rsync -avPhz /home/core/code/scripts/backup-old-server-data.sh /home/core/ ;\nsudo rsync -avPhz /home/core/code/scripts/Execute-get_all_users_activity_timestamp_csvs.sh /home/core/ ;\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/code-updates/execute-ActivityTimestamp-process.sh /home/core/ ;\n\n"
},
{
"alpha_fraction": 0.6470016241073608,
"alphanum_fraction": 0.6846029162406921,
"avg_line_length": 27.831775665283203,
"blob_id": "0f76fd12efde53573eaffd52aa5d5cad560e6fb7",
"content_id": "47e6b84e067e0e57701ba2fbca6bfbde05f45ebf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3085,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 107,
"path": "/scripts/git-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Script for git pull\n# Default branch name is master\n\nbranch_name=\"$1\";\nif [[ \"$1\" == \"\" ]]; then\n echo -e 'Please provide the branch names like: \\n 1. master \\n 2. alpha \\n 3. mongokit \\n 4. dlkit \\n';\n read branch_name ;\nfi\n \necho -e \"USER input : $branch_name\";\nbranch_name=\"alpha\";\n\n\nudate=\"$(date +%Y%m%d-%H%M%S)\"\nmkdir -p /home/docker/code/git-update/$udate\n\n\necho \"Server update (git) starting\"\n\necho \"[run] enabling maintenance templete\"\nmv /home/docker/code/maintenance/maintenance.disable /home/docker/code/maintenance/maintenance.enable\n\necho \"[run] go to the code folder - gstudio\"\ncd /home/docker/code/gstudio/\n\necho \"[run] Execute git logs (before)\"\ngit log >> /home/docker/code/git-update/$udate/git-update-before.log\n\necho \"[run] Execute git status\"\ngit status\n\necho \"[run] Execute git diff\"\ngit diff\n\necho \"[run] Execute git stash\"\ngit stash\n\n\n\n\necho \"[run] Git Pull started\"\n\necho \"[run] Execute git pull\"\ngit pull origin $branch_name \n\necho \"[run] Git Pull completed\"\n\n\necho \"[run] Execute git stash\"\ngit stash apply\n\necho \"[run] Execute git stash drop\"\ngit stash drop\n\necho \"[run] Execute git status\"\ngit status\n\necho \"[run] Execute git diff\"\ngit diff\n\n\necho \"[run] go to the code folder - gstudio/gnowsys-dev\"\ncd /home/docker/code/gstudio/gnowsys-ndf/\n\n# echo \"[run] get updated additional schema STs, ATs and RTs\"\n# cp -v /home/docker/code/gstudio/doc/schema_directory/* /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/management/commands/schema_files/\n\n# echo \"[run] create or update gstudio schema in mongodb\"\n# python manage.py syncdb # Mrunal M. Nachankar : Mon, 22-02-2016 01:00:PM : \"--noinput\" argument not needed as by now we have a default user as administrator \n# python manage.py filldb\n# python manage.py create_schema STs_run1.csv\n# python manage.py create_schema ATs.csv\n# python manage.py create_schema RTs.csv\n# python manage.py create_schema STs_run2.csv\n\n\necho \"[run] fab update_data\"\t\t\t\t\t\t\t\t\t\t# Mrunal M. Nachankar : Mon, 05-03-2017 06:06:AM \nfab update_data\t # Mrunal M. Nachankar : Mon, 05-03-2017 06:06:AM \n\n\necho \"[run] property_order_reset\"\t\t\t\t\t\t\t\t\t\t# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \necho \"execfile('property_order_reset.py')\" | python manage.py shell\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\necho \"[run] create_auth_objs.py\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \necho \"execfile('../doc/deployer/create_auth_objs.py')\" | python manage.py shell ;\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\necho \"[run] Sync_existing\"\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \npython manage.py sync_existing_documents\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\n\necho \"[run] Execute git logs (after)\"\ngit log >> /home/docker/code/git-update/$udate/git-update-after.log\n\n\necho \"[run] Bower install\"\nbower install --allow-root\n\necho \"[run] collectstatic\"\necho yes | python manage.py collectstatic\n\n\necho \"[run] enabling maintenance templete\"\nmv /home/docker/code/maintenance/maintenance.enable /home/docker/code/maintenance/maintenance.disable\n\necho \"Server update (git) finished successfully\"\n"
},
{
"alpha_fraction": 0.6202531456947327,
"alphanum_fraction": 0.6455696225166321,
"avg_line_length": 14.800000190734863,
"blob_id": "b6324ca443c1a629aaea42deab7c39d58b93ea08",
"content_id": "badc1549d378223311398cec40f9188e01f8c664",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 5,
"path": "/scripts/csv-to-gz.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n# create gz of the file name passed as argument\n\ngzip -c $1 > $1.gz\n"
},
{
"alpha_fraction": 0.5676229596138,
"alphanum_fraction": 0.5891393423080444,
"avg_line_length": 21.697673797607422,
"blob_id": "12fed8f69cc3a55f16fe17e09ca00243a6b11f71",
"content_id": "5301307801a520e9bdb341baf426f50235624186",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 976,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 43,
"path": "/scripts/mongodb-service-check.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Mrunal : 0604216 - Ref : http://linoxide.com/linux-shell-script/script-check-service-up/ \nif [ \"$#\" = 0 ]; then\n\n echo \"Usage $0 \"\n exit 1\n\nfi\n\ndatet=`date`;\necho \"Date-time : $datet\";\n\nservice=$1\n\nis_running=`ps aux | grep -v grep| grep -v \"$0\" | grep $service| wc -l | awk '{print $1}'`\nmongo_n=`tail /var/log/mongodb/mongod.log | grep \"waiting for connections on port 27017\"`\n\nif [[ $is_running != \"0\" ]] && [[ $mongo_n != \"\" ]]; then\n\n echo \"Service $service is running\"\n\nelse\n\n echo\n initd=`ls /etc/init.d/ | grep $service | wc -l | awk '{ print $1 }'`\n\n if [ $initd = \"1\" ]; then\n\n\tstartup=`ls /etc/init.d/ | grep $service`\n\techo -n \"Found startap script /etc/init.d/${startup}. Start it? Y/n ? \"\n\tread answer\n\tif [ $answer = \"y\" -o $answer = \"Y\" ]; then\n\t echo \"Starting service...\"\n\t /etc/init.d/${startup} start\n\tfi\n\n else\n \techo \"started here \" ;\n\tnumactl --interleave=all mongod --config /root/mongod.conf &\n fi\n\nfi\n"
},
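The shell check above greps ps output and the mongod log; an equivalent lightweight probe can simply test whether anything accepts connections on the mongod port. A Python sketch, assuming the same default host and port used by the script:

import socket

def mongod_listening(host="127.0.0.1", port=27017, timeout=2.0):
    # True if something accepts TCP connections on the mongod port
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print("mongod up" if mongod_listening() else "mongod down")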
{
"alpha_fraction": 0.4896174967288971,
"alphanum_fraction": 0.5056466460227966,
"avg_line_length": 47.157894134521484,
"blob_id": "ab3cf3bac45abe6c6d1d5deccdd4ccd6cb118088",
"content_id": "4949bd809ca1df544c0c74aaa09eb1e6d0e2ff13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2745,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 57,
"path": "/scripts/internet-check.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n#--------------------------------------------------------------------------------------------------------------#\n# File name : internet-check.sh\n# File creation : Mrunal Nachankar\n# Description :\n# Check for internet connection\n# Build Docker-Image via docker build command (using Dockerfile)\n#--------------------------------------------------------------------------------------------------------------#\n\n\n# Mrunal : Set HOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\nsed -e \"/HOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\n#more $file1/confs/deploy.conf | grep HOME; \n\nsource $file1/confs/deploy.conf\n\n INT_COM=\"\";\n echo -e \"\\nWe are checking for Internet connection\" | sed -e \"s/^/$(date +%Y%m%d-%H%M) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n INT_COM=`ping www.google.com -c 5 | sed -e \"s/^/$(date +%Y%b%d-%H%M) /\"` \n# echo -e \"$INT_COM\" \n if [[ \"$INT_COM\" =~ bytes* ]]; then # If internet connection is available\n _INT_COM=1\n else # If no internet connection\n _INT_COM=0;\n fi\n \n echo -e \"GET http://metastudio.org\\n\\n\" | nc metastudio.org 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _META=1;\n else # If no internet connection\n _META=0;\n fi\n \n echo -e \"GET http://google.com HTTP/1.0\\n\\n\" | nc google.com 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _GOOGLE=1;\n else # If no internet connection\n _GOOGLE=0; \n fi\n \n# echo -e \"ping:$_INT_COM ; meta:$_META ; google:$_GOOGLE\" \n if ([ \"$_INT_COM\" == 0 ] && [ \"$_META\" == 0 ] && [ \"$_GOOGLE\" == 0 ]); then # If no internet connection\n echo -e \"\\nInternet connection failed. Please check the network connections(IP, gateway, routes or physical cables).\" | sed -e \"s/^/$(date +%Y%m%d-%H%M) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_INTERNET_STATUS=0;\n\tsed -e '/_INTERNET_STATUS/ s/=.*;/=\"0\";/' -i $HOME/confs/deploy.conf; \n else # If internet connection is available\n echo -e \"\\nInternet connection Successful.\" | sed -e \"s/^/$(date +%Y%m%d-%H%M) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n echo -e \"Hence we will continue with online installation.\" | sed -e \"s/^/$(date +%Y%m%d-%H%M) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_INTERNET_STATUS=1;\n\tsed -e '/_INTERNET_STATUS/ s/=.*;/=\"1\";/' -i $HOME/confs/deploy.conf; \n fi\n}\n"
},
{
"alpha_fraction": 0.6096574664115906,
"alphanum_fraction": 0.6179828643798828,
"avg_line_length": 41.8979606628418,
"blob_id": "b79370b96307c9e504360834708bd5acdec3f6a5",
"content_id": "bfbe88d2ab6c80130e44e9d55cef4381bce78bf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4204,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 98,
"path": "/scripts/copy-softwares.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#--------------------------------------------------------------------#\n# Copy extra software packages for CLIx student platform in coreos / ubuntu \n# File name : copy-softwares.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : Mon Apr 30 01:13:01 IST 2018\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for copying the extra software packages for CLIx student paltform in coreos / ubuntu.\n# Important : None\n# Future scope : 1. Make code more streamlined.\n# References : None\n#--------------------------------------------------------------------#\n\nsource ./mrulogger.sh\n\nSCRIPT_ENTRY\n\nsetup_progress_status_filename=\"/home/core/copy-softwares-setup_progress_status_value\";\n\nfunction COPY_EXTRA_SOFTWARE_PACKAGES(){\n \n # Check whether \"/\" is mounted from inserted disk or not...\n CHECK_IF_ROOT_MOUNTED_FROM_USB_DISK \"$BASH_SOURCE\";\n\n # Step 1 : Copy tar file\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n\n if [ \"$setup_progress_status\" == \"0\" ] || [ \"$setup_progress_status\" == \"\" ]; then\n CHECK_CORRECT_USB_DISK \"$BASH_SOURCE\";\n \n INFO \"Setup progress status value: $setup_progress_status. Hence copying extra software packages (Copy tar file).\" \"$BASH_SOURCE\" \"green\";\n \n # source_path=\"/mnt/home/core/setup-software/Tools /mnt/home/core/setup-software/coreos /mnt/home/core/setup-software/i2c-softwares /mnt/home/core/setup-software/syncthing \";\n source_path=\"/mnt/home/core/setup-software/extra_software_packages.tar.bz2\";\n destination_path=\"/home/core/setup-software/\" ;\n INFO \"Copy extra softwares from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n \tif [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"Copying of extra softwares finished successfully.\" \"\" \"green\";\n UMOUNT_PARTITION \"/dev/$selected_usb_disk\" \"/mnt/\";\n\t\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"1\";\n \telif [ \"${command_status_value}\" == \"Failed\" ]; then\n CRITICAL \"Copying of extra softwares doesn't finish successfully.\";\n\t exit 1;\n \telse\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n \tfi\n else\n CRITICAL \"Setup progress step value is ${setup_progress_status}, hence continuing with the process skipping the step 1.\" \"$BASH_SOURCE\";\n# exit 1;\n fi\n\n\n # Step 2 : Untar file\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n\n if [ \"$setup_progress_status\" == \"1\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. Hence contining to untar the tar file.\" \"$BASH_SOURCE\" \"green\";\n \n INFO \"Untar the extra_software_packages.tar.bz2 (tar file)\" \"$BASH_SOURCE\" \"green\";\n cd /home/core/setup-software/ ;\n sudo tar xvjf extra_software_packages.tar.bz2\n\techo \"mrunal\"\n\tCHECK_COMMAND_STATUS; \n \tif [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"Untaring of extra softwares tar finished successfully.\" \"\" \"green\";\t\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"2\";\n \telif [ \"${command_status_value}\" == \"Failed\" ]; then\n CRITICAL \"Copying of extra softwares doesn't finish successfully.\";\n\t exit 1;\n \telse\n CRITICAL \"Error: Oops something went wrong. 
Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n \tfi\n\t\n else\n CRITICAL \"Setup progress step value is ${setup_progress_status}, hence continuing with the process skipping the step 2.\" \"$BASH_SOURCE\";\n # exit 1;\n fi\n\n if [ \"$setup_progress_status\" -ge \"2\" ]; then\n INFO \"It seems nothing to do. Either everything finished successfully or something failed.\\nPlease verify manually\" \"$BASH_SOURCE\" \"yellow\"; \n fi\n}\n\n#**************************** Copying process starts from here ********************************#\n\nCOPY_EXTRA_SOFTWARE_PACKAGES;\n\n#**************************** Copying process ends here ********************************#\n\nSCRIPT_ENTRY\n\nexit 0;\n"
},
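The copy-softwares.sh script above gates each step on a progress file so that a re-run skips work that already succeeded. GET_SETUP_PROGRESS and SET_SETUP_PROGRESS live in mrulogger.sh, which is not part of this entry, so the stand-ins below are assumptions about their behaviour, not the actual implementations; a minimal sketch of the checkpoint pattern:

#!/bin/bash
# Hypothetical stand-ins for the mrulogger.sh helpers used above.
status_file="/tmp/demo-setup_progress_status_value"

get_setup_progress() {
    # Read the last recorded step number; default to 0 when the file is absent.
    [ -f "$1" ] && setup_progress_status=$(cat "$1") || setup_progress_status=0
}

set_setup_progress() {
    # Persist the step number so a later re-run can skip completed work.
    echo "$2" > "$1"
}

get_setup_progress "$status_file"
if [ "$setup_progress_status" -lt 1 ]; then
    echo "running step 1 (copy the tar file)"   # the real script rsyncs a tarball here
    set_setup_progress "$status_file" 1
else
    echo "step 1 already done, skipping"
fi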
{
"alpha_fraction": 0.6804393529891968,
"alphanum_fraction": 0.6922070980072021,
"avg_line_length": 42.95402145385742,
"blob_id": "744d8427e737f70ca4ea694fda3b5d3adfcf5cf2",
"content_id": "0b904e185ce72fbf3b0110a1b97664f46dc0d9cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3824,
"license_type": "no_license",
"max_line_length": 422,
"num_lines": 87,
"path": "/README.md",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "# gstudio-docker\nDocker file for gstudio\n=======================\n\nTo build the docker image, we need to clone the project in your docker host, and run the script build-docker.sh. This script clones the gstudio code and builds the image. After cloning the gstudio-docker, download and install the static javascript depedencies from http://gnowledge.org/~nagarjun/bower_components.tar.gz. unzip the contents of this file in the project directory before starting the buuild-docker.sh script.\n\nThe image uses Ubuntu 14.04, django, nginx, mongodb, and several code dependent python libraries and OS level libraries. The image builds to about 1.6GB. \n\nUnder development\n-----------------\n\n- autostart of mongod issue to be resolved\n- schema files to be updated for course builder and course player\n- single point data directory for all data (mongo, sqlite, static files, rcs files, mail queue, etc.)\n- after successful completion of the above tasks the docker project to be published in dockerhub.\n- security enhancements\n - run all services as non-root user\n - expose only port 80\n - gpg keys installation script for school servers joining auto-sync program.\n\nBuilding the Image\n------------------\n\nBuild the image by running \"build-docker.sh\" script from the project directory after cloning the project files. If you are re-building the image, delete the 'gstudio' directory to get the fresh updates from gstudio project. \n\nScript details\n---------------------\n\n\t1. script name : build-docker-image.sh\n\t execution : bash build-docker-image.sh\n\t use\t : used for the following -\n\t\t 1. installing docker application\n\t\t 2. building the images and starting the container\n\t\t 3. loading the imaes and starting the container \n\t procedure : bash build-docker.sh\n\t \t 1. it will check docker application\n\t\t 1.1. if it is installed, print the version. \n\t\t 1.2. if it is not installed, install the same. \n\t\t 2. reboot the system.\n\t\t 3. again execute the same command after reboot (bash build-docker.sh) to load the image / build the image.\n\t\t 3.1 load the image.\n\t\t 3.2 build the image.\n\t\t \t3.2.1 Please give branch name of online repo \n\t\t 4. start the container.\n\n\t2. script name : initialize.sh\n\t execution : bash initialize.sh\n\t use\t : used for the following -\n\t\t 1. starting the applications at the startup of a docker container\n\t\t 2. execute application related scripts\n\n\t3. script name : numa-arch-check.sh\n\t execution : bash numa-arch-check.sh\n\t use\t : used for the following -\n\t\t 1. check of numa arch.\n\n\t4. script name : generate-self-certified-certificate-ssl.sh\n\t execution : bash generate-self-certified-certificate-ssl.sh\n\t use\t : used for the following -\n\t\t 1. generate ssl certificate (self certified ssl certificate).\n\n\t5. script name : local_settings_changes.sh\n\t execution : bash local_settings_changes.sh\n\t use\t : used for the following -\n\t\t 1. generate ssl certificate (self certified ssl certificate).\n\n\t6. script name : smtpd.sh\n\t execution : bash smtpd.sh\n\t use\t : used for the following -\n\t\t 1. starting smtpd \n\n\t7. script name : Bulk-User-creation.sh\n\t execution : bash Bulk-User-creation.sh\n\t use\t : used for the following -\n\t\t 1. creating the bulk users. It makes use of \"Users.csv\" for creating user details for creation.\n \t\t Note : First add new usernames and passwords in \"Users.csv\".\n\n\t8. 
script name : git-update.sh\n\t execution : bash git-update.sh\n\t use\t : used for the following -\n\t\t 1. updating the code from git\n\n\t9. script name : ss-gpg-setup.sh\n\t execution : bash ss-gpg-setup.sh\n\t use\t : used for the following - (Under developement)\n\t\t 1. take installing user and school details.\n\t\t 2. generate gpg keys.\n"
},
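The Bulk-User-creation scripts described in the README read semicolon-separated rows of school id, username and password (matching the IFS=';' read used elsewhere in this repo). A hedged illustration of the expected Users.csv layout and invocation; the ids and credentials below are placeholders, not real accounts:

# Hypothetical Users.csv (school id;username;password) for Bulk-User-creation.sh
cat > Users.csv <<'EOF'
r001;teacher01;ChangeMe123
r001;student01;ChangeMe456
EOF
bash Bulk-User-creation.sh   # then follow the on-screen prompts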
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.6257928013801575,
"avg_line_length": 31.84722137451172,
"blob_id": "39cbb7ce039651aff819507e00e5a5781d1123ec",
"content_id": "bd094e02643c9b37a76ba33a703c7d4b6162f984",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4730,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 144,
"path": "/scripts/Create-Server-symlink-base-backup-files-dir.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# dirname_in is used to store input dir name\n#dirname_in=\"/media/glab/d23792a7-c7d6-40db-b712-0e80d0678b761/MStore/RnD/testbase\" ; # Mrunal changes are here : Change the source directory\ndirname_in=\"/data\" ; # Mrunal changes are here : Change the source directory\n\n# dirname_out_default is used to store default output dir name\ndirname_out_default=\"/backups/\" ; # Mrunal changes are here : Change the default directory\n\n# dirname_out is used to store output dir name\ndirname_out=\"$1\" ; # Mrunal changes are here : Change the destination directory (default it is set to 1stargument value)\n\n#State id limits as a default start is set to 1 and end is set to 100 (means for mizoram its mz1 to mz100)\ncurrnet_state_nm_start=1; # Mrunal changes are here : starting state id\ncurrnet_state_nm_end=100; # Mrunal changes are here : ending state id\n\n# Following variables are used to store the color codes for displaying the content on terminal\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;32m\" ;\nbrown=\"\\033[0;33m\" ;\nblue=\"\\033[0;34m\" ;\ncyan=\"\\033[0;36m\" ;\nreset=\"\\033[0m\" ;\n\necho -e \"\" ;\n\ncreate_symlink(){\n\nfor selected_file in `find $dirname_in -type f`; do\n \n dirname_selected_file=$(dirname ${selected_file});\n last_dirname_in=$(dirname $dirname_in);\n \n dirstructure=${dirname_selected_file#$dirname_in};\n \n Final_output_dirname_selected_file=\"$1${dirstructure}\"\n\t\n $(mkdir -p $Final_output_dirname_selected_file);\n\n echo -e \"Symlinking : \\nSource - ${selected_file} \\nDestination - ${Final_output_dirname_selected_file} \\n\"\n ln -s ${selected_file} ${Final_output_dirname_selected_file}\n\ndone\n\n}\n\n# Checking - Validations\nerr_msg=\"Usage: \";\necho -e \"Input filename is '$dirname_in'. \\n\";\n\n# Check for input (dirname_in)\nif [[ $dirname_in == \"\" ]] ; then\n echo -e \"${red}Please specify the Input dir name(Full path). ${reset}\\n\" ;\n msg=\"${red}Please set the Input dir name(Full path) {Name of the orignal source directory - variable name in the file is 'dirname_in'} . ${reset}\\n\"\n error=1 ;\n exit\nfi\n\n# Check for output (dirname_out)\nif [[ $1 == \"\" ]] ; then\n\n echo -e \"${red}Please specify the Output dir name(Full path). ${reset}\\n\" ;\n msg=\"\\n${red}Please specify the Output dir name(Full path) {variable name should be passed as an argument while executing the file} . ${reset}\\n\"\n error=1 ;\n exit\n\nelif [[ $1 != \"\" ]] ; then\n\n if [[ -d \"$1\" ]] ; then\n\n echo -e \"${red}Output dir name(Full path) exist. Hence continuing process.${reset}\\n\" ;\n \n else\n\n echo -e \"${red}Output dir name(Full path) does not exist. 
Hence creating it.${reset}\\n\" ;\n mkdir -p $1\n\n fi\nfi\n\n# Print usage incase of invalid inputs\nif [[ $error == 1 ]] ; then\n echo -e \"${red}${msg}${reset}\" ;\n error=0 ;\n exit ;\nfi\n\n# Creation logical conditions\nif [[ \"$1\" != \"\" ]] && [[ \"$2\" == \"mz\" ]] ; then\n\n\tstate_nm=\"mz\" ;\n\tmax_server_state_limit=5;\n\tcurrnet_state_nm=0;\n\tfor currnet_state_nm in `seq $currnet_state_nm_start $currnet_state_nm_end`;\n\tdo\n\t create_symlink $dirname_out_default$state_nm$currnet_state_nm ;\n\t echo \"Name: $dirname_out_default$state_nm$currnet_state_nm\";\n\tdone\n\nelif [[ \"$1\" != \"\" ]] && [[ \"$2\" == \"rj\" ]] ; then\n\n state_nm=\"rj\" ;\n max_server_state_limit=5;\n currnet_state_nm=0;\n for currnet_state_nm in `seq $currnet_state_nm_start $currnet_state_nm_end`;\n do\n create_symlink $dirname_out_default$state_nm$currnet_state_nm ;\n echo \"Name: $dirname_out_default$state_nm$currnet_state_nm\";\n done\n\nelif [[ \"$1\" != \"\" ]] && [[ \"$2\" == \"ct\" ]] ; then\n\n state_nm=\"ct\" ;\n max_server_state_limit=5;\n currnet_state_nm=0;\n for currnet_state_nm in `seq $currnet_state_nm_start $currnet_state_nm_end`;\n do\n create_symlink $dirname_out_default$state_nm$currnet_state_nm ;\n echo \"Name: $dirname_out_default$state_nm$currnet_state_nm\";\n done\n\nelif [[ \"$1\" != \"\" ]] && [[ \"$2\" == \"tg\" ]] ; then\n\n state_nm=\"tg\" ;\n max_server_state_limit=5;\n currnet_state_nm=0;\n for currnet_state_nm in `seq $currnet_state_nm_start $currnet_state_nm_end`;\n do\n create_symlink $dirname_out_default$state_nm$currnet_state_nm ;\n echo \"Name: $dirname_out_default$state_nm$currnet_state_nm\";\n done\n\nelif [[ \"$1\" != \"\" ]] && [[ \"$2\" == \"sp\" ]] ; then\n\n state_nm=\"sp\" ;\n max_server_state_limit=5;\n currnet_state_nm=0;\n for currnet_state_nm in `seq $currnet_state_nm_start $currnet_state_nm_end`;\n do\n create_symlink $dirname_out_default$state_nm$currnet_state_nm ;\n echo \"Name: $dirname_out_default$state_nm$currnet_state_nm\";\n done\n\nfi\n"
},
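The core technique of Create-Server-symlink-base-backup-files-dir.sh is mirroring a directory tree with symlinks. A minimal sketch of just that step, with example paths standing in for the script's /data and /backups/<state><n> defaults; the while-read form also survives file names with spaces, which a for-loop over `find` output does not:

#!/bin/bash
# Mirror the directory structure of $src under $dst and symlink every file.
src="/data"            # example source root
dst="/backups/mz1"     # example destination root
find "$src" -type f | while read -r f; do
    rel="${f#$src}"                      # path relative to the source root
    mkdir -p "$dst$(dirname "$rel")"     # recreate the directory structure
    ln -s "$f" "$dst$rel"                # symlink the file into the mirror
done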
{
"alpha_fraction": 0.5997310876846313,
"alphanum_fraction": 0.6230390071868896,
"avg_line_length": 32.29850769042969,
"blob_id": "443fa05dd803b25355f4e6850a0c209b5d7a9282",
"content_id": "d45c2aa6591b39b2dc2bb2039b7f68cd174979a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2231,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 67,
"path": "/scripts/bulk-User-creation-database.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ndHOME=\"/home/docker/code\";\n\n# Mrunal : 20160131-2130 : Take user input as School id (small letter of initials of state and school no in 3 digit)\nif [[ $1 == \"\" ]]; then\n echo \"Please provide the user details file name.\" ;\n echo \"(For example Rajasthan state and school 001 'r001-user-details' must be the default file name and hit Enter key of Keyboard)\" ;\n read INPUT_FILE ;\nelse\n INPUT_FILE=$1;\nfi\n\necho \"File name entered is $INPUT_FILE .\" ;\n\nfilename=$(basename \"$INPUT_FILE\")\nextension=\"${filename##*.}\"\nfilename1=\"${filename%.*}\"\n#echo \":$filename:$extension:$filename1:$INPUT_FILE: $dHOME/user-details/${INPUT_FILE}\";\n\nif [[ \"${INPUT_FILE}\" == \"${filename}\" ]]; then\n if [[ -f \"$dHOME/user-details/${INPUT_FILE}\" ]]; then\n\tINPUT_FILE=\"$dHOME/user-details/${INPUT_FILE}\"\n fi\nfi\n#echo \"\\nNow filename : ${INPUT_FILE}\"\n\n# Mrunal : 20160131-2130 : \nif [[ \"${INPUT_FILE}\" == \"\" ]] ; then\n echo \"No input. Hence exiting please restart / re-run the script again.\" ;\n exit ;\nelif [[ ! -f \"${INPUT_FILE}\" ]]; then\n echo \"File ${INPUT_FILE} does not exists. Hence exiting please restart / re-run the script again.\" ;\n exit ;\nelif [[ \"${extension}\" != csv ]]; then\n echo \"Only csv file can be used to create new users. Hence exiting please restart / re-run the script again.\" ;\n exit ;\nelse\n echo \"File ${INPUT_FILE} exists. Continuing the process.\" ;\nfi\n\n# Mrunal : 20160131-2130 : Take username and password from file and add the user. (username as \"username from file\"-\"school id\") \n#INPUT_FILE='Users.csv' ;\nIFS=';' ;\ni=1 ;\nwhile read sch_id Uname UPass ;\ndo\n\n echo \"Name - $Uname\" ;\n echo \"Password - $UPass\" ;\n \n cd $dHOME/gstudio/gnowsys-ndf/\n\n echo \"[run] create superuser $Uname\" ;\n echo \"from django.contrib.auth.models import User ;\nif not User.objects.filter(username='$Uname').count():\n User.objects.create_user('$Uname', '', '$UPass') \n\" | python manage.py shell\n \n if [[ $? == \"0\" ]]; then\n\techo \"User : $Uname and Password : $UPass created successfully in the database\" 2&1 >> Bulk-User-creation-database.logs\n fi\n i=$((i+1))\n\ndone < $INPUT_FILE\n\nexit ;\n"
},
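The script above pipes a small Python snippet into Django's manage.py shell for every CSV row. A single-user variant of the same pattern, using a heredoc instead of an inline echo; the path matches the script, while the username and password are illustrative only:

cd /home/docker/code/gstudio/gnowsys-ndf
python manage.py shell <<'EOF'
from django.contrib.auth.models import User
# The filter().count() guard keeps the operation idempotent across re-runs.
if not User.objects.filter(username='demo-user').count():
    User.objects.create_user('demo-user', '', 'demo-pass')
EOF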
{
"alpha_fraction": 0.6129032373428345,
"alphanum_fraction": 0.6821983456611633,
"avg_line_length": 31.230770111083984,
"blob_id": "c6b14fb0421c6842cc34bc180a1741c8e692a322",
"content_id": "f90d47ca93772c609865868fd6c3ed5a05e9a4fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 837,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 26,
"path": "/scripts/patch/mz-te-workshop-fix.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\n\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\necho -e \"\\n${cyan}copy files (drop_database.sql and pg_dump_all_working.sql) in /home/core/data ${reset}\"\nrsync -avPh /mnt/drop_database.sql /mnt/pg_dump_all_working.sql /home/core/data/\n\necho -e \"\\n${cyan}school server instance config - setting postgres database ${reset}\"\ndocker exec -it gstudio /bin/sh -c \"echo 'psql -f /data/drop_database.sql;' | sudo su - postgres\"\ndocker exec -it gstudio /bin/sh -c \"echo 'psql -f /data/pg_dump_all_working.sql;' | sudo su - postgres\"\n\ncd /home/core/\ndocker-compose restart\n\nexit"
},
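The patch above runs psql inside the gstudio container by echoing the command into `sudo su - postgres`. A follow-up check in the same style, assumed rather than part of the original fix, to confirm the restored databases are present before restarting the container:

# List the databases as the postgres user inside the container.
docker exec -it gstudio /bin/sh -c "echo 'psql -l;' | sudo su - postgres"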
{
"alpha_fraction": 0.541711151599884,
"alphanum_fraction": 0.5634560585021973,
"avg_line_length": 56.80976867675781,
"blob_id": "a5c41f9ef2fc5d90c9e8d9a6f98ad13d0bb26121",
"content_id": "a416f4b3618da899d25c18689aa61a34dd332fa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 22488,
"license_type": "no_license",
"max_line_length": 389,
"num_lines": 389,
"path": "/scripts/Docker-load-start.sh-old",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : build-docker.sh\n# File creation : gnowgi\n# Description :\n# git clone\n# Build Docker-Image via docker build command (using Dockerfile)\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM\n# Description : \n# Logs directory check and creation\n# Prerequisites - Checking for OS version and architecture\n# Checking type of user and permission\n# Internet checking\n# Checking wget package\n# Docker application / package checking and installation\n# Creating local copy of replica code via git clone or update via git pull \n# Build Docker-Image via docker build command (using Dockerfile)\n# Verify image creation\n# Start the Docker-container via docker run command (using newly created docker image)\n# Copy host logs(pre-install logs) inside docker container \n# Verify initialization of docker-container and display message of completion\n#--------------------------------------------------------------------------------------------------------------#\n\n#-----------------------------------------------------------------------\n# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM : Old code\n#git clone https://github.com/gnowledge/gstudio.git\n#docker build -t gnowgi/gstudio .\n#-----------------------------------------------------------------------\n\n\n# shell \nsh_c=\"sh -c\"\n\n\n#--------------------------------------------------------------------#\n# Log file details...\n#--------------------------------------------------------------------#\nLOG_DIR=\"$(pwd)/Pre-install_Logs\";\nINSTALL_LOG=\"pre-install-$(date +%d-%b-%Y-%I-%M-%S-%p).log\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nINSTALL_LOG_FILE=\"$LOG_DIR/$INSTALL_LOG\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\n# ---------------- Log files variable def ends here -----------------\n\n\n#--------------------------------------------------------------------#\n# Check the existence of the directory...\n# If directory is exists : Display messages\n# If directory is not exists : create and display messages\n#--------------------------------------------------------------------#\nfunction check_dir() {\n if [[ -d $1 ]]; then\n echo -e \"Info-msg : $1 directory is already exists.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n echo -e \"Caution-msg : $1 directory not exists. 
Hence creating the same.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n `mkdir -p $1` # Mrunal : No redirections here please\n echo -e \"$1 directory is now been created.\\n\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\n}\n# ----------------- Check directory code ends here ------------------\n\n#--------------------------------------------------------------------#\n# Check the existence of the file...\n# If directory is exists : Display messages\n# If directory is not exists : create and display messages\n#--------------------------------------------------------------------#\nfunction check_file() {\n if [[ -f $1 ]]; then\n echo -e \"Info-msg : $2($1) file is already exists.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n echo -e \"Caution-msg : $2($1) file not exists.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\n}\n# ----------------- Check directory code ends here ------------------\n\n#------------------------------------------------------------------------#\n# Checking the existence of the command (passed as an argument) is here..\n#------------------------------------------------------------------------#\ncommand_existence_check() {\n command -v \"$@\" > /dev/null 2>&1\n}\n#----------- Check for existence of directory code ends here ------------\n\n#--------------------------------------------------------------------#\n# Checking for Internet is here..\n#--------------------------------------------------------------------#\n\n_INTERNET_STATUS=0; # Mrunal : 20151229-1050 : 0 - Offline (No internet) and 1 - Online (internet available)\nfunction internet_chk() {\n #ping www.google.com -c 5\n \n echo -e \"\\nWe are checking for Internet connection \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n INT_COM=`ping www.google.com -c 5 | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) /\"` | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n echo -e \"$INT_COM\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n if [[ \"$INT_COM\" =~ bytes* ]]; then # If internet connection is available\n _INT_COM=1\n else # If no internet connection\n _INT_COM=0;\n fi\n \n echo -e \"GET http://metastudio.org\\n\\n\" | nc metastudio.org 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _META=1;\n else # If no internet connection\n _META=0;\n fi\n \n echo -e \"GET http://google.com HTTP/1.0\\n\\n\" | nc google.com 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _GOOGLE=1;\n else # If no internet connection\n _GOOGLE=0; \n fi\n \n echo -e \"ping:$_INT_COM ; meta:$_META ; google:$_GOOGLE\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n if ([ \"$_INT_COM\" == 0 ] && [ \"$_META\" == 0 ] && [ \"$_GOOGLE\" == 0 ]); then # If no internet connection\n echo -e \"\\nInternet connection failed. Please check the network connections(IP, gateway, routes or physical cables).\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# echo -e \"Sorry couldn't continue installation. Try again later. 
Thanks.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} # Mrunal : 20151229-1050 : Commented as we need different logic to proceed with offline installation\n# exit 1; # Mrunal : 20151229-1050 : Commented as we need different logic to proceed with offline installation\n echo -e \"\\nHence we will continue with offline installation.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n\t_INTERNET_STATUS=0;\n else # If internet connection is available\n echo -e \"\\nInternet connection Successful.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n echo -e \"\\nHence we will continue with online installation.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n\t_INTERNET_STATUS=1;\n fi\n \n}\n# -------------------------- Internet code ends here ----------------------------------------\n\n\n# -------------------------- Shell file code starts from here ----------------------------------------\n\n# To check LOG directory and files (If directory is not created do create it with function)\n# Here check_dir is the function and $LOG_DIR is dirctory full path variable defined earlier\n\ncheck_dir \"$LOG_DIR\" # Calling check_dir function to check LOG directory existence\n\necho -e \"Info-msg : **Prerequisites** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\necho -e \"Info-msg : Checking for OS version and architecture.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# Check system os architecture (Currently {Fri Aug 28 17:38:35 IST 2015} docker only supports 64-bit platforms)\nos_arch=\"$(uname -m)\"\ncase \"$(uname -m)\" in\n *64)\n ;;\n *)\n echo -e \"Error-msg: The platform you are using is not an 64-bit version. \\n\n Docker currently only supports 64-bit versions of the platforms. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit 1\n ;;\nesac\n\n\n# checking the platform, version and architecture\nlsb_dist=''\ndist_version=''\n\nif command_existence_check lsb_release; then\n lsb_dist=\"$(lsb_release -si)\"\nfi\nlsb_dist=\"$(echo \"$lsb_dist\" | tr '[:upper:]' '[:lower:]')\"\n\nif command_existence_check lsb_release; then\n dist_version=\"$(lsb_release --codename | cut -f2)\"\nfi\n\nif [ -z \"$dist_version\" ] && [ -r /etc/lsb-release ]; then\n dist_version=\"$(. /etc/lsb-release && echo \"$DISTRIB_CODENAME\")\"\nfi\necho \"dist:$lsb_dist and version:$dist_version and OS architecture:$os_arch \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\n\n# Print the username \nuser=\"$(id -un 2>/dev/null || true)\"\necho -e \"User name : $user \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\n# Identify whether user is root or not\necho -e \"\\nInfo-msg : Checking type of user and permission\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nif [ \"$user\" != 'root' ]; then\n if command_existence_check sudo; then\n \tsh_c=\"sudo -E sh -c\"\n \techo -e \"Info-msg : User($user) with sudo user. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n elif command_existence_check su; then\n sh_c=\"su -c\"\n echo -e \"Info-msg : User($user) with su user. 
\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\techo -e \"Error: The installer needs the ability to run few commands with root privileges.\n We are unable to find either 'sudo' or 'su' available to make this happen. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\texit 1\n fi\nfi\n\n# Checking for the interent connections\ninternet_chk \n\n# We are checking the wget package. If the package is not installed then install the same\necho -e \"\\nInfo-msg : Checking wget package. If the package is not installed then install the same \"\nif command_existence_check wget; then\n echo -e \"\\nInfo-msg : wget application is already instlled on the system. So no need to install the package. Continuing with the process.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelif _INTERNET_STATUS==1; then\n echo -e \"\\nCaution-msg : wget application is not installed on the system. Hence now we will be installing the wget application(Online installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n # Updating the repo\n $sh_c 'apt-get update'\n \n # Installing wget application package\n $sh_c 'sudo apt-get install wget'\nelif _INTERNET_STATUS=0; then\n echo -e \"\\nCaution-msg : wget application is not installed on the system. Hence now we will be installing the wget application(Offline installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n # Installing wget application package\n package_file_name=\"wget.deb\"; # Mrunal : 20151229-1050 : \n package_name=\"wget\"; # Mrunal : 20151229-1050 : Name or common name of the package\n check_file $package_file_name $package_name # Mrunal : 20151229-1050 : Check for existance of package file\n $sh_c 'dpkg -i wget.deb' # Mrunal : 20151229-1050 : Only for Ubuntu or Debian based systems\nelse\n echo -e \"\\nError-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n\n# Checking for the interent connections\ninternet_chk\n\necho -e \"\\nInfo-msg : **Docker-Image creation** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# We are checking the Docker package. If the package is not installed then install the same\necho -e \"\\nInfo-msg : Checking Docker package. If the package is not installed then install the same \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nif command_existence_check docker && [ -e /var/run/docker.sock ]; then\n echo -e \"\\nInfo-msg : docker application is already instlled on the system. So no need to install the package. Continuing with the process. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Current user\n echo -e \"\\nInfo-msg : Current Username : $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Print the version of installed docker \n echo -e \"\\nInfo-msg : Checking the already installed docker application version \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'docker version'\nelif _INTERNET_STATUS==1; then\n echo -e \"\\nCaution-msg : Docker application is not installed on the system. 
Hence now we will be installing the Docker application (Online installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Install Docker application via wget\n wget -qO- https://get.docker.com/ | sh | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Current user\n echo -e \"\\nInfo-msg : Current Username : $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Adding the current user in docker group\n echo -e \"\\nInfo-msg : Adding $(whoami) in docker group : sudo usermod -aG docker $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo usermod -aG docker $(whoami)' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Checking the current group of the current user\n echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Forcefully changing group of the current user to docker group (to avoid restart)\n echo -e \"\\nInfo-msg : Forcefully changing group of the $(whoami) to docker group : newgrp docker \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'newgrp docker' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Checking the current group of the current user\n echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Starting docker(docker-engine) service\n echo -e \"\\nInfo-msg : Starting docker service (docker-engine) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo start docker' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\nelif [[ $_INTERNET_STATUS == 0 ]]; then\n echo -e \"\\nCaution-msg : Docker application is not installed on the system. 
Hence now we will be installing the Docker application (Offline installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Install Docker application via the local package\n dpkg -i docker-engine.deb | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Current user\n echo -e \"\\nInfo-msg : Current Username : $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Adding the current user in docker group\n echo -e \"\\nInfo-msg : Adding $(whoami) in docker group \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo usermod -aG docker $(whoami)' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n \n # Checking the current group of the current user\n echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Forcefully changing group of the current user to docker group (to avoid restart)\n echo -e \"\\nInfo-msg : Forcefully changing group of the $(whoami) to docker group : newgrp docker \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'newgrp docker' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Checking the current group of the current user\n echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n # Starting docker(docker-engine) service\n echo -e \"\\nInfo-msg : Starting docker service (docker-engine) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo start docker' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\nelse\n echo -e \"\\nError-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n# Build the docker image (via instructions in Docker file)\n#echo -e \"\\nInfo-msg : Build the docker image (via instructions in Docker file). This process may take a long time {Depends on the internet speed. Approx. 45 mins to 1 hr 45 mins}\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n#docker build -t gnowgi/gstudio . | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# docker image and container related variables\ndock_img_file=gstudio-docker-10112015.tar\ndock_img=gnowgi/gstudio;\ndock_con=gstudio-con\n\nport_ssh_dock=22;\nport_ssh_host=8022;\n\nport_smtp_dock=25;\nport_smtp_host=8025;\n\nport_http_dock=80;\nport_http_host=80;\n\nport_django_dev_dock=8000;\nport_django_dev_host=8000;\n\nport_mongo_dock=27017;\nport_mongo_host=27017;\n\nport_smtp_test_dock=1025;\nport_smtp_test_host=11025;\n\nport_imap_dock=143;\nport_imap_host=8143;\n\nport_smtps_dock=587;\nport_smtps_host=8587;\n\nif [ -f $dock_img_file ]; then\n echo -e \"Info-msg : Docker-Image tar file exists. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n echo -e \"Error-msg : Docker-Image tar file does not exist. 
Please make sure $dock_img_file file is kept near this ${0##*/} (Path : $(pwd)) script \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit\nfi\ndocker load < $dock_img_file\n\ndocker images | grep -w $dock_img > /dev/null 2>&1 # Mrunal : No redirections here please\n\nn=0 ;\nfor (( n=1; n>=1; n++ ))\ndo\n #echo \"name : $dock_con$n\" # Mrunal : Testing purpose\n echo \" # docker ps -a | grep -w $dock_con$n > /dev/null 2>&1\" \n docker ps -a | grep -w $dock_con$n > /dev/null 2>&1 # Mrunal : No redirections here please\n if [[ $? -eq 0 ]]; then\n\t# Docker-Image creation Failed\n\techo -e \"Caution-msg : Docker-container creation Failed. Please try again.$?. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\t# Docker-Image created successfully\n\techo -e \"Info-msg : Docker-Image created successfully. Now initiating the Docker-Container with created docker image(gnowgi/gstudio).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\techo -e \"\\nInfo-msg : **Docker-container initialization** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\techo -e \"\\nInfo-msg : **Please wait for some time - approx 5 mins** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n#\tdocker run -it -d -p 8825:25 -p 8822:22 -p 80:80 -p 8000:8000 -p 27017:27017 -p 11025:1025 -p 8587:587 -p 8143:143 --name=gstudio$n gnowgi/gstudio ;\n#\techo \" # docker run -it -d -p $port_ssh_dock:$port_ssh_host -p $port_smtp_dock:$port_smtp_host -p $port_http_dock:$port_http_host -p $port_django_dev_dock:$port_django_dev_host -p $port_mongo_dock:$port_mongo_host -p $port_smtp_test_dock:$port_smtp_test_host -p $port_imap_dock:$port_imap_host -p $port_smtps_dock:$port_smtps_host --name=$dock_con$n $dock_img\" # Mrunal: Testing purpose\n\tdocker run -it -d -p $port_ssh_host:$port_ssh_dock -p $port_smtp_host:$port_smtp_dock -p $port_http_host:$port_http_dock -p $port_django_dev_host:$port_django_dev_dock -p $port_mongo_host:$port_mongo_dock -p $port_smtp_test_host:$port_smtp_test_dock -p $port_imap_host:$port_imap_dock -p $port_smtps_host:$port_smtps_dock --name=$dock_con$n $dock_img ;\n\tif [[ $? -eq 0 ]]; then\n\t # Docker-Container starting success\n\t echo -e \"Info-msg : Docker-container created and started successfully. \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\telse\n\t # Docker-Container creation Failed\n\t echo -e \"Caution-msg : Docker-container creation Failed. Please try again. (Error code : $?) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\tfi\n\tsleep 5m\n\tbreak\n fi\ndone\n\n#docker ps -q --filter=image=gnowgi/gstudio > /dev/null 2>&1 # Mrunal : No redirections here please\ndocker ps | grep -w $dock_con$n > /dev/null 2>&1 # Mrunal : No redirections here please\nif [[ $? -eq 0 ]]; then\n # Installation completed\n echo -e \"Info-msg : Installation complete successfully. Just enter your ipaddress:port (IP:$port_http_host) in address bar of your internet browser.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n # Installation Failed\n echo -e \"Caution-msg : Installation Failed. Please try again. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n# ----------------------------- Shell file code ends here ------------------------------------------\n\n}\n"
},
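Stripped of its logging and retries, the (since superseded) script above boils down to loading an image tarball and publishing selected container ports on the host. A condensed sketch of that flow, with a placeholder tar file name and container name; the image name follows the script:

#!/bin/bash
# Load the exported image, then run it with host:container port mappings.
docker load < gstudio-docker-image.tar || exit 1
docker run -d -p 80:80 -p 8022:22 --name gstudio-con1 gnowgi/gstudio
# Confirm the container is up.
docker ps --filter name=gstudio-con1 --format '{{.Names}}: {{.Status}}'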
{
"alpha_fraction": 0.5088849663734436,
"alphanum_fraction": 0.5286539196968079,
"avg_line_length": 45.41237258911133,
"blob_id": "45870dc099b1c8253ad36e9ad75fdfea3cdfad8d",
"content_id": "dd55f02f6461d836fbee623f87fdf733215bcecd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4502,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 97,
"path": "/scripts/load-docker-image.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : build-docker.sh\n# File creation : gnowgi\n# Description :\n# git clone\n# Build Docker-Image via docker build command (using Dockerfile)\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM\n# Description : \n# Logs directory check and creation\n# Prerequisites - Checking for OS version and architecture\n# Checking type of user and permission\n# Internet checking\n# Checking wget package\n# Docker application / package checking and installation\n# Creating local copy of replica code via git clone or update via git pull \n# Build Docker-Image via docker build command (using Dockerfile)\n# Verify image creation\n# Start the Docker-container via docker run command (using newly created docker image)\n# Copy host logs(pre-install logs) inside docker container \n# Verify initialization of docker-container and display message of completion\n#--------------------------------------------------------------------------------------------------------------#\n\n#-----------------------------------------------------------------------\n# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM : Old code\n#git clone https://github.com/gnowledge/gstudio.git\n#docker build -t gnowgi/gstudio .\n#-----------------------------------------------------------------------\n\n\n# shell \nsh_c=\"sh -c\"\n\n\n#--------------------------------------------------------------------#\n# Log file details...\n#--------------------------------------------------------------------#\nLOG_DIR=\"$(pwd)/Pre-install_Logs\";\nINSTALL_LOG=\"docker-load-image-$(date +%Y%m%d-%H%M%S).log\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nINSTALL_LOG_FILE=\"$LOG_DIR/$INSTALL_LOG\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\ndHOME=\"/home/docker/code\";\n# ---------------- Log files variable def ends here -----------------\n\n\n# Mrunal : Set dHOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\nsed -e \"/hHOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\nmore $file1/confs/deploy.conf | grep hHOME; \n\n\nsource $file1/confs/deploy.conf\n\npwd\n#sg docker -c 'pwd'\ndocker ps\nif [[ $? != 0 ]]; then\n echo -e \"\\nCaution-msg : Please check the docker installation Or install docker and restart system to take effect. Try again later after.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit;\nfi\n \n\nif [[ $1 == \"\" ]]; then\n echo \"Please provide the image name with complete path.(\\path_to_the_dirctory\\file_name)\" ;\n echo \"(For example '/home/docker/code/school-server_mongokit_v1-20160330-134534' must be the default file name and hit Enter key of Keyboard)\" ;\n read dock_img_file ;\nelif [[ -f $dock_img_file ]]; then\n dock_img_file=$1;\nelif [[ ! -f $dock_img_file ]]; then\n echo -e \"Info-msg : Docker image file does not exist. 
\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n exit;\nelse\n echo -e \"\\nCaution-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit;\nfi\necho \"Docker image file name entered is $dock_img_file .\" ;\n\necho -e \"\\nInfo-msg : Loading docker images($dock_img). Be patient it may take few minutes. : sg docker -c 'docker load < $dock_img_file' \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\ndocker load < $dock_img_file\n\nif [[ $? -eq 0 ]]; then\n # Docker image loaded successfully\n echo -e \"Info-msg : Docker image loaded successfully. \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n bash $hHOME/scripts/start-new-container.sh $dock_img_name\nelse\n # Docker image loading failed \n echo -e \"Caution-msg : Docker image could not be loaded. Please try again. (Error code : $?) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n# ----------------------------- Shell file code ends here ------------------------------------------\n\n}\n"
},
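The validation flow in load-docker-image.sh (prompt when no argument is given, check the file, then load) reduces to a few lines. A minimal sketch of that pattern with illustrative variable names:

#!/bin/bash
# Accept the tar path as $1 or prompt for it, verify it, then load.
img="$1"
if [ -z "$img" ]; then
    read -r -p "Path to the docker image tar file: " img
fi
if [ ! -f "$img" ]; then
    echo "Error: '$img' not found." >&2
    exit 1
fi
docker load < "$img"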
{
"alpha_fraction": 0.6010396480560303,
"alphanum_fraction": 0.6107861995697021,
"avg_line_length": 42.91428756713867,
"blob_id": "5bbc27f14b1d8f2dffc25b50592a1907758cfd31",
"content_id": "d30177a5c99fd00789f1c14cbe380607110fb73e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1539,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 35,
"path": "/scripts/create-all-schools-users-in.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#--------------------------------------------------------------------#\n# Create all users of all school csvs provided \n# File name : create-all-schools-users.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : 10-08-2016 09:22:AM\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for creating all the users of all the csv files placed in the directory. \n# Execute Command with each filename path (Element of an array) as an argument to sync_users command\n#--------------------------------------------------------------------#\n\nFILE_NAMES=( `ls /home/docker/code/user-csvs/*_users.csv` );\n#echo \"File names are: ${FILE_NAMES[@]} and Timestamp: $DateTime_STAMP2\";\n#echo \"array : Len of array ${#FILE_NAMES[@]} : Len of first ${#FILE_NAMES} : Array ${FILE_NAMES[@]}\"; # : Testing purpose printing\n\necho \"File names selected for user creation: ${FILE_NAMES[@]}\"\necho \"No of files: ${#FILE_NAMES[@]}\"\n\n# : Execute Command with each filename path (Element of an array) as an argument to sync_users command\nfor ss_id in \"${FILE_NAMES[@]}\";\ndo\n# echo \"ss_id = $ss_id\"; # : Testing purpose printing\n if [[ $ss_id != \"\" ]]; then\n\t\techo \"File name : $ss_id\";\n\t\tcd /home/docker/code/gstudio/gnowsys-ndf;\n\t\tpython manage.py sync_users ${ss_id};\n else\n\t\techo \"Found a blank entry in file name. Hence skipping this enrty and continuing the process. Filename : $ss_id.\"\n fi\ndone\n\necho \"Finished processing, Please verify the users in website' s admin panel. \";\n\n\n"
},
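The loop above builds its file list with `ls`, which yields a bogus literal pattern when no *_users.csv files exist. A defensive variant using bash's nullglob option, keeping the original paths; this is an alternative sketch, not the script's actual behaviour:

#!/bin/bash
# With nullglob an unmatched glob expands to nothing, so the loop body
# never runs on a non-existent file name.
shopt -s nullglob
for csv in /home/docker/code/user-csvs/*_users.csv; do
    echo "File name : $csv"
    ( cd /home/docker/code/gstudio/gnowsys-ndf && python manage.py sync_users "$csv" )
done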
{
"alpha_fraction": 0.6244628429412842,
"alphanum_fraction": 0.6352715492248535,
"avg_line_length": 57.064273834228516,
"blob_id": "13e593d7c3fc87e7314a8e3db31e68ae18d271a9",
"content_id": "9096ba600dcb4ac380a87eb0caf43c6eb4abf48a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 30716,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 529,
"path": "/scripts/setup.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#--------------------------------------------------------------------#\n# setup CLIx student platform in coreos / ubuntu \n# File name : setup.sh\n# File version : 2.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : Thu Jan 18 00:54:21 IST 2018\n# Modified by : Mr. Mrunal M. Nachankar\n# Modified on : Sun Apr 29 11:44:17 IST 2018\n# Description : This file is used for installation and setup of CLIx student paltform in coreos / ubuntu.\n# Important : 1. Change / Add / Fill your detail in \"Fill in your details here\" block.\n# 2. Ensure mongodb is installed and configured properly.\n# 3. Script is by default assuming no authentication, localhost as host and default 27017.\n# 4. Files will be generated with \"<db_name><collection_name>.csv\".\n# 5. Files will be generated in the same directory from where shell file is triggered.\n# Future scope : 1. Make it compatible with arguments as laymans will not be able to edit this file.\n# 2. Make it compatible to just fetch single collection of database.\n# 3. Make it compatible to export in json format.\n# References : 1. https://gist.github.com/mderazon/8201991#file-mongo-dump-csv-sh\n# 1.1 https://www.drzon.net/posts/export-mongodb-collections-to-csv-without-specifying-fields/\n# 1.2 https://stackoverflow.com/questions/6814151/how-to-export-collection-to-csv-in-mongodb\n# 2. https://gist.github.com/zubairalam/ab0a8d6a32439f74d267#file-make_csv-sh\n#--------------------------------------------------------------------#\n\nsource ./mrulogger.sh\n\nSCRIPT_ENTRY\n\nsudo true\n\nfunction setup(){\n\n # Variables related to \"copy_content_validations\" function\n source_path=\"1\"; # Holds source file / directory path for copying\n destination_path=\"2\"; # Holds destination directory path for copying\n\n # Variables related to \"type_of_content\" function\n content=\"NULL\"; # Holds content value (path) to be checked file, directory or something else\n content_type=\"NULL\"; # Holds type of file\n\n\n # Variables related to \"copy_content_validations\" function (Data Integrity)\n filename_full=\"\"\n filename=$(basename \"$filename_full\")\n extension=\"${filename##*.}\"\n filename=\"${filename%.*}\"\n\n # Variables related to \"docker_load\" and \"docker_load_validations\" function (docker load and validation)\n docker_image_path=\"1\";\n docker_image_name=\"2\";\n docker_image_grep_name=\"3\";\n docker_image_loading_status=\"Not Idea\";\n\n # Variables related to \"docker_run\" and \"docker_run_validations\" function (docker run and validation)\n docker_container_name=\"1\";\n docker_container_running_status=\"Not Idea\";\n\n # Variables related to \"set_language\" function (setting default language)\n state_code=\"1\";\n language=\"Not Idea\";\n\n setup_progress_status_filename=\"/home/core/setup_progress_status_value\";\n\n source_base_path=\"/mnt/home/core/installation-content\";\n\n#************************ Major process realted functions starts from here ***************************#\n\n # Check whether \"/\" is mounted from inserted disk or not...\n CHECK_IF_ROOT_MOUNTED_FROM_USB_DISK \"$BASH_SOURCE\";\n\n\n # Step 1: copying initial files required for the setup of student CLIx platform\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"0\" ] || [ ! -f $setup_progress_status_filename ] || [ \"$setup_progress_status\" == \"\" ]; then\n CHECK_CORRECT_USB_DISK \"$BASH_SOURCE\";\n\n INFO \"Setup progress status value: $setup_progress_status. 
Hence copying initial files required for the setup of student CLIx platform.\" \"green\" ;\n \n source_path=\"${source_base_path}/backup-old-server-data.sh\"; # backup-old-server-data.sh\n destination_path=\"/home/core/\";\n INFO \"Copy backup-old-server-data.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/install-to-disk.sh\"; # install-to-disk.sh\n destination_path=\"/home/core/\";\n INFO \"Copy install-to-disk.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/copy-softwares.sh\"; # copy-softwares.sh\n destination_path=\"/home/core/\";\n INFO \"Copy copy-softwares.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/setup.sh\"; # setup.sh\n destination_path=\"/home/core/\";\n INFO \"Copy setup.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/mrulogger.sh\"; # mrulogger.sh\n destination_path=\"/home/core/\";\n INFO \"Copy mrulogger.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/Execute-get_all_users_activity_timestamp_csvs.sh\"; # Execute-single_school_get_MIT_activity_data.sh\n destination_path=\"/home/core/\";\n INFO \"Copy Execute-single_school_get_MIT_activity_data.sh script from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/mycron-host\"; # mycron-host\n destination_path=\"/home/core/\";\n INFO \"Copy mycron-host cron config file from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/data\"; # data\n destination_path=\"/home/core/\";\n INFO \"Copy data directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/code\"; # code\n destination_path=\"/home/core/\"; \n INFO \"Copy code directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/user-csvs\"; # user-csvs\n destination_path=\"/home/core/\";\n INFO \"Copy user-csvs directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/display-pics\"; # display-pics\n destination_path=\"/home/core/\";\n INFO \"Copy display-pics directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/setup-software/oac\"; # oac\n destination_path=\"/home/core/setup-software/\"; \n INFO \"Copy oac and oat from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n # source_path=\"${source_base_path}/setup-software/oat\"; # oat\n # destination_path=\"/home/core/setup-software/\"; \n # INFO \"Copy oac and oat from 
$source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n # RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/docker-compose\"; # docker-compose (docker-compose.yml for gstudio and syncthing)\n destination_path=\"/home/core/\";\n INFO \"Copy docker-compose directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/setup-software/docker-images/gstudio\"; # gstudio docker image\n destination_path=\"/home/core/setup-software/\"; \n INFO \"Copy gstudio docker image from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/setup-software/docker-images/syncthing\"; # syncthing docker image\n destination_path=\"/home/core/setup-software/\"; \n INFO \"Copy syncthing docker image from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/setup-software/i2c-softwares/Fonts/\"; # fonts\n destination_path=\"/usr/share/fonts/\";\n INFO \"Copy Fonts directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n source_path=\"${source_base_path}/assesment-datastore\"; # assesment-datastore\n destination_path=\"/home/core/\";\n INFO \"Copy assesment-datastore directory from $source_path to $destination_path\" \"$BASH_SOURCE\" \"green\";\n RSYNC_CONTENT \"$source_path\" \"$destination_path\";\n\n UMOUNT_PARTITION \"/dev/$selected_usb_disk\" \"/mnt/\";\n\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"1\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 1 of copying initial files required for the setup of student CLIx platform.\" \"$BASH_SOURCE\";\n fi\n\n\n # Step 2: loading docker image for the setup of student CLIx platform\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"1\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. Hence loading docker image for the setup of student CLIx platform.\" \"green\" ;\n\n # Set docker image related variables\n docker_image_path=\"/home/core/setup-software/gstudio/registry.tiss.edu-school-server-master-20180605-v1.tar\";\n docker_image_name=\"registry.tiss.edu/school-server-master:20180605-v1\";\n docker_image_grep_name=\"20180605-v1\";\n CHECK_FOR_ALREADY_LOADED_DOCKER_IMAGE;\n\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"2\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 2 of loading docker image for the setup of student CLIx platform.\" \"$BASH_SOURCE\";\n fi\n\n\n # Step 3: starting the container for the setup of student CLIx platform\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"2\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. 
Hence starting the container for the student CLIx platform setup.\" \"green\" ;\n\n # Set docker container related variables\n docker_compose_filename=\"/home/core/docker-compose/gstudio/docker-compose.yml\";\n docker_container_name=\"gstudio\";\n CHECK_FOR_ALREADY_STARTED_DOCKER_CONTAINER;\n\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"3\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 3 of starting the container for the student CLIx platform setup.\" \"$BASH_SOURCE\";\n fi\n\n\n # Step 4: setting up / configuring the container for the setup of student CLIx platform\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"3\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. Hence setting up / configuring the container for the setup of student CLIx platform.\" \"green\" ;\n\n # Set docker container related variables\n docker_compose_filename=\"/home/core/docker-compose/gstudio/docker-compose.yml\";\n docker_container_name=\"gstudio\";\n CHECK_FOR_ALREADY_STARTED_DOCKER_CONTAINER;\n docker-compose -f $docker_compose_filename up -d\n\n\n echo -e \"\\n${cyan}school server instance config - setting server name/id ${reset}\"\n \n CHECK_FILE_EXISTENCE \"/home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\" \"create\"\n\n\techo \"ss_id: $ss_id ; ss_code: $ss_code ; ss_name: $ss_name ; state_code: $state_code\"\n\n # get server id (Remove single quote {'} and Remove double quote {\"})\n# ss_id=`echo $(echo $(more /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`\n #ss_id=$(more /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | sed -n '/.*=/{p;q;}' | sed 's/.*= //g' | sed \"s/'//g\" | sed 's/\"//g')\n\n # Trim leading whitespaces \n ss_id=$(echo ${ss_id##*( )})\n # Trim trailing whitespaces \n ss_id=$(echo ${ss_id%%*( )})\n\n # update server id\n if grep -Fq \"GSTUDIO_INSTITUTE_ID\" /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n then\n # code if found\n sudo sh -c \"sed -e \\\"/GSTUDIO_INSTITUTE_ID/ s/=.*/='${ss_id}'/\\\" -i /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n else\n # code if not found\n sudo sh -c \"echo -e \\\"GSTUDIO_INSTITUTE_ID ='${ss_id}'\\\" >> /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n fi\n\n # update school code\n ss_code=$(grep -irw \"$ss_id\" /home/core/code/All_States_School_CLIx_Code_+_School_server_Code_-_TS_Intervention_Schools.csv | awk -F ';' '{print $3}')\n\n # Trim leading whitespaces \n ss_code=$(echo ${ss_code##*( )})\n # Trim trailing whitespaces \n ss_code=$(echo ${ss_code%%*( )})\n\n if grep -Fq \"GSTUDIO_INSTITUTE_ID_SECONDARY\" /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n then\n # code if found\n sudo sh -c \"sed -e \\\"/GSTUDIO_INSTITUTE_ID_SECONDARY/ s/=.*/='${ss_code}'/\\\" -i /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n else\n # code if not found\n sudo sh -c \"echo -e \\\"GSTUDIO_INSTITUTE_ID_SECONDARY ='${ss_code}'\\\" >> /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n fi\n\n # update school name\n ss_name=$(grep -irw \"$ss_id\" /home/core/code/All_States_School_CLIx_Code_+_School_server_Code_-_TS_Intervention_Schools.csv | awk -F ';' '{print $2}' | sed 's/\"//g')\n\n # Trim leading whitespaces \n ss_name=$(echo ${ss_name##*( )})\n # 
Trim trailing whitespaces \n ss_name=$(echo ${ss_name%%*( )})\n \tstate_code=${ss_id:0:2};\n\n\n if grep -Fq \"GSTUDIO_INSTITUTE_NAME\" /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n then\n # code if found\n sudo sh -c \"sed -e \\\"/GSTUDIO_INSTITUTE_NAME/ s|=.*|='${ss_name}'|\\\" -i /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n else\n # code if not found\n sudo sh -c \"echo -e \\\"GSTUDIO_INSTITUTE_NAME ='${ss_name}'\\\" >> /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\";\n fi\n\n\techo \"ss_id: $ss_id ; ss_code: $ss_code ; ss_name: $ss_name ; state_code: $state_code\"\n\n INFO \"Waiting for the processes to start\" \"\" \"green\";\n WARNING \"caution : it may take long time (90s)\";\n\n sleep 90 # Wait for application to start properly\n\n echo -e \"\\n${cyan}school server instance config - setting postgres database ${reset}\"\n docker exec -it gstudio /bin/sh -c \"echo 'psql -f /data/drop_database.sql;' | sudo su - postgres\"\n docker exec -it gstudio /bin/sh -c \"echo 'psql -f /data/pg_dump_all.sql;' | sudo su - postgres\"\n\n echo -e \"\\n${cyan}restarting school server instance to apply the configuration ${reset}\"\n docker restart gstudio\n\n INFO \"Restarting gstudio container. \" \"\" \"green\";\n WARNING \"caution : it may take long time (60s)\";\n\n sleep 60 # Wait for application to start properly\n\n echo -e \"\\n${cyan}school server instance config - copy display pics and user csvs ${reset}\" \n docker cp display-pics gstudio:/home/docker/code/\n\t docker exec -it gstudio /bin/sh -c \"mkdir /home/docker/code/user-csvs\"\n docker cp user-csvs/${state_code}/${ss_id}_users.csv gstudio:/home/docker/code/user-csvs/\n\n echo -e \"\\n${cyan}school server instance config - create users and apply display pics ${reset}\"\n docker exec -it gstudio /bin/sh -c \"/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py sync_users /home/docker/code/user-csvs/${ss_id}_users.csv\"\n\n# echo -e \"\\n${cyan}school server instance config - setting necessary permissions to media directory and files ${reset}\"\n# sudo chmod -R 755 /home/core/data/media/*\n\n echo -e \"\\n${cyan}school server instance config - create workspace with institute id ${reset}\"\n docker exec -it gstudio /bin/sh -c \"/bin/echo \\\"execfile('/home/docker/code/gstudio/doc/deployer/create_workspace_from_institute_id.py')\\\" |/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py shell\"\n\n echo -e \"\\n${cyan}school server instance config - correct spelling mistakes in usernames ${reset}\"\n docker exec -it gstudio /bin/sh -c \"/bin/echo \\\"execfile('/home/docker/code/gstudio/doc/release-scripts/release2-1_nov17.py')\\\" |/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py shell\"\n\n echo -e \"\\n${cyan}school server instance config - set crontab (trigger script at start of system) ${reset}\"\n crontab /home/core/mycron-host\n\n # copy ssl files \n docker exec -it gstudio /bin/sh -c \"/usr/bin/rsync -avPh /data/clixserver.tiss.edu /etc/ssl/\"\n\n echo -e \"\\n${cyan}collectstatic${reset}\"\n docker exec -it gstudio /bin/sh -c \"/bin/echo yes |/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py collectstatic\"\n\n echo -e \"\\n${cyan}trigger crontab scripts to set syncthing${reset}\"\n docker exec -it gstudio /bin/sh -c \"/home/docker/code/scripts/system-heartbeat.sh > /tmp/cron-system-heartbeat.log\"\n docker exec -it gstudio /bin/sh -c \"/home/docker/code/scripts/analytics.sh > /tmp/cron-analytics.log\"\n 
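# Added note (assumption): the remaining cron-managed scripts below (backup, nginx logrotate, MIT activity-data export, activity-timestamp CSVs) are triggered once here, presumably so their logs and reports exist before the first scheduled cron run.\n 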
docker exec -it gstudio /bin/sh -c \"/home/docker/code/scripts/backup.sh > /tmp/cron-bkp.log\"\n docker exec -it gstudio /bin/sh -c \"/usr/sbin/logrotate /etc/logrotate.d/nginx > /tmp/cron-logrotate.log\"\n docker exec -it gstudio /bin/sh -c \"/usr/bin/python /home/docker/code/qbank-gstudio-scripts/single_school_get_MIT_activity_data/single_school_get_MIT_activity_data.py > /tmp/cron-single_school_get_MIT_activity_data.log\"\n docker exec -it gstudio /bin/sh -c \"/bin/echo \\\"execfile('/home/docker/code/gstudio/doc/deployer/get_all_users_activity_timestamp_csvs.py')\\\" | /usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py shell > /tmp/cron-get_all_users_activity_timestamp_csvs.py.log\"\n\n echo -e \"\\n${cyan}copying fonts${reset}\"\n sudo rsync -avPh /mnt/home/core/installation-content/setup-software/i2c-softwares/Fonts/* /usr/share/fonts/;\n sudo fc-cache -f -v\n\n\n echo -e \"\\n${cyan}restarting school server instance to apply the configuration ${reset}\"\n docker restart gstudio\n\n INFO \"Restarting gstudio container. \" \"\" \"green\";\n WARNING \"caution : it may take long time (180s)\";\n\n sleep 180 # Wait for apllication to start properly\n\n # restart cron\n # docker exec -it gstudio /bin/sh -c \"/bin/kill -9 $(pidof cron) && /usr/sbin/cron \"\n \n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"4\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 4 of setting up / configuring the container for the setup of student CLIx platform.\" \"$BASH_SOURCE\";\n fi\n\n\n # Step 5: loading docker image for the setup of syncthing\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"4\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. Hence loading docker image for the setup of syncthing.\" \"green\" ;\n\n # Set docker image realted variable\n docker_image_path=\"/home/core/setup-software/syncthing/linuxserver-syncthing.tar\";\n docker_image_name=\"linuxserver/syncthing\";\n docker_image_grep_name=\"linuxserver/syncthing\";\n CHECK_FOR_ALREADY_LOADED_DOCKER_IMAGE;\n\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"5\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 5 of loading docker image for the setup of syncthing.\" \"$BASH_SOURCE\";\n fi\n\n\n # Step 6: starting the container for the setup of syncthing\n GET_SETUP_PROGRESS \"$setup_progress_status_filename\";\n if [ \"$setup_progress_status\" == \"5\" ]; then\n INFO \"Setup progress status value: $setup_progress_status. Hence starting the container for setup syncthing.\" \"green\" ;\n\n # Set docker image realted variable\n docker_compose_filename=\"/home/core/docker-compose/syncthing/docker-compose.yml\";\n docker_container_name=\"syncthing\";\n CHECK_FOR_ALREADY_STARTED_DOCKER_CONTAINER;\n\n SET_SETUP_PROGRESS \"$setup_progress_status_filename\" \"6\";\n else\n WARNING \"Setup progress step value is ${setup_progress_status}, hence continuing with the process.\\nSkipping the step 6 of starting the container for setup syncthing.\" \"$BASH_SOURCE\";\n fi\n\n}\n\n#************************ Major process realted functions ends from here ***************************#\n\n#**************************** Installation process starts from here ********************************#\n\n# echo -e \"\\n${cyan}Please be ready with the School server id ${reset}\" ;\n\nif [ ! 
-f /home/core/school_server_id_value ]; then\n INFO \"Please provide the School server id? (Example Mizoram school 23 will have mz23 and Telangana 24 school - tg24)\" \"$BASH_SOURCE\" \"yellow\";\n GET_INPUTS \"School server id: \";\n eval ss_id=$input;\n\n ss_id=\"${ss_id##*( )}\"; ### Trim leading whitespaces ###\n ss_id=\"${ss_id%%*( )}\"; ### Trim trailing whitespaces ###\n echo \"${ss_id}\" > /home/core/school_server_id_value;\n\n state_code=${ss_id:0:2};\n SET_LANGUAGE $state_code;\nelif [ -f /home/core/school_server_id_value ] || [ \"$school_server_id\" != \"\" ]; then\n INFO \"School server id value is ${school_server_id}, hence continuing with the process.\" \"$BASH_SOURCE\" \"green\";\n ss_id=$(more /home/core/school_server_id_value);\n state_code=${ss_id:0:2};\n SET_LANGUAGE $state_code;\nfi\n\n# update host entry\nif grep -Fq \"clixserver clixserver.tiss.edu\" /etc/hosts\nthen\n # code if found\n INFO \"host entry for 'clixserver clixserver.tiss.edu' already exist in '/etc/host'.\" \"$BASH_SOURCE\" \"green\"; \nelse\n # code if not found\n WARNING \"host entry for 'clixserver clixserver.tiss.edu' doesn't exist in '/etc/host'. Hence trying to add\" \"$BASH_SOURCE\"; \n rsync -avPh /etc/hosts /tmp/ ;\n sed -e '/^127.0.1.1/ s/$/ clixserver clixserver.tiss.edu/' -i /tmp/hosts;\n sudo rsync -avzPh /tmp/hosts /etc/;\n sudo cat /tmp/hosts | sudo tee /etc/hosts;\nfi\n\n# update Huawei E353/E3131 support entry\nif grep -Fq \"# Huawei E353/E3131\" /lib/udev/rules.d/40-usb_modeswitch.rules\nthen\n # code if found\n INFO \"host entry for 'clixserver clixserver.tiss.edu' already exist in '/lib/udev/rules.d/40-usb_modeswitch.rules'.\" \"$BASH_SOURCE\" \"green\"; \nelse\n # code if not found\n WARNING \"Entry for 'Huawei E353/E3131' dongle support doesn't exist in '/lib/udev/rules.d/40-usb_modeswitch.rules'. Hence trying to add\" \"$BASH_SOURCE\"; \n echo -e \"# Huawei E353/E3131 \\nATTR{idVendor}==\\\"12d1\\\", ATTR{idProduct}==\\\"1f01\\\", RUN +=\\\"usb_modeswitch '%b/%k'\\\"\" | tee -a /lib/udev/rules.d/40-usb_modeswitch.rules;\n\n # rsync -avPh /lib/udev/rules.d/40-usb_modeswitch.rules /tmp/ ;\n # echo -e \"# Huawei E353/E3131 \\nATTR{idVendor}==\\\"12d1\\\", ATTR{idProduct}==\\\"1f01\\\", RUN +=\\\"usb_modeswitch '%b/%k'\\\"\" | tee -a /tmp/40-usb_modeswitch.rules;\n # sudo rsync -avzPh /tmp/40-usb_modeswitch.rules /etc/;\n # sudo cat /tmp/40-usb_modeswitch.rules | sudo tee /lib/udev/rules.d/40-usb_modeswitch.rules;\nfi\n\n# Ref: https://askubuntu.com/questions/172524/how-can-i-check-if-automatic-updates-are-enabled\n# update disable unattended updates (package list) entry \nif grep -Fq \"APT::Periodic::Update-Package-Lists\" /etc/apt/apt.conf.d/10periodic\nthen\n # code if found\n INFO \"Auto-updates (package list) entry already exist in '/etc/apt/apt.conf.d/10periodic'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Update-Package-Lists .*$/APT::Periodic::Update-Package-Lists \"0\"/' /etc/apt/apt.conf.d/10periodic\nelse\n # code if not found\n WARNING \"Auto-updates (package list) entry doesn't exist in '/etc/apt/apt.conf.d/10periodic'. 
Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Update-Package-Lists \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/10periodic;\nfi\n\n# update disable unattended updates (download upgradeable package) entry\nif grep -Fq \"APT::Periodic::Download-Upgradeable-Packages\" /etc/apt/apt.conf.d/10periodic\nthen\n # code if found\n INFO \"Auto-updates (download upgradeable package) entry already exist in '/etc/apt/apt.conf.d/10periodic'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Download-Upgradeable-Packages .*$/APT::Periodic::Download-Upgradeable-Packages \"0\"/' /etc/apt/apt.conf.d/10periodic\nelse\n # code if not found\n WARNING \"Auto-updates (download upgradeable package) entry doesn't exist in '/etc/apt/apt.conf.d/10periodic'. Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Download-Upgradeable-Packages \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/10periodic;\nfi\n\n# update disable unattended updates (upgrade packages) entry\nif grep -Fq \"APT::Periodic::Unattended-Upgrade\" /etc/apt/apt.conf.d/10periodic\nthen\n # code if found\n INFO \"Auto-updates (upgrade packages) entry already exist in '/etc/apt/apt.conf.d/10periodic'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Unattended-Upgrade .*$/APT::Periodic::Unattended-Upgrade \"0\"/' /etc/apt/apt.conf.d/10periodic\nelse\n # code if not found\n WARNING \"Auto-updates (upgrade packages) entry doesn't exist in '/etc/apt/apt.conf.d/10periodic'. Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Unattended-Upgrade \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/10periodic;\nfi\n\n\n# update disable unattended updates (package list) entry\nif grep -Fq \"APT::Periodic::Update-Package-Lists\" /etc/apt/apt.conf.d/20auto-upgrades\nthen\n # code if found\n INFO \"Auto-updates (package list) entry already exist in '/etc/apt/apt.conf.d/20auto-upgrades'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Update-Package-Lists .*$/APT::Periodic::Update-Package-Lists \"0\"/' /etc/apt/apt.conf.d/20auto-upgrades\nelse\n # code if not found\n WARNING \"Auto-updates (package list) entry doesn't exist in '/etc/apt/apt.conf.d/20auto-upgrades'. Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Update-Package-Lists \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/20auto-upgrades;\nfi\n\n# update disable unattended updates (download upgradeable package) entry\nif grep -Fq \"APT::Periodic::Download-Upgradeable-Packages\" /etc/apt/apt.conf.d/20auto-upgrades\nthen\n # code if found\n INFO \"Auto-updates (download upgradeable package) entry already exist in '/etc/apt/apt.conf.d/20auto-upgrades'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Download-Upgradeable-Packages .*$/APT::Periodic::Download-Upgradeable-Packages \"0\"/' /etc/apt/apt.conf.d/20auto-upgrades\nelse\n # code if not found\n WARNING \"Auto-updates (download upgradeable package) entry doesn't exist in '/etc/apt/apt.conf.d/20auto-upgrades'. 
Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Download-Upgradeable-Packages \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/20auto-upgrades;\nfi\n\n# update disable unattended updates (upgrade packages) entry\nif grep -Fq \"APT::Periodic::Unattended-Upgrade\" /etc/apt/apt.conf.d/20auto-upgrades\nthen\n # code if found\n INFO \"Auto-updates (upgrade packages) entry already exist in '/etc/apt/apt.conf.d/20auto-upgrades'. Hence trying to update (set it to disable{value as '1'})\" \"$BASH_SOURCE\" \"green\";\n sudo sed -i 's/APT::Periodic::Unattended-Upgrade .*$/APT::Periodic::Unattended-Upgrade \"0\"/' /etc/apt/apt.conf.d/20auto-upgrades\nelse\n # code if not found\n WARNING \"Auto-updates (upgrade packages) entry doesn't exist in '/etc/apt/apt.conf.d/20auto-upgrades'. Hence trying to add (set it to disable{value as '1'})\" \"$BASH_SOURCE\"; \n sudo echo -e \"APT::Periodic::Unattended-Upgrade \\\"0\\\";\" | sudo tee -a /etc/apt/apt.conf.d/20auto-upgrades;\nfi\n\nsetup\n\nINFO \"Start docker at startup\" \"$BASH_SOURCE\" \"green\";\nsudo systemctl enable docker\n\n#**************************** Installation process ends here ********************************#\nSCRIPT_ENTRY\n\nexit 0;\n"
},
{
"alpha_fraction": 0.654049277305603,
"alphanum_fraction": 0.6923415660858154,
"avg_line_length": 41.867923736572266,
"blob_id": "01c206c52195a95168596befa51d55b9213147f6",
"content_id": "540c67df5451945c23eb9ef4297349ef7b8f061f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2272,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 53,
"path": "/scripts/data-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n#filename=$(basename $(ls /mnt/update_*.tar.gz | head -n 1));\n#update_patch=\"${filename%.*.*}\";\n#update_patch=\"update_patch-beb6af2-r2.1-20171229\"\n#patch=$(basename $(tar -tf /mnt/patch-*.tar.gz | head -n 1));\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n# echo -e \"\\n${cyan}copy updated patch from /mnt/home/core/${update_patch} to /home/docker/code/ in gstudio container ${reset}\";\n# sudo docker cp /mnt/${update_patch} gstudio:/home/docker/code/;\n\n#echo -e \"\\n${cyan}copy updated patch from /home/docker/code./${update_patch}/data-updates/* to /data/ in gstudio container ${reset}\";\n#docker exec -it gstudio /bin/sh -c \"rm -rf /data/data_export/* && rsync -avzPh /home/docker/code/${update_patch}/data-updates/* /data/data_export/\";\n\n#echo -e \"\\n${cyan}Update offline patch ${reset}\";\n#docker exec -it gstudio /bin/sh -c \"/bin/bash /home/docker/code/${update_patch}/data-updates/course-import-and-export-update.sh\";\n\n#docker exec -it gstudio /bin/sh -c \"rm -rf /data/data_export/*\";\n\n## code for data update started\n\n#for copying the help_videos folder to /data/media\necho -e \"\\n${cyan}Copying the help_videos folder to /data/media ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/data-updates/help_videos /home/core/data/media/ ;\n\n#for copying data-updates to /data folder\necho -e \"\\n${cyan}copying the data-updates folder to /data folder ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"rsync -avPhz /home/docker/code/${patch}/data-updates /data/\";\n\n#running the images-copy.sh script\necho -e \"\\n${cyan}Update the data ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /home/docker/code/${patch}/data-updates/images-copy.sh\";\n\n#for deleting the data-updates copied earlier\necho -e \"\\n${cyan}Removing the data-updates folder from /data folder ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"rm -rf /data/data-updates\";\n\n## code for data update ended\n\n#echo -e \"\\n${cyan}Restart gstudio container ${reset}\";\n#sudo docker restart gstudio;\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 36.71428680419922,
"blob_id": "d8c81450b11bdea3213d2db637856d0d70d7dad7",
"content_id": "c634dac7d0e9819e256e5f49066fa6093724bea7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 7,
"path": "/scripts/analytics.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Generating csv reports of gstudio groupwise user analystics\n\n/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py export_users_analytics\n\n#ln -s /data/gstudio-exported-users-analytics-csvs /softwares/gstudio-exported-users-analytics-csvs\n\n"
},
{
"alpha_fraction": 0.5202769637107849,
"alphanum_fraction": 0.5717111825942993,
"avg_line_length": 35.10714340209961,
"blob_id": "14be6bcfc203a7240f5c6c9b96214f06f12ef833",
"content_id": "907c693a2a9a5b733cf41b1e96a54536c0eb9784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2022,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 56,
"path": "/scripts/coreos-installer.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n# Following variables are used to store the color codes for displaying the content on terminal\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;32m\" ;\nbrown=\"\\033[0;33m\" ;\nblue=\"\\033[0;34m\" ;\ncyan=\"\\033[0;36m\" ;\nreset=\"\\033[0m\" ;\n\nblack=\"\\033[0;30m\" ;\nred=\"\\033[0;31m\" ;\ngreen=\"\\033[0;32m\" ;\nbrown=\"\\033[0;33m\" ;\nblue=\"\\033[0;34m\" ;\npurple=\"\\033[0;35m\" ;\ncyan=\"\\033[0;36m\" ;\n#=\"\\033[0;37m\" ;\n#=\"\\033[0;38m\" ;\n#=\"\\033[0;39m\" ;\n#=\"\\033[0;40m\" ;\n\n\necho -e \"${cyan}Where do you want to install coreos ${reset}\" ;\necho -e \"${cyan}(For example 'sda1' or 'sda8' or 'sdb2' must be entered and hit Enter key of Keyboard) ${reset}\"\necho -e \"${cyan}{if you are not sure please do not proceed. Without input hit enter } ${reset}\" ;\nread part_i ;\necho -e \"\\n${green}Partion entered is $part_i ${reset}\" ;\nif [[ \"$part_i\" == \"\" ]]; then\n echo -e \"\\nNo input. Hence exiting. Thank you. Please try again later.\" ;\n exit\nelse \n check_part=`lsblk | grep $part_i | wc -l`\n if [[ \"$check_part\" != \"1\" ]]; then\n\techo -e \"\\nInvalid input. Hence exiting. Thank you. Please try again later.\" ;\n\texit\n fi\n echo -e \"\\n${red}Caution: It will format $part_i partion ${reset}\";\n echo -e \"${red}Are you sure you want to proceed? ${reset}\" ;\n echo -e -n \"${cyan}yes/no : ${reset}\"\n read part_format_i\n if [[ \"$part_format_i\" == \"\" ]]; then\n\techo -e \"\\nNo input. Hence exiting. Thank you. Please try again later.\" ;\n\texit\n elif [[ \"$part_format_i\" == \"n\" ]] || [[ \"$part_format_i\" == \"N\" ]] || [[ \"$part_format_i\" == \"no\" ]] || [[ \"$part_format_i\" == \"No\" ]] || [[ \"$part_format_i\" == \"NO\" ]]; then\n\techo -e \"\\nInput is '$part_format_i'. Hence exiting. Thank you.\" ;\n\texit\n elif [[ \"$part_format_i\" == \"y\" ]] || [[ \"$part_format_i\" == \"Y\" ]] || [[ \"$part_format_i\" == \"yes\" ]] || [[ \"$part_format_i\" == \"Yes\" ]] || [[ \"$part_format_i\" == \"YES\" ]]; then\n\techo -e \"\\nInput is '$part_format_i'. Continuing the process.\" ;\n else\n\techo -e \"\\nInput is '$part_format_i'. Oops!!! Something went wrong.\"\n fi\n \nfi\nexit\n"
},
{
"alpha_fraction": 0.6731598377227783,
"alphanum_fraction": 0.7132341861724854,
"avg_line_length": 47.55595779418945,
"blob_id": "eba34df660b84a65bde5d51cdfedb74010e74d20",
"content_id": "5824f780eb4bd205b4dabd188208fe400fae610f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 13450,
"license_type": "no_license",
"max_line_length": 763,
"num_lines": 277,
"path": "/scripts/git-offline-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Mrunal : 20161123 : below part is added by Mrunal as suggested by Nagarjuna, Ulhas and Kedar\n\n# Following variables are used to store the color codes for displaying the content on terminal\n# red=\"\\033[0;31m\" ;\n# green=\"\\033[0;32m\" ;\n# brown=\"\\033[0;33m\" ;\n# blue=\"\\033[0;34m\" ;\n# cyan=\"\\033[0;36m\" ;\n# reset=\"\\033[0m\" ;\n\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n#filename=$(basename $(ls -dr /home/docker/code/patch-*/ | head -n 1));\n#patch=\"${filename%.*.*}\";\n#update_patch=\"update_patch-beb6af2-r2.1-20171229\"\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n# git offline update docker code - started\n# git_commit_no_docker=\"520d9ed489fba752fa3843ccb98c0c9ad70329e3\"; # Earlier commit no\n# git_commit_no_docker=\"3a12a65e161c7a13ebfe528fa0dd00359bd7f9c0\"; # Commit on 13-11-2017\n#git_commit_no_docker=\"beb6af265bd62b6dc34bb0acdfcdcedb6b2bccd0\"; # Commit on 29-12-2017\n#git_commit_no_docker=\"26eaf18e4e75553786d52ecb96e259b68090c139\";\ngit_commit_no_docker=\"c0463c5a55a92629edbca0ee34b8c7cbba161d3a\";\n\necho -e \"\\n${cyan}change the directory to /home/docker/code/ ${reset}\"\ncd /home/docker/code/\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}Changing the repo to Gnowledge ${reset}\";\ngit remote remove origin ;\ngit remote add origin https://github.com/gnowledge/gstudio-docker.git ;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/code-updates/gstudio-docker ${reset}\"\ngit fetch /home/docker/code/${patch}/code-updates/gstudio-docker \n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_docker}) from /home/docker/code/${patch}/code-updates/gstudio-docker ${reset}\"\ngit merge $git_commit_no_docker\n\n# git offline update docker code - ended\n\n\n# git offline update gstudio code - started\n\n#git_commit_no_gstudio=\"5d5ed8acd48950f9eb850590bef068f853a42fb5\"; # Earlier commit no\n#git_commit_no_gstudio=\"225cf7b5b8c11b916ee33488c5fc2e82ceaffa5d\"; # Commit on 05-01-2018 \n#git_commit_no_gstudio=\"d6f6592643ec87f06e493aa12467fd17d67b93bd\"; #patch-r5 commit\ngit_commit_no_gstudio=\"235eb4e9818a333e132595664838a22c8e4b4d11\";\n\n#--- One time for 20170912 update - started\necho -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\"\ncd /home/docker/code/gstudio/\n\necho -e \"\\n${cyan}change branch to master ${reset}\"\ngit checkout master;\n\n# echo -e \"\\n${cyan}pulling the latest code from master ${reset}\"\n# git pull origin master\n#--- One time for 20170912 update - ended\n\n#echo -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\"\n#cd /home/docker/code/gstudio/\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/code-updates/gstudio ${reset}\"\ngit fetch /home/docker/code/${patch}/code-updates/gstudio \n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_gstudio}) from /home/docker/code/${patch}/code-updates/gstudio ${reset}\"\ngit merge $git_commit_no_gstudio\n\n# git offline update gstudio code - ended\n\n# git offline update qbank-gstudio-scripts code - started\n\ngit_commit_no_qbank_gstudio_scripts=\"002cbdff2e596f2dab6f0b2c14efd5a561b3dae0\";\n\necho -e \"\\n${cyan}change the directory to 
/home/docker/code/qbank-gstudio-scripts/ ${reset}\"\ncd /home/docker/code/qbank-gstudio-scripts/\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/qbank-gstudio-scripts ${reset}\"\ngit fetch /home/docker/code/${patch}/code-updates/qbank-gstudio-scripts\n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_qbank_gstudio_scripts}) from /home/docker/code/${patch}/qbank-gstudio-scripts ${reset}\"\ngit merge ${git_commit_no_qbank_gstudio_scripts}\n\n# git offline update qbank-gstudio-scripts code - ended\n\n# git offline update qbank-lite code - started\n\n#git_commit_no_qbank_lite=\"1b488926a4d609dcde017e4fe7a47b8a4b541339\"; # Earlier commit no\n#git_commit_no_qbank_lite=\"23e21133c51be72534868e6b1f29f5c38ad217ef\"; # Commit on 29-12-2017\n\n#echo -e \"\\n${cyan}change the directory to /home/docker/code/gstudio/gnowsys-ndf/qbank-lite ${reset}\"\n#cd /home/docker/code/gstudio/gnowsys-ndf/qbank-lite\n\n#echo -e \"\\n${cyan}creating new branch clixserver.tiss.edu in qbank-lite repo ${reset}\" \n#git checkout -b clixserver.tiss.edu \n\n#echo -e \"\\n${cyan}fetch all ${reset}\"\n#git fetch /home/docker/code/${update_patch}/code-updates/qbank-lite origin/clixserver.tiss.edu\n\n#echo -e \"\\n${cyan}change branch to clixserver ${reset}\"\n#git checkout clixserver.tiss.edu\n\n#echo -e \"\\n${cyan}fetching git details from /home/docker/code/${update_patch}/code-updates/qbank-lite ${reset}\"\n#git fetch /home/docker/code/${update_patch}/code-updates/qbank-lite \n\n#echo -e \"\\n${cyan}merging till specified commit number (${git-commit-no}) from /home/docker/code/${update_patch}/code-updates/qbank-lite ${reset}\"\n#git merge $git_commit_no_qbank_lite\n\n#echo -e \"\\n${cyan}remove all the file and sub-driectories in directory (/home/docker/code/gstudio/gnowsys-ndf/qbank-lite/*) ${reset}\"\n#rm -rf /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/*\n\n#echo -e \"\\n${cyan}rsync /home/docker/code/${update_patch}/code-updates/qbank-lite/* in /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/ ${reset}\"\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/qbank-lite/* /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/\n\n# git offline update qbank-lite code - ended\n\n\n# git offline update OpenAssessmentsClient code - started\n\n# #git_commit_no_OpenAssessmentsClient=\"462ba9c29e6e8874386c5e76138909193e90240e\"; # Earlier commit no\n# git_commit_no_OpenAssessmentsClient=\"acfed44c30b421a49fa2ec43b361ff11653e9d31\"; # Commit on 18-09-2017\n\n# echo -e \"\\n${cyan}change the directory to /home/docker/code/OpenAssessmentsClient ${reset}\"\n# cd /home/docker/code/OpenAssessmentsClient/\n\n# echo -e \"\\n${cyan}change branch to clixserver ${reset}\"\n# git checkout clixserver\n\n# echo -e \"\\n${cyan}fetching git details from /home/docker/code/${update_patch}/code-updates/OpenAssessmentsClient ${reset}\"\n# git fetch /home/docker/code/${update_patch}/code-updates/OpenAssessmentsClient \n\n# echo -e \"\\n${cyan}merging till specified commit number (${git-commit-no}) from /home/docker/code/${update_patch}/code-updates/OpenAssessmentsClient ${reset}\"\n# git merge $git_commit_no_OpenAssessmentsClient\n\n# git offline update OpenAssessmentsClient code - ended\n\n\n# prefix and suffix double quotes \" in server code - started\n\n# get server id (Remove single quote {'} and Remove double quote {\"})\n#ss_id=`echo $(echo $(more 
/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`\n#ss_id=$(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | sed -n '/.*=/{p;q;}' | sed 's/.*= //g' | sed \"s/'//g\" | sed 's/\"//g')\n\n# Trim leading whitespaces \n#ss_id=$(echo ${ss_id##*( )})\n# Trim trailing whitespaces \n#ss_id=$(echo ${ss_id%%*( )})\n\n# update server id\n#if grep -Fq \"GSTUDIO_INSTITUTE_ID\" /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n#then\n # code if found\n #sed -e \"/GSTUDIO_INSTITUTE_ID/ s/=.*/='${ss_id}'/\" -i /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#else\n # code if not found\n #echo -e \"GSTUDIO_INSTITUTE_ID ='${ss_id}'\" >> /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#fi\n\n# update school code\n#ss_code=$(grep -irw \"$ss_id\" /home/docker/code/All_States_School_CLIx_Code_+_School_server_Code_-_TS_Intervention_Schools.csv | awk -F ';' '{print $3}')\n\n# Trim leading whitespaces \n#ss_code=$(echo ${ss_code##*( )})\n# Trim trailing whitespaces \n#ss_code=$(echo ${ss_code%%*( )})\n\n#if grep -Fq \"GSTUDIO_INSTITUTE_ID_SECONDARY\" /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n#then\n # code if found\n #sed -e \"/GSTUDIO_INSTITUTE_ID_SECONDARY/ s/=.*/='${ss_code}'/\" -i /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#else\n # code if not found\n #echo -e \"GSTUDIO_INSTITUTE_ID_SECONDARY ='${ss_code}'\" >> /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#fi\n\n# update school name\n#ss_name=$(grep -irw \"$ss_id\" /home/docker/code/All_States_School_CLIx_Code_+_School_server_Code_-_TS_Intervention_Schools.csv | awk -F ';' '{print $2}' | sed 's/\"//g')\n\n# Trim leading whitespaces \n#ss_name=$(echo ${ss_name##*( )})\n# Trim trailing whitespaces \n#ss_name=$(echo ${ss_name%%*( )})\n\n#if grep -Fq \"GSTUDIO_INSTITUTE_NAME\" /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py\n#then\n # code if found\n #sed -e \"/GSTUDIO_INSTITUTE_NAME/ s/=.*/='${ss_name}'/\" -i /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#else\n # code if not found\n #echo -e \"GSTUDIO_INSTITUTE_NAME ='${ss_name}'\" >> /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py;\n#fi\n\n# prefix and suffix double quotes \" in server code - ended\n\n\n# extra scripts - started\n\n#echo -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\"\n#cd /home/docker/code/gstudio/gnowsys-ndf/\n\n#echo -e \"\\n${cyan}apply fab update_data ${reset}\"\n#fab update_data\n\n#echo -e \"\\n${cyan}apply bower components - datatables datatables-plugins datatables-rowsgroup datatables.net datatables.net-buttons datatables.net-buttons-dt jszip pdfmake ${reset}\"\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/bower_components/datatables /home/docker/code/${update_patch}/code-updates/bower_components/datatables-plugins /home/docker/code/${update_patch}/code-updates/bower_components/datatables-rowsgroup /home/docker/code/${update_patch}/code-updates/bower_components/datatables.net /home/docker/code/${update_patch}/code-updates/bower_components/datatables.net-buttons /home/docker/code/${update_patch}/code-updates/bower_components/datatables.net-buttons-dt /home/docker/code/${update_patch}/code-updates/bower_components/jszip /home/docker/code/${update_patch}/code-updates/bower_components/pdfmake 
/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/static/ndf/bower_components/\n\n#echo -e \"\\n${cyan}add few variables and there value so replace the same - local_settings ${reset}\"\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/gstudio-docker/confs/local_settings.py.default /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py\n\n# Variables related to \"set_language\" function (setting default language)\n#state_code=${ss_id:0:2};\n#language=\"Not Idea\";\n#if [ ${state_code} == \"ct\" ] || [ ${state_code} == \"rj\" ]; then\n #echo -e \"\\n${cyan}State code is ${state_code}. Hence setting hi as language.${reset}\"\n #language=\"hi\";\n#elif [ ${state_code} == \"mz\" ]; then\n # echo -e \"\\n${cyan}State code is ${state_code}. Hence setting en as language.${reset}\"\n #language=\"en\";\n#elif [ ${state_code} == \"tg\" ]; then\n #echo -e \"\\n${cyan}State code is ${state_code}. Hence setting te as language.${reset}\"\n #language=\"te\";\n#else\n #echo -e \"\\n${red}Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($directoryname)${reset}\" ;\n#fi \n#sed -e \"/GSTUDIO_PRIMARY_COURSE_LANGUAGE/ s/=.*/= u'${language}'/\" -i /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py\n\n#echo -e \"\\n${cyan}apply requirements - copying dlkit dist-packages ${reset}\"\n# if [[ -d /usr/local/lib/python2.7/dist-packages-old ]]; then\n# mv -v /usr/local/lib/python2.7/dist-packages-old /tmp/\n# rm -rf /tmp/dist-packages-old\n# fi\n# mv -v /usr/local/lib/python2.7/dist-packages /usr/local/lib/python2.7/dist-packages-old\n# rsync -avzPh /home/docker/code/${update_patch}/code-updates/dist-packages /usr/local/lib/python2.7/\n#mv -v /usr/local/lib/python2.7/dist-packages/Sphinx-1.6.5.dist-info /usr/local/lib/python2.7/dist-packages/sphinx /usr/local/lib/python2.7/dist-packages/sphinxcontrib /usr/local/lib/python2.7/dist-packages/sphinxcontrib_websupport-1.0.1-py3.6-nspkg.pth /usr/local/lib/python2.7/dist-packages/sphinxcontrib_websupport-1.0.1.dist-info /tmp/\n#rm -rf /tmp/Sphinx-1.6.5.dist-info /tmp/sphinx /tmp/sphinxcontrib /tmp/sphinxcontrib_websupport-1.0.1-py3.6-nspkg.pth /tmp/sphinxcontrib_websupport-1.0.1.dist-info\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/dist-packages/dlkit* /usr/local/lib/python2.7/dist-packages/\n\n\n#echo -e \"\\n${cyan}updating teacher' s agency type ${reset}\"\n#python manage.py teacher_agency_type_update\n\n#echo -e \"\\n${cyan}collectstatic ${reset}\"\n#echo yes | python manage.py collectstatic\n\n#echo -e \"\\n${cyan}execute release2-1_nov17.py ${reset}\"\n#echo \"execfile('../doc/deployer/release2-1_nov17.py')\" | python manage.py shell\n\n# extra scripts - ended\n\n# modify logrotate for nginx\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/nginx /etc/logrotate.d/\n\n# copy ssl crt and key files for nginx\n#rsync -avzPh /home/docker/code/${update_patch}/code-updates/clixserver.tiss.edu /etc/ssl/\n\n# set newly updated crontab - started\n\n#echo -e \"\\n${cyan}Applying newly updated cron jobs in crontab ${reset}\"\n#crontab /home/docker/code/confs/mycron\n\n# set newly updated crontab - ended\n"
},
{
"alpha_fraction": 0.6017467379570007,
"alphanum_fraction": 0.6585152745246887,
"avg_line_length": 26.95121955871582,
"blob_id": "57925faae5e82c16e735956a2388f6457e5de67c",
"content_id": "6a0a1b4b6d59b9f3fdf19df5bb3c59911db88574",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1145,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 41,
"path": "/scripts/patch/patch-r1.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\nfunction apply_patch() {\n\n\t# fetch the filename (patch name)\n\tfilename=$(basename $(ls -r /mnt/update_*.tar.gz | head -n 1));\n\tupdate_patch=\"${filename%.*.*}\";\n\n\techo -e \"\\n${cyan}patch directory name : ${update_patch} and this update shell file name is $(readlink -f $0) ${reset}\"\n\n\techo -e \"\\n${cyan}change directory /mnt/ ${reset}\"\n\tcd /mnt/\n\n\techo -e \"\\n${cyan}Extract the tar.gz file (${update_patch}.tar.gz) ${reset}\"\n\tsudo tar xvzf ${update_patch}.tar.gz\n\n\techo -e \"\\n${cyan}Applying code updates ${reset}\"\n\tsudo bash ${update_patch}/code-updates/code-update.sh\n\n\techo -e \"\\n${cyan}Applying oac and oat updates ${reset}\"\n\tsudo bash ${update_patch}/oac-and-oat-updates/update-oac-and-oat.sh\n\n\techo -e \"\\n${cyan}School server will be restarting in 10sec ${reset}\"\n\tsleep 10\n\tsudo reboot\n\n} \n\napply_patch | tee patch-r1.log;"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.780701756477356,
"avg_line_length": 37,
"blob_id": "d820ce960682ebd5c12b99eb8f867eee93ee6c76",
"content_id": "3cc919275b40be06a0b287f9605c6a59bf389a5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 3,
"path": "/confs/server_settings.py",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "GSTUDIO_INSTITUTE_ID = 'sp100'\nGSTUDIO_INSTITUTE_ID_SECONDARY ='00000000'\nGSTUDIO_INSTITUTE_NAME = 'clix-default'\n"
},
{
"alpha_fraction": 0.6946107745170593,
"alphanum_fraction": 0.6986027956008911,
"avg_line_length": 24.049999237060547,
"blob_id": "b486a99848f7cb611798031597349d3f08b5c3aa",
"content_id": "d9e0e98d6b74f9109094e5cdcf5b64ed1f9c7d70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 20,
"path": "/confs/rc.local",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/sh -e\n#\n# rc.local\n#\n# This script is executed at the end of each multiuser runlevel.\n# Make sure that the script will \"exit 0\" on success or any other\n# value on error.\n#\n# In order to enable or disable this script just change the execution\n# bits.\n#\n# By default this script does nothing.\n\n# Mrunal : heartbeat\n/home/docker/code/scripts/heartbeat.sh >> /data/heartbeats/hb-$(date +\\%Y\\%m\\%d-\\%H\\%M\\%S).log\n\n# Mrunal : backup\n/home/docker/code/scripts/backup.sh >> /tmp/cron-bkp.log\n\nexit 0\n"
},
{
"alpha_fraction": 0.6181483268737793,
"alphanum_fraction": 0.6315062046051025,
"avg_line_length": 37.76785659790039,
"blob_id": "638137a7dae7822a738ba05cd7e764694b041e13",
"content_id": "8922b553dfa98b2de960bc91f2dcc9227b6cacfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2171,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 56,
"path": "/scripts/zip_progress_csvs.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#File creation : Shivani Dixit\n\n\n#Prerequisite : The folder structure of the backup taken in the hard disk must be /home/core/2019/<state-code>/<school-code>/gstudio\n\n\n#Description : This script creates a zip of the \"gstudio-exported-users-analytics-csvs\" folder in the mounted hard disk.\n\n\n#Steps to run the script : 1. Connect the hard disk which has the data\n# 2. Mount it. Command for mounting \"sudo mount <device> /mnt\" (Ex: sudo mount /dev/sdb1 /mnt).\n# 3. Run the bash script using \"sudo bash zip_progress_csvs.sh\".\n# 4. The script will ask for the state code. Enter according to the instructions there and your requirements.\n\n\nfunction zip_progress_csvs() {\n\n cd /mnt/ ;\n \n ##Enter the state code\n read -p \"Please enter the state code (ct, tg, mz or rj): \" state_code\n\n ##If state code entered is right\n if [ $state_code == ct ] || [ $state_code == tg ] || [ $state_code == rj ] || [ $state_code == mz ]; then\n ##Creation of the folder structure\n sudo mkdir backup_${state_code}_progress_csvs; \n cd /mnt/backup_${state_code}_progress_csvs/ ; \n ls /mnt/home/core/2019/${state_code}/ > ${state_code}_folder_names.txt;\n xargs -I {} mkdir -p \"{}/gstudio\" < ${state_code}_folder_names.txt;\n sudo rm -r /mnt/backup_${state_code}_progress_csvs/${state_code}_folder_names.txt;\n\n ##Code To sync the \"gstudio-exported-users-analytics-csvs\" folder.\n for ((i=1; i<=300; i++))\n do \n id=\"${state_code}$i\"\n cd /mnt/home/core/2019/${state_code}/ ;\n if [ -d *$id ]; then\n sudo rsync -avPhz /mnt/home/core/2019/${state_code}/*${id}/gstudio/gstudio-exported-users-analytics-csvs /mnt/backup_${state_code}_progress_csvs/*${id}/gstudio/ ;\n fi\n done\n\n ##Code to zip the folder\n cd /mnt/ ;\n sudo zip -r backup_${state_code}_progress_csvs.zip backup_${state_code}_progress_csvs ;\n \n ##If state code entered is wrong \n else\n echo -e \"\\e[1;31mERROR:You have entered the wrong code!!! \\e[0m\"\n echo \"Terminating the script.\"\n exit\n fi\n}\n\nzip_progress_csvs | tee zip_progress_csvs.log ; #logs stored in this file\n"
},
{
"alpha_fraction": 0.37709498405456543,
"alphanum_fraction": 0.3980447053909302,
"avg_line_length": 41.117645263671875,
"blob_id": "3a0f4a523df67fb6d4a6c05c1e8f5827e4a3cb16",
"content_id": "3dccca74a58c3828c6b4565f100d6b08be7960ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 17,
"path": "/scripts/test.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash \n{\n file=`readlink -e -f $0`\n echo \"File : $file\"\n file1=`echo $file | sed -e 's/\\/scripts.*//'` ; \n echo \"echo $file1 \"\n file2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\n echo \"Dir : $file1\"\n echo \"Dir rep : $file2\"\n echo \"Dir rep : $file3\"\n sed -e \"/hHOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\n echo \"here\"\n more $file1/confs/deploy.conf | grep hHOME; \n# sed -e '/_INTE/ s/=.*;/=\"1\";/' -i confs/deploy.conf; \n# echo \"File : $file1/confs/deploy.conf\"\n}\n"
},
{
"alpha_fraction": 0.76113361120224,
"alphanum_fraction": 0.7651821970939636,
"avg_line_length": 60.75,
"blob_id": "f055515f6dbbe221fc8b06afa6939ce83374b0f0",
"content_id": "3ec9f494adb865bdcc44ec079fec184ff32b268b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 8,
"path": "/scripts/execute-ActivityTimestamp-process.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Runs the activity time stamp process,\n# the same process runs on 2nd and 4th Friday of the every month, automatically, once the machine starts.\n# This activity takes longer time to complete, \n# so one who starts the execution should wait till the process gets completed. \n\ndocker exec gstudio /bin/sh -c \"echo \\\"execfile('/home/docker/code/gstudio/doc/deployer/get_all_users_activity_timestamp_csvs.py')\\\" |/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py shell\";\n"
},
{
"alpha_fraction": 0.6279182434082031,
"alphanum_fraction": 0.634010910987854,
"avg_line_length": 44.126522064208984,
"blob_id": "11ac98d9d295c712412ee186cc12ec9c4190f9ec",
"content_id": "56ee71e1dd85796e25e8fbae276771499c270972",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 18547,
"license_type": "no_license",
"max_line_length": 255,
"num_lines": 411,
"path": "/scripts/backup-old-server-data.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\n\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\nfunction backup_completely() {\n content=\"NULL\"; # Holds content value (path) to be checked file, directory or something else\n\n # Variables related to \"copy_content_validations\" function\n source_path=\"1\"; # Holds source file / directory path for copying\n destination_path=\"2\"; # Holds destination directory path for copying\n flag=\"3\"; # Holds destination directory path for copying\n\n # Variables related to \"type_of_content\" function\n content_type=\"NULL\"; # Holds type of file\n\n\n # Variables related to \"copy_content_validations\" function (Data Integrity)\n filename_full=\"\"\n filename=$(basename \"$filename_full\")\n extension=\"${filename##*.}\"\n filename=\"${filename%.*}\"\n\n # Variables related to \"docker_load\" and \"docker_load_validations\" function (docker load and validation)\n docker_image_path=\"1\";\n docker_image_name=\"2\";\n docker_image_grep_name=\"3\";\n docker_image_loading_status=\"Not Idea\";\n\n # Variables related to \"docker_run\" and \"docker_run_validations\" function (docker run and validation)\n docker_container_name=\"1\";\n docker_container_running_status=\"Not Idea\";\n\n #******************************** Basic functions starts from here ***********************************#\n\n function response()\n {\n if [ $? = 0 ]; then\n response_status=\"Working\";\n # echo \"Working (Code=$?)\" # For testing uncomment here\n else\n response_status=\"Not_Working\";\n # echo \"Not_Working (Code=$?)\" # For testing uncomment here\n fi\n }\n\n function check_disk_insertion()\n {\n\n for (( i=1; i<5; i++ )); \n do\n\n check_disk=`lsblk | grep /mnt | wc -l`\n\n if [[ \"$check_disk\" != \"1\" ]]; then\n sleep 5;\n echo -e \"\\nWaiting for the installer (pen drive / portable HDD).\";\n elif [[ \"$check_disk\" == \"1\" ]]; then\n #echo -e \"\\nPen drive found. Continuing installation.\";\n disk_status=\"Found\";\n break\n fi\n\n if [[ $i == 4 ]]; then\n echo -e \"\\nInstaller (pen drive / portable HDD) not found. Retry installation.\";\n disk_status=\"Not_found\";\n exit; # For testing comment here\n fi\n\n done\n\n }\n\n function mounting_disk()\n {\n echo -e \"\\n${cyan}Name the destination disk for taking backup (portable HDD)? ${reset}\" ;\n echo -e \"${brown}(For example 'sdb' or 'sdc') ${reset}\" ;\n echo -e \"${brown}{if you are not sure and want to exit simply type enter} ${reset}\" ;\n echo -e \"${brown}{should be other than 'sda'} ${reset}\" ;\n check_disk_h=`lsblk | grep SIZE`\n check_disk_d=`lsblk | grep disk`\n echo -e \"\\n${purple}$check_disk_h ${reset}\" ;\n echo -e \"${blue}$check_disk_d ${reset}\\n\" ;\n echo -e -n \"${cyan}disk name : ${reset}\" ;\n\n read disk_t ;\n disk_t_ck=`lsblk | grep $disk_t`\n\n if [[ \"$disk_t\" == \"\" ]]; then\n\n echo -e \"\\n${brown}No input. Hence exiting. Please try again later. ${reset}\" ;\n mounting_status=\"Unmounted\";\n exit\n\n elif [[ \"$disk_t_ck\" == \"\" ]]; then\n\n echo -e \"\\n${brown}Invalid input. Hence exiting. Please try again later. 
${reset}\" ;\n mounting_status=\"Unmounted\";\n exit\n\n elif [[ \"$disk_t_ck\" != \"\" ]]; then\n\n \n echo -e \"${cyan}mounting /dev/${disk_t} in /mnt ${reset}\"\n sudo mount /dev/${disk_t}9 /mnt/\n\n mounting_status=\"Mounted\";\n \n fi\n\n }\n\n\n function unmounting_disk()\n {\n\n echo -e \"\\n${cyan}umount /mnt${reset}\"\n sudo umount /mnt/\n\n unmounting_status=\"Unmounted\";\n\n }\n\n # This function will check the type of content (file, directory or No idea)\n function type_of_content()\n {\n # Type of content\n # D=Directory\n # F=File\n # N=No idea\n content=\"$1\";\n if [[ -d $content ]]; then\n echo \"$content is a directory\"\n content_type=\"D\";\n elif [[ -f $content ]]; then\n echo \"$content is a file\"\n content_type=\"F\";\n else\n echo \"$content is not valid\"\n content_type=\"N\";\n exit 1\n fi\n }\n\n function file_existence_validation()\n {\n filename=\"$1\";\n if [[ -f $filename ]]; then\n file_existence_status=\"Present\";\n elif [[ ! -f $filename ]]; then\n echo -e \"\\n${cyan}File ($filename) doesn't exists. ${reset}\"\n file_existence_status=\"Not_Present\";\n else\n echo -e \"\\n${red}Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($filename)${reset}\" ;\n fi \n }\n\n function directory_existence_validation()\n {\n directoryname=\"$1\";\n if [[ -d $directoryname ]]; then\n echo -e \"\\n${cyan}Directory ($directoryname) exists. ${reset}\"\n directory_existence_status=\"Present\";\n elif [[ ! -d $directoryname ]]; then\n echo -e \"\\n${cyan}Directory ($directoryname) doesn't exists. ${reset}\"\n directory_existence_status=\"Not_Present\";\n if [[ \"$2\" == \"Create\" ]]; then\n mkdir -p $directoryname;\n if [ \"$?\" == \"0\" ]; then\n echo -e \"\\n${cyan}Directory ($directoryname) doesn't exists. Got signal to create the same. Hence created successfully.${reset}\"\n directory_existence_status=\"Present\";\n else\n echo -e \"\\n${cyan}Directory ($directoryname) doesn't exists. Got signal to create the same. Unfortunately failed to create.${reset}\"\n fi\n fi\n else\n echo -e \"\\n${red}Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($directoryname)${reset}\" ;\n fi \n }\n\n # This function will validate, copy the content increamentally from source to destination. (Can take care partial copy)\n # + Directory existence \n # + Data integrity\n # + In case of partial copy rsync will handle it\n function copy_content()\n {\n\n directory_existence_validation \"$destination_path\" \"Create\"\n if [ \"$directory_existence_status\" == \"Present\" ]; then\n if [[ \"$3\" == \"max-size\" ]]; then\n #Ref: https://serverfault.com/questions/105206/rsync-exclude-files-that-are-over-a-certain-size\n #flag --max-size=30m\n echo -e \"\\n${cyan}Destination directory exists. Hence proceeding to copy the content. ${reset}\" ;\n echo -e \"\\n${cyan}copy clix-platform data and necessary files from $source_path to $destination_path. \\nThis may take time, please be patient. (Approx 15-30 min depending on the system performance) ${reset}\"\n sudo rsync -avzPh --max-size=30m \"${source_path}\" \"${destination_path}\" # For testing comment here\n else\n echo -e \"\\n${cyan}Destination directory exists. Hence proceeding to copy the content. ${reset}\" ;\n echo -e \"\\n${cyan}copy clix-platform data and necessary files from $source_path to $destination_path. \\nThis may take time, please be patient. 
(Approx 15-30 min depending on the system performance) ${reset}\"\n sudo rsync -avzPh \"${source_path}\" \"${destination_path}\" # For testing comment here\n fi\n elif [ \"$directory_existence_status\" == \"Not_Present\" ]; then\n echo -e \"\\n${cyan}Destination directory doesn' t exists. Hence skipping the process of copying the content and continuing with the process. ${reset}\" ;\n else\n echo -e \"\\n${cyan}Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ${reset}\" ;\n fi\n\n }\n\n function docker_load_validation()\n {\n #echo \"docker_image_name:$docker_image_name\" # For testing uncomment here\n docker images | grep $docker_image_grep_name >> /dev/null \n response\n }\n\n function docker_load()\n {\n echo -e \"\\n${cyan}loading $1 docker image ${reset}\"\n echo -e \"${brown}caution : it may take long time ${reset}\"\n docker load < $docker_image_path # For testing comment here\n }\n\n function docker_run_validation()\n {\n docker ps -a | grep $docker_container_name # >> /dev/null\n response\n }\n\n function docker_run()\n {\n echo -e \"\\n${cyan}running $1 docker container ${reset}\"\n echo -e \"${brown}caution : it may take long time ${reset}\"\n docker run $docker_flag $docker_volumes $docker_ports --name=\"$docker_container_name\" $docker_image_name # For testing comment here\n }\n\n\n #******************************** Basic functions ends from here ***********************************#\n\n\n\n #**************************** Installation process starts from here ********************************#\n\n # \n # echo -e \"\\n${cyan}Please be ready with the following details: ${reset}\" ;\n # echo -e \"\\n${cyan}\\t School server id ${reset}\" ;\n\n echo -e \"\\n${cyan}Please (re)insert the (CLIx School Server) installer (pen drive / portable HDD).${reset}\"\n\n #sleep 5\n\n check_disk_insertion\n echo -e \"\\n${cyan}Disk status : $disk_status ${reset}\";\n\n # echo -e \"\\n${cyan}Please provide the School server id? 
(Example Mizoram school 23 will have mz23 and Telangana 24 school - tg24) ${reset}\" ;\n # echo -e -n \"School server id: \"\n # read ss_id\n\n # get current year\n cur_year=`date +\"%Y\"`\n\n # platform name\n platform=\"gstudio\"\n\n # get server id (Remove single quote {'} and Remove double quote {\"})\n ss_id=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\n ss_id=`tr -dc '[[:print:]]' <<< \"$ss_id\"`\n\n # get state code\n state_code=${ss_id:0:2};\n\n # get server code (Remove single quote {'} and Remove double quote {\"})\n ss_code=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID_SECONDARY | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\n ss_code=`tr -dc '[[:print:]]' <<< \"$ss_code\"`\n\n # get server name (Remove single quote {'} and Remove double quote {\"})\n #ss_name=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_NAME | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\n ss_name=`tr -dc '[[:print:]]' <<< \"$ss_name\"`\n\n # For testing purpose\n # ls -ltrh /mnt/test\n # mounting_disk\n # echo -e \"\\n${cyan}Mounting status : $mounting_status ${reset}\";\n # unmounting_disk\n # echo -e \"\\n${cyan}Unmounting status : $unmounting_status ${reset}\";\n\n\n # backup up of old school server clix-platform data \n echo -e -n \"\\n${cyan}Do you want to backup old school server clix-platform data? [Y/N]: ${reset}\" ;\n read backup_old_clix_platform_status\n\n if [ \"$backup_old_clix_platform_status\" == \"Y\" ] || [ \"$backup_old_clix_platform_status\" == \"y\" ] || [ \"$backup_old_clix_platform_status\" == \"Yes\" ] || [ \"$backup_old_clix_platform_status\" == \"yes\" ] || [ \"$backup_old_clix_platform_status\" == \"\" ]; then\n echo -e \"\\n${cyan}Option selected / entered: $backup_old_clix_platform_status. Hence initiating the backup up of old school server clix-platform data. ${reset}\" ;\n \n # Mrunal : Handling mounting in case of unplanned poweroff (Power failure). Unmount the mounting point\n unmounting_disk\n echo -e \"\\n${cyan}Unmounting status : $unmounting_status ${reset}\";\n\n mounting_disk\n echo -e \"\\n${cyan}Mounting status : $mounting_status ${reset}\";\n \n source_base_path=\"/home/core/data\"\n destination_path=\"/mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/\"\n\n if [[ ! 
-d /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/ ]]; then\n mkdir -p /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/\n fi\n\n source_path=\"${source_base_path}/db\";\n echo -e \"\\n${cyan}copy clix-platform db from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/gstudio-exported-users-analytics-csvs\";\n echo -e \"\\n${cyan}copy clix-platform gstudio-exported-users-analytics-csvs from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/gstudio-logs\";\n echo -e \"\\n${cyan}copy clix-platform gstudio-logs from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/gstudio_tools_logs\";\n echo -e \"\\n${cyan}copy clix-platform gstudio_tools_logs from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/activity-timestamp-csvs\";\n echo -e \"\\n${cyan}copy clix-platform activity-timestamp-csvs from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/postgres-dump\";\n echo -e \"\\n${cyan}copy clix-platform postgres-dump from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/rcs-repo\";\n echo -e \"\\n${cyan}copy clix-platform rcs-repo from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/local_settings.py\";\n echo -e \"\\n${cyan}copy clix-platform local_settings.py from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/server_settings.py\";\n echo -e \"\\n${cyan}copy clix-platform server_settings.py from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/system-heartbeat\";\n echo -e \"\\n${cyan}copy clix-platform system-heartbeat from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/git-commit-details.log\";\n echo -e \"\\n${cyan}copy clix-platform git-commit-details from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/qbank/qbank_data.tar.gz\";\n echo -e \"\\n${cyan}copy clix-platform qbank_data.tar.gz (Cole's script) from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/assessment-media\";\n echo -e \"\\n${cyan}copy clix-platform data and necessary files except media directory from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/nginx-logs\";\n echo -e \"\\n${cyan}copy clix-platform nginx-logs from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\"\n\n source_path=\"${source_base_path}/media\";\n echo -e \"\\n${cyan}copy clix-platform media directory of data from $source_path to $destination_path ${reset}\";\n copy_content \"$source_path\" \"$destination_path\" 
\"max-size\" \n\n echo -e \"\\n${cyan}Size of directories: ${reset}\"\n # sudo du -hs ${source_base_path}/benchmark-dump /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/benchmark-dump\n # sudo du -hs ${source_base_path}/counters-dump /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/counters-dump\n sudo du -hs ${source_base_path}/db /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/db\n sudo du -hs ${source_base_path}/gstudio-exported-users-analytics-csvs /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/gstudio-exported-users-analytics-csvs\n sudo du -hs ${source_base_path}/gstudio-logs /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/gstudio-logs\n sudo du -hs ${source_base_path}/gstudio_tools_logs /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/gstudio_tools_logs\n sudo du -hs ${source_base_path}/activity-timestamp-csvs /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/activity-timestamp-csvs\n sudo du -hs ${source_base_path}/postgres-dump /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/postgres-dump\n sudo du -hs ${source_base_path}/rcs-repo /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/rcs-repo\n sudo du -hs ${source_base_path}/local_settings.py /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/local_settings.py\n sudo du -hs ${source_base_path}/server_settings.py /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/server_settings.py\n sudo du -hs ${source_base_path}/system-heartbeat /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/system-heartbeat\n sudo du -hs ${source_base_path}/git-commit-details.log /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/git-commit-details.log\n sudo du -hs ${source_base_path}/qbank/qbank_data.tar.gz /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/qbank_data.tar.gz\n sudo du -hs ${source_base_path}/assessment-media /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/assessment-media\n sudo du -hs ${source_base_path}/nginx-logs /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/nginx-logs\n sudo du -hs ${source_base_path}/media /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/media\n\n sudo du -hc $(find ${source_base_path}/media -type f -size +30M)\n echo -e \"\\n${cyan}Size of directories: ${reset}\"\n sudo du -chs ${source_base_path}/* /mnt/home/core/${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}/*\n\n unmounting_disk\n echo -e \"\\n${cyan}Unmounting status : $unmounting_status ${reset}\";\n\n elif [ \"$backup_old_clix_platform_status\" == \"N\" ] || [ \"$backup_old_clix_platform_status\" == \"n\" ] || [ \"$backup_old_clix_platform_status\" == \"No\" ] || [ \"$backup_old_clix_platform_status\" == \"no\" ]; then\n echo -e \"\\n${cyan}Option selected / entered: $backup_old_clix_platform_status. Hence skipping backup up of old school server clix-platform data and continuing with the process. ${reset}\" ;\n else\n echo -e \"\\n${cyan}Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ${reset}\" ;\n fi\n}\nbackup_completely | tee /mnt/backup_completely.log;\nexit\n"
},
{
"alpha_fraction": 0.6181725263595581,
"alphanum_fraction": 0.6237876415252686,
"avg_line_length": 31.11475372314453,
"blob_id": "fbe55d52b03baca8f13e950ee207cd5f11a5d258",
"content_id": "8fe9a800987963eccb82890c3dafe63def83e837",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1959,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 61,
"path": "/scripts/generate-self-certified-certificate-ssl.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Ref : https://serversforhackers.com/self-signed-ssl-certificates\n\n\n# Specify where we will install\n# the clixserver certificate\nGNAME=\"clixserver.tiss.edu\"\nSSL_DIR=\"/etc/ssl/$GNAME\"\n\nif [[ -d /etc/ssl/$GNAME && -f /etc/ssl/$GNAME/$GNAME.crt && -f /etc/ssl/$GNAME/$GNAME.csr && -f /etc/ssl/$GNAME/$GNAME.key ]]; then\n echo \"Directory and files exists. Hence exiting the process.\";\n echo \"Directory : /etc/ssl/$GNAME \";\n echo \"File : /etc/ssl/$GNAME/$GNAME.crt , /etc/ssl/$GNAME/$GNAME.csr and /etc/ssl/$GNAME/$GNAME.key\";\n exit;\nfi\n\n# Set the wildcarded domain\n# we want to use\nDOMAIN=\"*.$GNAME\"\n\n# A blank passphrase\nPASSPHRASE=\"\"\n\n# CSR variables meaning\n# C = Country\n# ST = Test State or Province\n# L = Test Locality\n# O = Organization Name\n# OU = Organizational Unit Name\n# CN = Common Name\n# emailAddress = [email protected]\n\n# Variables as it is prompted\n# Country Name (2 letter code) [AU]:IN\n# State or Province Name (full name) [Some-State]:Maharashtra\n# Locality Name (eg, city) []:Mumbai\n# Organization Name (eg, company) [Internet Widgits Pty Ltd]:Test\n# Organizational Unit Name (eg, section) []:Test\n# Common Name (e.g. server FQDN or YOUR name) []:test.org\n# Email Address []:[email protected]\n\n# Set our CSR variables\nSUBJ=\"\nC=IN\nST=Maharashtra\nL=Mumbai\nO=clix\nOU=ss\nCN=$GNAME\nemailAddress=admin@$GNAME\n\"\n\n# Create our SSL directory\n# in case it doesn't exist\nsudo mkdir -p \"$SSL_DIR\"\n\n# Generate our Private Key, CSR and Certificate\nsudo openssl genrsa -out \"$SSL_DIR/$GNAME.key\" 2048\nsudo openssl req -new -subj \"$(echo -n \"$SUBJ\" | tr \"\\n\" \"/\")\" -key \"$SSL_DIR/$GNAME.key\" -out \"$SSL_DIR/$GNAME.csr\" -passin pass:$PASSPHRASE\nsudo openssl x509 -req -days 365 -in \"$SSL_DIR/$GNAME.csr\" -signkey \"$SSL_DIR/$GNAME.key\" -out \"$SSL_DIR/$GNAME.crt\"\n"
},
{
"alpha_fraction": 0.6844885349273682,
"alphanum_fraction": 0.726588785648346,
"avg_line_length": 42.33333206176758,
"blob_id": "6b456ebadb46036c7b023fde1e224c20219fecab",
"content_id": "bc3a9c7043237233bb9f968167563b3f5eaedb94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 6247,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 144,
"path": "/scripts/git-offline-tools-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n#filename=$(basename $(ls -dr /home/docker/code/patch-*/ | head -n 1));\n#patch=\"${filename%.*.*}\";\n#patch=\"patch-7a6c2ac-r5-20190221\"; #earlier patch\n#patch=\"patch-26eaf18-r5-20190320\"; #latest patch\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n# git offline update Astroamer_Planet_Trek_Activity code - started\ngit_commit_no_Astroamer_Planet_Trek_Activity=\"39f1cc7cb1cd567f69477b20830bf7f9b89be4d6\"; # Commit on 01/10/2018\n\necho -e \"\\n${cyan}change the directory to /softwares/Tools/Astroamer_Planet_Trek_Activity/ ${reset}\"\ncd /softwares/Tools/Astroamer_Planet_Trek_Activity/\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/tools-updates/Astroamer_Planet_Trek_Activity ${reset}\";\ngit fetch /home/docker/code/${patch}/tools-updates/Astroamer_Planet_Trek_Activity;\n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_Astroamer_Planet_Trek_Activity}) from /home/docker/code/${patch}/tools-updates/Astroamer_Planet_Trek_Activity ${reset}\";\ngit merge $git_commit_no_Astroamer_Planet_Trek_Activity;\n\n# git offline update Astroamer_Planet_Trek_Activity code - ended\n\n\n# git offline update Motions_of_the_Moon_Animation code - started\ngit_commit_no_Motions_of_the_Moon_Animation=\"c4feb76dbb784e6c4bb86c76c02d3ff73353d107\"; # Commit on 08/10/2018\n\necho -e \"\\n${cyan}change the directory to /softwares/Tools/Motions_of_the_Moon_Animation/ ${reset}\"\ncd /softwares/Tools/Motions_of_the_Moon_Animation/\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/tools-updates/Motions_of_the_Moon_Animation ${reset}\";\ngit fetch /home/docker/code/${patch}/tools-updates/Motions_of_the_Moon_Animation;\n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_Motions_of_the_Moon_Animation}) from /home/docker/code/${patch}/tools-updates/Motions_of_the_Moon_Animation ${reset}\";\ngit merge $git_commit_no_Motions_of_the_Moon_Animation;\n\n# git offline update Motions_of_the_Moon_Animation code - ended\n\n\n# git offline update Rotation_of_Earth_Animation code - started\ngit_commit_no_Rotation_of_Earth_Animation=\"2c070c5b54550b519ed4429f82cc9c7358e38b18\"; # Commit on 03/07/2018\n\necho -e \"\\n${cyan}change the directory to /softwares/Tools/Rotation_of_Earth_Animation/ ${reset}\"\ncd /softwares/Tools/Rotation_of_Earth_Animation/;\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/tools-updates/Rotation_of_Earth_Animation ${reset}\";\ngit fetch /home/docker/code/${patch}/tools-updates/Rotation_of_Earth_Animation;\n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_Rotation_of_Earth_Animation}) from /home/docker/code/${patch}/tools-updates/Rotation_of_Earth_Animation ${reset}\";\ngit merge $git_commit_no_Rotation_of_Earth_Animation;\n\n# git offline update Rotation_of_Earth_Animation code - ended\n\n\n#git offline update food_sharing_tool code - started\ngit_commit_no_food_sharing_tool=\"dfa73432caedb121c567f2f3484bc7d8cfd39f1a\"; # Commit on 
01/02/2019\n\necho -e \"\\n${cyan}change the directory to /softwares/Tools/food_sharing_tool/ ${reset}\"\ncd /softwares/Tools/food_sharing_tool/;\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\necho -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/tools-updates/food_sharing_tool ${reset}\";\ngit fetch /home/docker/code/${patch}/tools-updates/food_sharing_tool;\n\necho -e \"\\n${cyan}merging till specified commit number (${git_commit_no_food_sharing_tool}) from /home/docker/code/${patch}/tools-updates/food_sharing_tool ${reset}\";\ngit merge $git_commit_no_food_sharing_tool;\n\n# git offline update food_sharing_tool code - ended\n\n\n# git offline update sugarizer code - started\ngit_commit_no_sugarizer=\"239b9d716c0b0686f1389610cea31b91e58665c2\"; # Commit on 04/04/2016\n\necho -e \"\\n${cyan}change the directory to /softwares/DOER/sugarizer/ ${reset}\"\ncd /softwares/DOER/sugarizer/;\n\necho -e \"\\n${cyan}changing the git branch to master\";\ngit checkout master;\n\n#echo -e \"\\n${cyan}fetching git details from /home/docker/code/${patch}/tools-updates/sugarizer ${reset}\";\n#git fetch /home/docker/code/${patch}/tools-updates/sugarizer;\n\n#echo -e \"\\n${cyan}merging till specified commit number (${git_commit_no_sugarizer}) from /home/docker/code/${patch}/tools-updates/sugarizer ${reset}\";\n#git merge $git_commit_no_sugarizer;\n\ngit reset --hard ${git_commit_no_sugarizer};\n\n# git offline update sugarizer code - ended\n\n\necho -e \"\\n${cyan}Changing the directory to /softwares/Tools/ ${reset}\";\ncd /softwares/Tools/;\n\necho -e \"\\n${cyan}TurtleBlocksJS repo renamed to turtle_customized_version. ${reset}\";\nsudo mv TurtleBlocksJS turtle_customized_version;\n\necho -e \"\\n${cyan}Changing the directory to /softwares/DOER/ ${reset}\";\ncd /softwares/DOER/;\n\necho -e \"\\n${cyan}turtle repo renamed to turtle_full_version ${reset}\";\nsudo mv turtle turtle_full_version;\n\necho -e \"\\n${cyan}Copy/Move turtle_customized_version repo/folder from /Tools to /DOER directory. ${reset}\";\nsudo mv /softwares/Tools/turtle_customized_version /softwares/DOER/;\n\necho -e \"\\n${cyan}Changing the directory to /softwares/DOER/ ${reset}\";\ncd /softwares/DOER/;\n\necho -e \"\\n${cyan}Creating symlink for turtle_customized_version ${reset}\";\nln -s turtle_customized_version turtle; #for creation of link \n\necho -e \"\\n${cyan}Changing the directory to /home/docker/code/gstudio/gnowsys_ndf ${reset}\";\ncd /home/docker/code/gstudio/gnowsys-ndf;\n\necho -e \"\\n${cyan}running the collectstatic in manage.py ${reset}\";\necho \"yes\" | python manage.py collectstatic;\n\n#echo -e \"\\n${cyan}running the compass watch ${reset}\";\n#compass watch;\n\necho -e \"\\n${cyan}Removing the ${patch} from /home/docker/code/ ${reset}\";\nrm -rf /home/docker/code/${patch};\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5377432703971863,
"alphanum_fraction": 0.5528181195259094,
"avg_line_length": 47.309783935546875,
"blob_id": "fc18c6e75ad328ee527fe34c4c78c60399a5a514",
"content_id": "f0af363f1fa32fc816202ff4969c24b8392ba8ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 8889,
"license_type": "no_license",
"max_line_length": 519,
"num_lines": 184,
"path": "/scripts/start-new-container.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : build-docker.sh\n# File creation : gnowgi\n# Description :\n# git clone\n# Build Docker-Image via docker build command (using Dockerfile)\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM\n# Description : \n# Logs directory check and creation\n# Prerequisites - Checking for OS version and architecture\n# Checking type of user and permission\n# Internet checking\n# Checking wget package\n# Docker application / package checking and installation\n# Creating local copy of replica code via git clone or update via git pull \n# Build Docker-Image via docker build command (using Dockerfile)\n# Verify image creation\n# Start the Docker-container via docker run command (using newly created docker image)\n# Copy host logs(pre-install logs) inside docker container \n# Verify initialization of docker-container and display message of completion\n#--------------------------------------------------------------------------------------------------------------#\n\n#-----------------------------------------------------------------------\n# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM : Old code\n#git clone https://github.com/gnowledge/gstudio.git\n#docker build -t gnowgi/gstudio .\n#-----------------------------------------------------------------------\n\n\n# shell \nsh_c=\"sh -c\"\n\n\n#--------------------------------------------------------------------#\n# Log file details...\n#--------------------------------------------------------------------#\nLOG_DIR=\"$(pwd)/Pre-install_Logs\";\nINSTALL_LOG=\"docker-load-image-$(date +%Y%m%d-%H%M%S).log\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nINSTALL_LOG_FILE=\"$LOG_DIR/$INSTALL_LOG\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\ndHOME=\"/home/docker/code\"\n# ---------------- Log files variable def ends here -----------------\n\n\n# Mrunal : Set dHOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\nsed -e \"/hHOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\nmore $file1/confs/deploy.conf | grep hHOME; \n\n\nsource $file1/confs/deploy.conf\n\npwd\nsg docker -c 'pwd'\n# echo -e \"\\nInfo-msg : Loading docker images($dock_img). Be patient it may take few minutes. 
: sg docker -c 'docker load < $dock_img_file' \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# sg docker -c \"docker load < $dock_img_file\"\n\nif [[ $1 == \"\" ]]; then\n echo \"Please provide the image name.(REPOSITORY:TAG)\" ;\n docker images --format \"table {{.Repository}}\\t{{.Tag}} \\t{{.ID}}\"\n echo \"(For example 'school-server/mongokit:v1-20160330-134534' must be the default file name and hit Enter key of Keyboard)\" ;\n read dock_img_name ;\nelse\n dock_img_name=$1;\nfi\necho \"Image name entered is $dock_img_name .\" ;\ndocker_repo=$(echo $dock_img_name | cut -f1 -d:);\ndocker_tag=$(echo $dock_img_name | cut -f2 -d:);\necho \"docker images | grep -w $docker_repo.*$docker_tag\"\ndocker images | grep -w \"$docker_repo.*$docker_tag\" > /dev/null 2>&1 # Mrunal : No redirections here please\n#echo \"$?\"\n\nif [ \"$?\" != \"0\" ] ; then\n # Docker-Image creation Failed\n echo -e \"Caution-msg : Docker image $dock_img_name does not exist. Try again later\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit\nelse\n echo -e \"Caution-msg : Docker image $dock_img_name exist.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \nfi\n\nn=0 ;\nnl=($(more $file1/confs/deploy.conf | sed -n -e '/port_.*_host/ s/ *=.*;//p' | wc -l)); # ref : https://linuxconfig.org/how-to-count-occurrence-of-a-specific-character-in-a-string-or-file-using-bash\n\nlport_name=($(more $file1/confs/deploy.conf | sed -n -e '/port_.*_host/ s/ *=.*;//p'))\n\nlport_num=($(more $file1/confs/deploy.conf | sed -n -e '/port_.*_host/ s/.*= *\\(.*\\);/\\1/p'))\n\n#echo \"${lport_name[@]}\"\n#echo \"$n : $nl\"\n\ndock_hostname=($(more $file1/confs/deploy.conf | sed -n -e '/dock_hostname/ s/.*= *\\(.*\\);/\\1/p'))\n\n\nfor (( n=0; n<nl; n++ ))\ndo\n \n echo \"${lport_name[$n]} is ${lport_num[$n]}\"\n \n lport_status=\"busy\";\n while [ \"$lport_status\" == \"busy\" ]\n do\n echo \"H1\"\n # Two_entry=\"False\";\n # echo \"${lport_num[*]} ------ ${lport_num[$n]} ---- ${lport_num[*]/${lport_num[$n]}/} ---- ${lport_num[*]}\"\n\t# echo \"${tport_num[*]} ------ ${tport_num[$n]} ---- ${tport_num[*]/${tport_num[$n]}/} ---- ${tport_num[*]}\"\n\t# if [[ \"${tport_num[*]/$port_no/}\" = \"${tport_num[*]}\" ]]; then\n\t# \tTwo_entry=\"True\";\n\t# fi\n\t# echo \"H2 $Two_entry\"\n\tl=`sudo netstat -ntulp | grep -w :${lport_num[$n]}`\n\tif [[ \"$?\" == \"0\" ]] ; then\n\t lport_status=\"busy\";\n\t port_no=${lport_num[$n]};\n\t nothing=1;\n\t while [ $nothing == 1 ]\n\t do\n\t\ttport_num=(\"${lport_num[@]}\")\n \tport_no=${tport_num[$n]};\n \tunset tport_num[$n]\n \t#port_no=$((port_no+1));\n\t\techo \"${lport_num[*]} ------ $port_no --- ${lport_num[*]/$port_no/} --- ${lport_num[*]}\"\n\t\tif [ \"${tport_num[*]/$port_no/}\" = \"${tport_num[*]}\" ]; then\n\t\t lport_num[$n]=$port_no;\n\t\t nothing=0;\n\t\t echo \"does not exists\";\n\t\tfi\n\t done\n\t echo \"port_no and ${lport_num[$n]}\";\n\telse\n\t lport_status=\"use it (free port)\";\n\t sed -e \"/${lport_name[$n]}/ s/=.*;/=${lport_num[$n]};/\" -i $file1/confs/deploy.conf\n\t echo \"-----------------${lport_name[$n]} is ${lport_num[$n]}--------------\"\n\tfi\n\t\n\tsource $file1/confs/deploy.conf\n done\ndone\n#exit\n\nn=0 ;\nfor (( n=1; n>=1; n++ ))\ndo\n #echo \"name : $dock_con$n\" # Mrunal : Testing purpose\n echo \" # docker ps -a | grep -w $dock_con$n > /dev/null 2>&1\" \n sg docker -c \"docker ps -a | grep -w $dock_con$n > /dev/null 2>&1\" # Mrunal : No redirections 
here please\n if [[ $? != 0 ]]; then\n\techo -e \"\\nInfo-msg : **Docker-container initialization** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\techo -e \"\\nInfo-msg : **Please wait for some time - approx 5 mins** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\techo \" # docker run -it -d --restart=always -v $hHOME/data:/data -v $hHOME/backups:/backups -v $hHOME/softwares:/softwares -h $dock_hostname -p $port_ssh_host:$port_ssh_dock -p $port_smtp_host:$port_smtp_dock -p $port_http_host:$port_http_dock -p $port_django_dev_host:$port_django_dev_dock -p $port_mongo_host:$port_mongo_dock -p $port_smtp_test_host:$port_smtp_test_dock -p $port_imap_host:$port_imap_dock -p $port_smtps_host:$port_smtps_dock --name=$dock_con$n $docker_repo:$docker_tag\" # Mrunal: Testing purpose\n\tsg docker -c \"docker run -it -d --restart=always -v $hHOME/data:/data -v $hHOME/backups:/backups -v $hHOME/softwares:/softwares -h $dock_hostname -p $port_ssh_host:$port_ssh_dock -p $port_smtp_host:$port_smtp_dock -p $port_http_host:$port_http_dock -p $port_django_dev_host:$port_django_dev_dock -p $port_mongo_host:$port_mongo_dock -p $port_smtp_test_host:$port_smtp_test_dock -p $port_imap_host:$port_imap_dock -p $port_smtps_host:$port_smtps_dock --name=$dock_con$n $docker_repo:$docker_tag \" ;\n\n\tif [[ $? -eq 0 ]]; then\n\t # Docker-Container starting success\n\t echo -e \"Info-msg : Docker-container created and started successfully. \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t break\n\telse\n\t # Docker-Container creation Failed\n\t echo -e \"Caution-msg : Docker-container creation Failed. Please try again. (Error code : $?) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\tfi\n\tsleep 5m\n\tbreak\n fi\ndone\n\n#docker ps -q --filter=image=gnowgi/gstudio > /dev/null 2>&1 # Mrunal : No redirections here please\nsg docker -c \"docker ps | grep -w $dock_con$n > /dev/null 2>&1\" # Mrunal : No redirections here please\nip_address=`ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'`\nif [[ $? -eq 0 ]]; then\n # Installation completed\n echo -e \"Info-msg : Installation complete successfully. Just enter your ipaddress:port ($ip_address:$port_http_host) in address bar of your internet browser.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n # Installation Failed\n echo -e \"Caution-msg : Installation Failed. Please try again. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n# ----------------------------- Shell file code ends here ------------------------------------------\n\n}\n"
},
{
"alpha_fraction": 0.5071542263031006,
"alphanum_fraction": 0.5810810923576355,
"avg_line_length": 32.05263137817383,
"blob_id": "8106ac7d86c7db1760a671164799286b94c6e32d",
"content_id": "baec2d7b7c44937941168c379a2f6ced75ed5038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 38,
"path": "/docker-compose.yml",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "# gstudio with postgres+mongodb+rcs+fs-media\n#\n# Access via \"http://localhost:80\" (or \"http://$(docker-machine ip):8080\" if using docker-machine)\n#\n\nversion: '2'\n\nservices:\n\n # Create gstudio container\n gstudio:\n image: registry.tiss.edu/school-server-dlkit:43-7b32cc4\n container_name: gstudio\n restart: always\n environment:\n - TERM=xterm\n hostname: 'clixserver.tiss.edu'\n ports:\n - \"80:80\" #http\n - \"443:443\" #https\n - \"8022:22\" #ssh\n - \"8025:25\" #smtp\n - \"8143:143\" #g-imap\n - \"8587:587\" #g-smtp\n - \"8432:5432\" #postgres\n - \"8000:8000\" #dev\n - \"8017:27017\" #mongodb\n - \"8080:8080\" #qbank\n - \"5555:5555\" #celery-flower\n \n volumes:\n # requires proper authorizations, see rights.sh (incase of 403-forbidden error on web browser, please check /data/media permissions)\n - /home/core/data:/data\n - /home/core/code:/home/docker/code\n - /home/core/setup-software:/softwares\n - /home/core/assesment-datastore:/home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore\n - /home/core/backups:/backups\n - /home/core/static:/static\n \n"
},
{
"alpha_fraction": 0.5359925031661987,
"alphanum_fraction": 0.5524669289588928,
"avg_line_length": 53.88551330566406,
"blob_id": "39fe3aeb1d05a4ce072adb3221891af8df4c6543",
"content_id": "4c3517019b52449804bde35b844369e32320c13d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 23491,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 428,
"path": "/scripts/build-docker.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : build-docker.sh\n# File creation : gnowgi\n# Description :\n# git clone\n# Build Docker-Image via docker build command (using Dockerfile)\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM\n# Description : \n# Logs directory check and creation\n# Prerequisites - Checking for OS version and architecture\n# Checking type of user and permission\n# Internet checking\n# Checking wget package\n# Docker application / package checking and installation\n# Creating local copy of replica code via git clone or update via git pull \n# Build Docker-Image via docker build command (using Dockerfile)\n# Verify image creation\n# Start the Docker-container via docker run command (using newly created docker image)\n# Copy host logs(pre-install logs) inside docker container \n# Verify initialization of docker-container and display message of completion\n#--------------------------------------------------------------------------------------------------------------#\n\n#-----------------------------------------------------------------------\n# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM : Old code\n#git clone https://github.com/gnowledge/gstudio.git\n#docker build -t gnowgi/gstudio .\n#-----------------------------------------------------------------------\n\n\n# shell \nsh_c=\"sh -c\"\n\n\n# Following variables are used to store the color codes for displaying the content on terminal\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;32m\" ;\nbrown=\"\\033[0;33m\" ;\nblue=\"\\033[0;34m\" ;\ncyan=\"\\033[0;36m\" ;\nreset=\"\\033[0m\" ;\n\n\n#--------------------------------------------------------------------#\n# Log file details...\n#--------------------------------------------------------------------#\nLOG_DIR=\"$(pwd)/Pre-install_Logs\";\nINSTALL_LOG=\"pre-install-$(date +%d-%b-%Y-%I-%M-%S-%p).log\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nINSTALL_LOG_FILE=\"$LOG_DIR/$INSTALL_LOG\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\n# ---------------- Log files variable def ends here -----------------\n\n\n#--------------------------------------------------------------------#\n# Check the existence of the directory...\n# If directory is present : Display messages\n# If directory is not present : create and display messages\n#--------------------------------------------------------------------#\nfunction check_dir() {\n if [[ -d $1 ]]; then\n echo -e \"Info-msg : $1 directory is already present.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n echo -e \"Caution-msg : $1 directory not present. 
Hence creating the same.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n `mkdir -p $1` # Mrunal : No redirections here please\n echo -e \"$1 directory is now been created.\\n\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\n}\n# ----------------- Check directory code ends here ------------------\n\n\n#------------------------------------------------------------------------#\n# Checking the existence of the command (passed as an argument) is here..\n#------------------------------------------------------------------------#\ncommand_existence_check() {\n command -v \"$@\" > /dev/null 2>&1\n}\n#----------- Check for existence of directory code ends here ------------\n\n#--------------------------------------------------------------------#\n# Checking for Internet is here..\n#--------------------------------------------------------------------#\n\n_INTERNET_STATUS=0; # Mrunal : 20151229-1050 : 0 - Offline (No internet) and 1 - Online (internet available)\nfunction internet_check() {\n#ping www.google.com -c 5\n\n echo -e \"\\nWe are checking for Internet connection \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n INT_COM=`ping www.google.com -c 5 | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) /\"` | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n echo -e \"$INT_COM\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n if [[ \"$INT_COM\" =~ bytes* ]]; then # If internet connection is available\n _INT_COM=1\n else # If no internet connection\n _INT_COM=0;\n fi\n \n echo -e \"GET http://metastudio.org\\n\\n\" | nc metastudio.org 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _META=1;\n else # If no internet connection\n _META=0;\n fi\n \n echo -e \"GET http://google.com HTTP/1.0\\n\\n\" | nc google.com 80 > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n _GOOGLE=1;\n else # If no internet connection\n _GOOGLE=0; \n fi\n \n echo -e \"ping:$_INT_COM ; meta:$_META ; google:$_GOOGLE\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n if ([ \"$_INT_COM\" == 0 ] && [ \"$_META\" == 0 ] && [ \"$_GOOGLE\" == 0 ]); then # If no internet connection\n echo -e \"\\nInternet connection failed. Please check the network connections(IP, gateway, routes or physical cables).\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_TYPE=0;\n\techo -e 'Type of image : \\n 1. Docker-build {Building a docker image} \\n 2. Docker-image-load {Loading a existing docker image}';\n\tread _TYPE ;\n\techo -e \"USER input : $_TYPE\";\n\t\n\tif [[ \"$_TYPE\" == \"\" ]]; then\n\t echo \"No input\";\n\telif [[ '$_TYPE' == '1' ]]; then # Docker-build\n echo -e \"As internet is not available we couldn't continue installation. Try again later. 
Thanks.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit 1;\n\telif [[ '$_TYPE' == '2' ]]; then # Docker-build\n\t echo -e \"\\nHence we will continue with offline installation.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n\telse\n\t echo -e \"\\nError-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\tfi\n\t_INTERNET_STATUS=0;\n else # If internet connection is available\n echo -e \"\\nInternet connection Successful.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n echo -e \"\\nHence we will continue with online installation.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n\t_INTERNET_STATUS=1;\n fi\n\n}\n# -------------------------- Internet code ends here ----------------------------------------\n\n\n#------------------------------------------------------------------------#\n# Checking the existence of the docker package is here..\n#------------------------------------------------------------------------#\ndocker_package_install() {\nget_docker_script=\"get_docker_script.sh\"\n if [ -f $get_docker_script ]; then\n\techo -e \"Info-msg : Docker fetching script shell file exist. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\techo -e \"Info-msg : Docker fetching script shell file does not exist. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\n\n # Checking for the interent connections\n internet_check\n\n if [ $_INTERNET_STATUS = 0 ]; then\n\techo -e \"Info-msg : No internet hence can' t check the latest code for docker fetching script. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n elif [ $_INTERNET_STATUS = 1 ]; then\n\tsed -i ' s/^/#/' \"$get_docker_script\";\n\twget -qO- https://get.docker.com/ >> /tmp/$get_docker_script ;\n\tdiff_new_old= `diff $get_docker_script /tmp/$get_docker_script`\n\t \n\techo -e \"Info-msg : No internet hence can' t check the latest code for docker fetching script. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\n}\n# -------------------------- Checking the existence of the docker package code ends here ----------------------------------------\n\n\n# -------------------------- Shell file code starts from here ----------------------------------------\n\n# To check LOG directory and files (If directory is not created do create it with function)\n# Here check_dir is the function and $LOG_DIR is dirctory full path variable defined earlier\n\ncheck_dir \"$LOG_DIR\" # Calling check_dir function to check LOG directory existence\n\nls\n\nif [[ ! -f docker-inst.lock ]]; then\necho -e \"Info-msg : **Prerequisites** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\necho -e \"Info-msg : Checking for OS version and architecture.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# Check system os architecture (Currently {Fri Aug 28 17:38:35 IST 2015} docker only supports 64-bit platforms)\nos_arch=\"$(uname -m)\"\ncase \"$(uname -m)\" in\n *64)\n ;;\n *)\n echo -e \"Error-msg: The platform you are using is not an 64-bit version. \\n\n Docker currently only supports 64-bit versions of the platforms. 
\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit 1\n ;;\nesac\n\n\n# checking the platform, version and architecture\nlsb_dist=''\ndist_version=''\n\nif command_existence_check lsb_release; then\n lsb_dist=\"$(lsb_release -si)\"\nfi\nlsb_dist=\"$(echo \"$lsb_dist\" | tr '[:upper:]' '[:lower:]')\"\n\nif command_existence_check lsb_release; then\n dist_version=\"$(lsb_release --codename | cut -f2)\"\nfi\n\nif [ -z \"$dist_version\" ] && [ -r /etc/lsb-release ]; then\n dist_version=\"$(. /etc/lsb-release && echo \"$DISTRIB_CODENAME\")\"\nfi\necho \"dist:$lsb_dist and version:$dist_version and OS architecture:$os_arch \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\n\n# Print the username \nuser=\"$(id -un 2>/dev/null || true)\"\necho -e \"User name : $user \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\n# Identify whether user is root or not\necho -e \"\\nInfo-msg : Checking type of user and permission\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nif [ \"$user\" != 'root' ]; then\n if command_existence_check sudo; then\n \tsh_c=\"sudo -E sh -c\"\n \techo -e \"Info-msg : User($user) with sudo user. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n elif command_existence_check su; then\n sh_c=\"su -c\"\n echo -e \"Info-msg : User($user) with su user. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\t echo -e \"Error: The installer needs the ability to run few commands with root privileges.\n We are unable to find either 'sudo' or 'su' available to make this happen. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t exit 1\n fi\nfi\n\n# Checking for the interent connections\ninternet_check \n\n# We are checking the wget package. If the package is not installed then install the same\necho -e \"\\nInfo-msg : Checking wget package. If the package is not installed then install the same \"\nif command_existence_check wget; then\n echo -e \"\\nInfo-msg : wget application is already instlled on the system. So no need to install the package. Continuing with the process.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n echo -e \"\\nCaution-msg : wget application is not installed on the system. 
Hence now we will be installing the wget application.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n if [ \"$_INTERNET_STATUS\" == \"1\" ]; then\n\techo -e \"\\nCaution-msg : Installing the wget application (Online installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t# Updating the repo\n\t$sh_c 'apt-get update'\n\t\n\t# Installing wget application package\n\t$sh_c 'sudo apt-get install wget'\n elif [ \"$_INTERNET_STATUS\" == \"0\" ]; then\n\techo -e \"\\nCaution-msg : Installing the wget application (Offline installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t# Installing wget application package\n\tpackage_file_name=\"wget.deb\"; # Mrunal : 20151229-1050 : \n\tpackage_name=\"wget\"; # Mrunal : 20151229-1050 : Name or common name of the package\n\tcheck_file $package_file_name $package_name # Mrunal : 20151229-1050 : Check for existence of package file\n\t$sh_c 'dpkg -i wget.deb' # Mrunal : 20151229-1050 : Only for Ubuntu or Debian based systems\n else\n\techo -e \"\\nError-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n fi\nfi\n\n\n# Checking for the internet connections\ninternet_check\n\necho -e \"\\nInfo-msg : **Docker-Image creation** \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# We are checking the Docker package. If the package is not installed then install the same\necho -e \"\\nInfo-msg : Checking Docker package. If the package is not installed then install the same \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nif command_existence_check docker && [ -e /var/run/docker.sock ]; then\n echo -e \"\\nInfo-msg : docker application is already installed on the system. So no need to install the package. Continuing with the process. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Current user\n echo -e \"\\nInfo-msg : Current Username : $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Print the version of installed docker \n echo -e \"\\nInfo-msg : Checking the already installed docker application version \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'docker version' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n echo -e \"\\nCaution-msg : Docker application is not installed on the system. 
Hence now we will be installing the Docker application.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n if [ \"$_INTERNET_STATUS\" == \"1\" ]; then\n\techo -e \"\\nCaution-msg : Installing the Docker application (Online installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t\n\t# Install Docker application via wget\n\twget -qO- https://get.docker.com/ | sh | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t\n elif [ \"$_INTERNET_STATUS\" == \"0\" ]; then\n\techo -e \"\\nCaution-msg : Installing the Docker application (Offline installation mode).\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t\n\t# Install Docker application via local deb package (dpkg)\n\tdpkg -i docker-engine.deb | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t\n else\n\techo -e \"\\nError-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t\n fi\n \n # Current user\n echo -e \"\\nInfo-msg : Current Username : $(whoami) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n $sh_c 'more /etc/group' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n _CUR_USER=$(whoami);\n # Adding the current user in docker group\n echo -e \"\\nInfo-msg : Adding $(whoami) and $_CUR_USER in docker group \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo usermod -aG docker $(whoami)' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n $sh_c \"sudo usermod -aG docker $_CUR_USER\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n $sh_c 'more /etc/group' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n \n # Checking the current group of the current user\n # echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n # $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # # Forcefully changing group of the current user to docker group (to avoid restart)\n # echo -e \"\\nInfo-msg : Forcefully changing group of the $(whoami) to docker group : newgrp docker \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# $sh_c 'newgrp docker ' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # # Checking the current group of the current user\n # echo -e \"\\nInfo-msg : Checking the current group of the $(whoami) : id -g \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n # $sh_c 'id -g' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n # Starting docker(docker-engine) service\n echo -e \"\\nInfo-msg : Starting docker service (docker-engine) \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo start docker' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'sudo service docker start' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \nfi\n $sh_c 'touch docker-inst.lock' | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n $sh_c 'echo $INSTALL_LOG > docker-inst.lock' | sed -e \"s/^/$(date 
+%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n echo -e \"\\nInfo-msg : Please reboot the system to take effect. And re-run this script again to continue the installation. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n exit\n \nelif [[ -f docker-inst.lock ]]; then\n\n\n# Checking for the interent connections\ninternet_check\n\n# We are checking the gstudio repo. If the directory exists then take git pull or else take clone of online repo\necho -e \"\\nInfo-msg : Checking gstudio repo local directory. If the directory exists then take git pull or else take clone of online repo \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# docker image and container related variables\n_repo_branch=\"\";\n\n\necho -e \"\\nInfo-msg : Please give branch name of online repo \" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\necho -e -n \"\\n${red}Branch name: ${reset}\" | tee -a ${INSTALL_LOG_FILE}\nread -t 60 _repo_branch\n\nif [ \"$_repo_branch\" == \"\" ]; then\n echo -e \"\\nInfo-msg : No value provided. So applying default value as replica. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n _repo_branch=\"replica\";\nfi \n\ngit branch -r | cut -d/ -f2- | grep -v HEAD | grep $_repo_branch > /dev/null 2>&1\n\nif [ $? -eq 0 ]; then\n echo -e \"\\nInfo-msg : Value provided is $_repo_branch. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n echo -e \"\\nInfo-msg : Value provided is $_repo_branch. Input is invalid \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit\nfi\n\nif [ -d \"gstudio\" ]; then\n cd gstudio\n\n git branch -r --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n\techo -e \"\\nInfo-msg : Value is correct hence continuing the procedure. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\techo -e \"\\nInfo-msg : No value provided. So applying default value as replica. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_repo_branch=\"replica\";\n fi\t\n\n # Pull the gstudio code from github online repo\n echo -e \"\\nInfo-msg : Pull the gstudio latest code from github online repo and $_repo_branch branch as gstudio directory already exist.\\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n git branch --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n\tgit checkout $_repo_branch # Switch to branch \n else\n\tgit checkout -b $_repo_branch # Create and Switched to branch\n fi\t\n #git pull https://github.com/gnowledge/gstudio.git\n git pull origin $_repo_branch | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n cd ..\nelse \n # Clone the gstudio code from github online repo\n echo -e \"\\nInfo-msg : Clone the gstudio code from github online repo : $_repo_branch \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n git clone https://github.com/gnowledge/gstudio.git -b $_repo_branch | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n\necho -e 'Please select the database for storing the users credentials: \\n 1. sqlite \\n 2. 
postgresql';\nread _OPTION ;\n\n# Mrunal : for applying comments for sqlite3/postgresql\nbash scripts/local_settings_changes.sh $_OPTION $_repo_branch\n\n# Mrunal : start the container\nbash scripts/start-new-container.sh $_OPTION $_repo_branch\n\necho -e \"\\nInfo-msg : copy pre-install logs to docker system \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nsudo docker cp ${INSTALL_LOG_FILE} $(docker inspect -f '{{.Id}}' $(docker ps -q)):/root/DockerLogs/ | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\necho -e \"\\nInfo-msg : Verify the copy process and existence of the file \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nsudo ls /var/lib/docker/aufs/mnt/$(docker inspect -f '{{.Id}}' $(docker ps -q --filter=image=$dock_img_name))/root/DockerLogs/ | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\ndocker ps -q --filter=image=$dock_img_name > /dev/null 2>&1 # Mrunal : No redirections here please\nif [ $? -eq 0 ]; then\n # Installation completed\n echo -e \"Info-msg : Installation complete successfully. Just enter your ipaddress:port in address bar of your internet browser.\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n # Installation Failed\n echo -e \"Caution-msg : Installation Failed. Please try again. \\n\" | sed -e \"s/^/$(date +%Y%b%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\nfi # file checking docker-inst.lock\n# ----------------------------- Shell file code ends here ------------------------------------------\n}\n"
},
{
"alpha_fraction": 0.6736842393875122,
"alphanum_fraction": 0.7157894968986511,
"avg_line_length": 22.75,
"blob_id": "c2909d2ac4858f728cd2049a73c91b9deef6a8f7",
"content_id": "d8b02f30cbb4885aa216fa0f36375e000282c074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 4,
"path": "/scripts/smtpd.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"[run] smtpd command\"\npython -m smtpd -n -c DebuggingServer localhost:1025 &\n"
},
{
"alpha_fraction": 0.4040403962135315,
"alphanum_fraction": 0.40656566619873047,
"avg_line_length": 29.461538314819336,
"blob_id": "158a5bcf227051d3bd7efa718963fcb440818e9a",
"content_id": "1efde7dfa13f332673bfb26c917fc44a9343c9bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 13,
"path": "/scripts/numa-arch-check.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#--------------------------------------------------------------------------------------------------------------#\n\nexport GNUM_ARC=NO_IDEA;\nnum_o=`numactl --show | grep -w \"cpubind\" | grep -v 'cpubind-' | awk '{gsub(\"cpubind: \", \"\");print}'`;\nif [ $num_o == \"0\" ]; then\n echo \"Numa arch - No\" ;\n export GNUM_ARC=NO;\nelse\n echo \"Numa arch - Yes\" ;\n export GNUM_ARC=YES;\nfi\n"
},
{
"alpha_fraction": 0.6580430269241333,
"alphanum_fraction": 0.6680327653884888,
"avg_line_length": 44.929412841796875,
"blob_id": "9d0e715dd1a41462620064821aa9114e17346cdc",
"content_id": "17477d65975f1e7be919ca80acde366a761066d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3904,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 85,
"path": "/scripts/update-oac-and-oat.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n#--------------------------------------------------------------------#\n# Update oac and oat for clix school server (gstudio) \n# File name : update-oac-and-oat.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : 24-05-2017 16:36:PM\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for taking backup of gstudio\n# 1. Update oac and oat for clix school server. This file will update the oac and oat directory with the new updates.\n#\n#\n# Reference\t : https://stackoverflow.com/questions/9980186/how-to-create-a-patch-for-a-whole-directory-to-update-it\n# Information : \n#\n#\n# I just had this same problem - lots of advice on how to half do it. Well, here is what I did to get both the patching and unpatching to work:\n#\n# To Create the Patch File:\n#\n# Put copies of both directories in say /tmp, so we can create the patch file, or if brave, get them side by side - in one directory.\n#\n# Run an appropriate diff on the two directories, old and new:\n#\n# diff -ruN orig/ new/ > file.patch\n# \t\t# -r == recursive, so do subdirectories\n# \t\t# -u == unified style, if your system lacks it or if recipient\n# \t\t# may not have it, use \"-c\"\n# \t\t# -N == treat absent files as empty\n#\n# If a person has the orig/ directory, they can recreate the new one by running patch.\n#\n# To Recreate the new folder from old folder and patch file:\n#\n# Move the patch file to a directory where the orig/ folder exists\n#\n# This folder will get clobbered, so keep a backup of it somewhere, or use a copy.\n#\n# patch -s -p0 < file.patch\n# \t\t# -s == silent except errors\n# \t\t# -p0 == needed to find the proper folder\n#\n# At this point, the orig/ folder contains the new/ content, but still has its old name, so:\n#\n# mv orig/ new/ # if the folder names are different\n#\n#--------------------------------------------------------------------#\n\n# Copy patch files in setup-softwares directory\n#rsync -avzPh /mnt/oac.patch /mnt/oat.patch /home/core/setup-softwares/\n\n# fetch latest patch date and time stamp\nfilename=$(basename $(ls /mnt/update_*.tar.gz | head -n 1));\nupdate_patch=\"${filename%.*.*}\";\nupdate_patch=\"update_patch-beb6af2-r2.1-20171229\"\n\n# Apply patches - change the directory till the patch location and apply the patches\n#docker exec -it gstudio /bin/sh -c \"cp -rv /home/docker/code/${update_patch}/oac-and-oat-updates/oac.patch /softwares/ && cd /softwares && patch -s -p0 < oac.patch\"\n#docker exec -it gstudio /bin/sh -c \"cp -rv /home/docker/code/${update_patch}/oac-and-oat-updates/oat.patch /softwares/ && cd /softwares && patch -s -p0 < oat.patch\"\n\ndocker exec -it gstudio /bin/sh -c \"rsync -avzPh /home/docker/code/${update_patch}/oac-and-oat-updates/oac /home/docker/code/${update_patch}/oac-and-oat-updates/oat /softwares/\"\n#docker exec -it gstudio /bin/sh -c \"rsync -avzPh /home/docker/code/${update_patch}/oac-and-oat-updates/CLIx/datastore/AssetContent/* /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore/repository/AssetContent/\"\n#docker exec -it gstudio /bin/sh -c \"cd /home/docker/code/${update_patch}/oac-and-oat-updates/CLIx/ && mongorestore --drop mongodump \"\n\n\n# Make directories to keep of patches\nsudo mkdir -p /home/core/data/updates_archives/\n\n\n# Copy patch files in old patches directory\nsudo rsync -avzPh /mnt/${update_patch}.tar.gz /home/core/data/updates_archives/\n\n# As the patches are applied we can remove it now (from host system)\n#rm -rf 
/tmp/*\n#sudo mv /mnt/${update_patch} /home/core/setup-software/oac.patch /home/core/setup-software/oat.patch /tmp/\n#sudo rm -rf /tmp/${update_patch} /tmp/oa*.patch\n\n# As the patches are applied we can remove it now (from docker container)\n#rm -rf /tmp/*\ndocker exec -it gstudio /bin/sh -c \"mv /home/docker/code/${update_patch} /tmp/ && rm -rf /tmp/${update_patch}\"\n\nexit\n"
},
{
"alpha_fraction": 0.6145124435424805,
"alphanum_fraction": 0.6402116417884827,
"avg_line_length": 65.1500015258789,
"blob_id": "c3298ae1c01ca362f8d894d765c1d6abe7d1cdb5",
"content_id": "7fd4701e8cb2c76d6a925a378d32329c0d8d12c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1323,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 20,
"path": "/scripts/Execute-get_all_users_activity_timestamp_csvs.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\necho \"Script started\"\n# Variable definations\nday_of_week=$(date +%A); # (0..6); 0 is Sunday, 5 is Friday\nday_of_month=$(date +%d); # (1-28 for Feb and 1-30 / 1-31 for the other months); day \nmonth=$(date +%B); # (1-12)Month; 1 is January and 2 is February\nsleeping_time=\"10m\";\n\n# If 2nd and 4th Friday trigger the script inside docker container (after sleeping time to give enough time to start container and services inside it). \n# Else pint date and necessary variables. \nif ([ ${day_of_week} == \"Friday\" -a ${day_of_month} -ge 8 -a ${day_of_month} -le 14 ] || [ ${day_of_week} == \"Friday\" -a ${day_of_month} -ge 22 -a ${day_of_month} -le 28 ]); then # || [ ${day_of_week} == \"Sunday\" -a ${day_of_month} -ge 22 -a ${day_of_month} -le 28 ] || []); then\n echo \"Trigger script (Before sleeping : '$(date)') {Sleeping time: ${sleeping_time}in}\"; \n sleep ${sleeping_time};\n echo \"Trigger script (Before sleeping : '$(date)') {Sleeping time: ${sleeping_time}in}\"; \n docker exec gstudio /bin/sh -c \"echo \\\"execfile('/home/docker/code/gstudio/doc/deployer/get_all_users_activity_timestamp_csvs.py')\\\" |/usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py shell\";\nelse\n echo \"Script not Triggered (Date: '$(date)') : ${day_of_week} : ${month} : ${day_of_month} :\";\nfi\n\nexit 0;\n"
},
{
"alpha_fraction": 0.45632854104042053,
"alphanum_fraction": 0.5578455328941345,
"avg_line_length": 60.71158218383789,
"blob_id": "add3d888968997c1ded9026b591c5923137f7d61",
"content_id": "4897914cedb4f75b5e248d736e153be29c48647d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 26104,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 423,
"path": "/scripts/Backup-script-gstudio-pilot.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n#--------------------------------------------------------------------#\n# Backup of gstudio \n# File name : Backup-script-mrunal.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : 26-06-2014 12:04:AM\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for taking backup of gstudio\n# 1. Check for backup directory - If don't exist please create the same.\n#\t\t\t\t\t1.1\tBackup directory : /home/glab/rcs-db-backup/<yyyy-mm-dd> i.e for 26th June 2015 it will be \"/home/glab/rcs-db-backup/2015-06-26\"\n#\t\t\t\t\t1.2 In backup directory we will have 2 sub directories \"rcs\" for rcs repo backup and \"mongodb\" for mongodb database backup (mongodb dump)\n#\t\t\t\t 2. Take backup of rcs via cp (copy -rv) command\n#\t\t\t\t 3. Take backup of mongodb via mongodbdump command\n#\t\t\t\t 4. Create a compressed file (TAR File - tar.bz2)\n#\t\t\t\t 5. Optional - Move the backup directory to /tmp/ after successful creation of tar.bz2 file\n#--------------------------------------------------------------------#\n\n#--------------------------------------------------------------------#\n# Setup log directories and Log files. \n#--------------------------------------------------------------------#\nulimit -c unlimited\n\n# Mrunal : 26-06-2014 12:04:AM : Log file / details related variables\nLOG_DIR=\"$HOME/Backup/LOGS/\";\nDateTime_STAMP=$(date +%Y%m%d-%H%M%S)\nBACKUP_LOG_FILE=\"$HOME/Backup/LOGS/backup-$DateTime_STAMP.log\"; # Mrunal : 26-06-2014 12:04:AM : used for redirecting Standard_output(Normal msg)\nERROR_LOG_FILE=\"$HOME/Backup/LOGS/error-$DateTime_STAMP.log\"; # Mrunal : 26-06-2014 12:04:AM : used for redirecting Standard_error(Error msg)\nappd_date=\"| sed -e \"\"s/^/$(date -R) /\"; # Mrunal : 26-06-2014 12:04:AM : Used for appending date at the satrting of the line in the log file\n\n# Mrunal : 26-06-2014 12:04:AM : Backup related variables\nBACKUP_DIR=\"$HOME/Backup/$DateTime_STAMP\" # Mrunal : 26-06-2014 12:04:AM : Used for Backup directory (full path) \n#BACKUP_DIR_NAME=${BACKUP_DIR#\"/\"} # Mrunal : 26-06-2014 12:04:AM : Used for Backup directory (Just name) \nBACKUP_DIR_NAME=$DateTime_STAMP # Mrunal : 26-06-2014 12:04:AM : Used for Backup directory (Just name) \nRCS_REPO_SOURCE_PATH=\"/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/rcs-repo\" # Mrunal : 26-06-2014 12:04:AM : Used for RCS Repo source directory (full path)\n\nDATABASE_NAME=\"meta-mongodb\" # Mrunal : 26-06-2014 12:04:AM : Used for Database name of mongodb\n\nSQLITE_SOURCE_PATH=\"/home/docker/code/gstudio/gnowsys-ndf/\" # Mrunal : 26-06-2014 12:04:AM : Used for RCS Repo source directory (full path)\nSQLITE_SOURCE_NAME=\"example-sqlite3.db\" # Mrunal : 26-06-2014 12:04:AM : Used for RCS Repo source directory (full path)\n\nTAR_FILE_NAME=\"gstudio-$DateTime_STAMP\" # Mrunal : 26-06-2014 12:04:AM : used for creating tar file\n#TAR_FILE_NAME=\"${TAR_FILE_NAME:1:${#TAR_FILE_NAME}-1}\"\n\n#--------------------------------------------------------------------#\n# Check Directory...\n#\tIf directory is present : Display messages\n#\tIf directory is not present : create and display messages\n#--------------------------------------------------------------------#\n\ncheck_dir () {\n if [ -d $1 ]; then\n\techo \"$1 directory is already present.\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"$1 directory is already present.\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\techo \"\"; # Mrunal 
: 26-06-2014 12:04:AM\n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n else\n\t# Mrunal : 26-06-2014 12:04:AM : Check the existence of the directory\n\tif [ $1 == $LOG_DIR ]; then\n\t echo \"mkdir -p $1\" # Mrunal : 26-06-2014 12:04:AM : Log printing exempted as it is creating LOG directory\n\t `mkdir -p $1` # Mrunal : 26-06-2014 12:04:AM : Create LOG directory\n\telse\n\t echo \"mkdir -p $1\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t `mkdir -p $1` | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t echo \"\" # Mrunal : 26-06-2014 12:04:AM \n\t echo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\tfi\n\techo \"$1 directory is now been created.\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"$1 directory is now been created.\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n fi\n}\n\n#--------------------------------------------------------------------#\n# Backup Script execution starts from here..\n#--------------------------------------------------------------------#\n\n################# BACKUP STARTS FROM HERE #######################\n\n# To check LOG directory and files (If directory is not created do create it with function)\n# Here check_dir is the function and $LOG_DIR is dirctory full path variable defined earlier\n\ncheck_dir $LOG_DIR # Mrunal : 26-06-2014 12:04:AM : Calling check_dir function to check LOG directory existence\n\necho \"++++++++++++++++++++++++++ Backup - Started +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ Backup - Started +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#------------------------------------------------------------------------------\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"Creating the backup directory\" # Mrunal : 26-06-2014 12:04:AM \necho \"Creating the backup directory\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\ncheck_dir \"$BACKUP_DIR\" # Mrunal : 26-06-2014 12:04:AM \n\n#------------------------------------------------------------------------------\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"Backup of RCS-Repo started\" # Mrunal : 26-06-2014 12:04:AM \necho \"Backup of RCS-Repo started\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date 
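# Note (added for clarity): the backup commands below are piped through sed to timestamp log lines, so \"$?\" would report sed's status rather than the backup command's; the checks therefore use ${PIPESTATUS[0]}, e.g.:\n# cp -rv SRC DST | sed -e \"s/^/$(date -R) /\" ; [ \"${PIPESTATUS[0]}\" = \"0\" ] || echo \"cp failed\"\n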
-R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\ncheck_dir \"$BACKUP_DIR\" # Mrunal : 26-06-2014 12:04:AM \n\necho \"cp -rv $RCS_REPO_SOURCE_PATH $BACKUP_DIR/\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \ncp -rv $RCS_REPO_SOURCE_PATH $BACKUP_DIR/ | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\nif [ \"$?\" != \"0\" ]; then\n\techo \"Backup (RCS) - Failed. Seems like the source path doesn't exists\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (RCS) - Failed. Seems like the source path doesn't exists\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \nelse\n\techo \"Backup (RCS) - Successfully completed. \"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (RCS) - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n#\texit 1; \n\n\t# Mrunal : 26-06-2014 12:04:AM : Size\n\techo \"Size of $RCS_REPO_SOURCE_PATH : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"Size of $RCS_REPO_SOURCE_PATH : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"du -hs $RCS_REPO_SOURCE_PATH | awk '{ print $1 }'\" # Mrunal : 26-06-2014 12:04:AM \n\tdu -hs $RCS_REPO_SOURCE_PATH | awk '{ print $1 }'\n\tdu -hs $RCS_REPO_SOURCE_PATH | awk '{ print $1 }' | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"Size of $BACKUP_DIR/rcs-repo/ : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"Size of $BACKUP_DIR/rcs-repo/ : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"du -hs $BACKUP_DIR/rcs | awk '{ print $1 }'\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\tdu -hs $BACKUP_DIR/rcs-repo/ | awk '{ print $1 }'\n\tdu -hs $BACKUP_DIR/rcs-repo/ | awk '{ print $1 }' | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrun| awk '{ print $1 }'al : 26-06-2014 12:04:AM \n\n\t\n\t# Mrunal : 26-06-2014 12:04:AM : No of directories\n\t\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"No of directories in $RCS_REPO_SOURCE_PATH : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"No of directories in $RCS_REPO_SOURCE_PATH : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"find $RCS_REPO_SOURCE_PATH -type d | wc -l \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\tfind $RCS_REPO_SOURCE_PATH -type d | wc -l\n\tfind $RCS_REPO_SOURCE_PATH -type d | wc -l | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> 
${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"No of directories in $BACKUP_DIR/rcs/ : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"No of directories in $BACKUP_DIR/rcs/ : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"find $BACKUP_DIR/rcs-repo/ -type d | wc -l \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\tfind $BACKUP_DIR/rcs-repo/ -type d | wc -l\n\tfind $BACKUP_DIR/rcs-repo/ -type d | wc -l | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\t# Mrunal : 26-06-2014 12:04:AM : No of files\n\t\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\techo \"No of files in $RCS_REPO_SOURCE_PATH : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"No of files in $RCS_REPO_SOURCE_PATH : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"find $RCS_REPO_SOURCE_PATH -type f | wc -l \" # Mrunal : 26-06-2014 12:04:AM \n\tfind $RCS_REPO_SOURCE_PATH -type d | wc -l \n\tfind $RCS_REPO_SOURCE_PATH -type d | wc -l | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"No of files in $BACKUP_DIR/rcs-repo/ : \" # Mrunal : 26-06-2014 12:04:AM \n\techo \"No of files in $BACKUP_DIR/rcs-repo/ : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"find $BACKUP_DIR/rcs-repo/ -type f | wc -l \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\tfind $BACKUP_DIR/rcs-repo/ -type f | wc -l \n\tfind $BACKUP_DIR/rcs-repo/ -type f | wc -l | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\techo \"\" # Mrunal : 26-06-2014 12:04:AM \n\techo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\nfi\n\n#------------------------------------------------------------------------------\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"Backup of mongodb started\" # Mrunal : 26-06-2014 12:04:AM \necho \"Backup of mongodb started\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\ncheck_dir \"$BACKUP_DIR/mongodb\" # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n# echo \"pwd\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n# echo 
\"pwd:\" & pwd\n\n# echo \"\" # Mrunal : 26-06-2014 12:04:AM \n# echo \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"cd $BACKUP_DIR/mongodb\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \necho \"cd $BACKUP_DIR/mongodb\"\ncd $BACKUP_DIR/mongodb\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"mongodump -d $DATABASE_NAME\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \nmongodump -d \"$DATABASE_NAME\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\nif [ \"$?\" == \"0\" ]; then\n\techo \"Backup (Mongodb) - Successfully completed. \"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (Mongodb) - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n\t#exit 1; \nelse\n\techo \"Backup (Mongodb) - Failed. Seems like the database with the name doesn't exists\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (Mongodb) - Failed. Seems like the database with the name doesn't exists\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \nfi\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n# Mrunal : 26-06-2014 12:04:AM : Size\necho \"Size of $BACKUP_DIR/mongodb/ : \" # Mrunal : 26-06-2014 12:04:AM \necho \"Size of $BACKUP_DIR/mongodb/ : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"du -hs $BACKUP_DIR/mongodb/ | awk '{ print $1 }'\" # Mrunal : 26-06-2014 12:04:AM \ndu -hs $BACKUP_DIR/mongodb/ | awk '{ print $1 }'\ndu -hs $BACKUP_DIR/mongodb/ | awk '{ print $1 }' | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\n\n#------------------------------------------------------------------------------\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"Backup of sqlite(example-sqlite) started\" # Mrunal : 26-06-2014 12:04:AM \necho \"Backup of sqlite(example-sqlite) started\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\ncheck_dir \"$BACKUP_DIR\" # Mrunal : 26-06-2014 12:04:AM \n\necho \"cp -rv $SQLITE_SOURCE_PATH$SQLITE_SOURCE_NAME $BACKUP_DIR/\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \ncp -rv $SQLITE_SOURCE_PATH$SQLITE_SOURCE_NAME $BACKUP_DIR/ | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\nif [ \"$?\" == \"0\" ]; then\n\techo \"Backup (SQLITE) - Successfully completed. 
\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (SQLITE) - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n#\texit 1; \nelse\n\techo \"Backup (SQLITE) - Failed. Seems like the source path doesn't exists\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (SQLITE) - Failed. Seems like the source path doesn't exists\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \nfi\n\n# Mrunal : 26-06-2014 12:04:AM : Size\necho \"Size of $SQLITE_SOURCE_PATH/$SQLITE_SOURCE_NAME : \" # Mrunal : 26-06-2014 12:04:AM \necho \"Size of $SQLITE_SOURCE_PATH/$SQLITE_SOURCE_NAME : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"du -hs $SQLITE_SOURCE_NAME | awk '{ print $1 }'\" # Mrunal : 26-06-2014 12:04:AM \ndu -hs \"$SQLITE_SOURCE_PATH/$SQLITE_SOURCE_NAME\" | awk '{ print $1 }'\ndu -hs \"$SQLITE_SOURCE_PATH/$SQLITE_SOURCE_NAME\" | awk '{ print $1 }' | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\necho \"Size of $BACKUP_DIR/$SQLITE_SOURCE_NAME : \" # Mrunal : 26-06-2014 12:04:AM \necho \"Size of $BACKUP_DIR/$SQLITE_SOURCE_NAME : \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#\techo \"du -hs $BACKUP_DIR/rcs | awk '{ print $1 }'\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \ndu -hs $BACKUP_DIR/$SQLITE_SOURCE_NAME | awk '{ print $1 }'\ndu -hs $BACKUP_DIR/$SQLITE_SOURCE_NAME | awk '{ print $1 }' | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n#------------------------------------------------------------------------------\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"Compression of Backup started\" # Mrunal : 26-06-2014 12:04:AM \necho \"Compression of Backup started\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\n# Mrunal : 26-06-2014 12:04:AM : creating a tar.bz2 file of the backup\n\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"cd $BACKUP_DIR\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \necho \"cd $BACKUP_DIR\"\ncd $BACKUP_DIR\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"cd ../\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \necho \"cd ../\"\ncd ../\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"tar -cvjf $TAR_FILE_NAME.tar.bz2 
$BACKUP_DIR_NAME\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \ntar cvjf $TAR_FILE_NAME.tar.bz2 $BACKUP_DIR_NAME | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\t\nif [[ \"$?\" == \"0\" ]]; then\n\techo \"Compression of Backup - Successfully completed. \"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Compression of Backup - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n\n\t# Mrunal :26-06-2014 12:04:AM : Moving the dated backup directory to /tmp as tarball has been created successfully & later remove the same. \n\tmv -v $BACKUP_DIR/rcs /tmp/ | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\trm -rf /tmp/$BACKUP_DIR | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 13-12-2014 12:04:AM \n\nelse\n echo \"$?\";\n\techo \"Compression of Backup - Failed. Ooops seems like some issue, please check logs for more details.\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Compression of Backup - Failed. Ooops seems like some issue, please check logs for more details.\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n\t#exit 1; \nfi\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\necho \"\" # Mrunal : 26-06-2014 12:04:AM \necho \"\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho \"++++++++++++++++++++++++++ Backup - Ended +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ Backup - Ended +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\n\necho \"++++++++++++++++++++++++++ scp Backup - Started +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ scp Backup - Started +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\nscp -r $TAR_FILE_NAME.tar.bz2 [email protected]:/home/glab/clix-pilot-backup/ | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\nmv $BACKUP_DIR /tmp/\n\nif [ \"$?\" == \"0\" ]; then\n\techo \"Backup (scp) - Successfully completed. \"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (scp) - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n#\texit 1; \nelse\n\techo \"Backup (scp) - Failed. Seems like some issue.\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (scp) - Failed. 
Seems like some issue.\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \nfi\n\necho \"++++++++++++++++++++++++++ scp Backup - Ended +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ scp Backup - Ended +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} # Mrunal : 26-06-2014 12:04:AM \n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\necho \"++++++++++++++++++++++++++ Mail sending - Started +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ Mail sending - Started +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\n#echo \"Start\"\n#echo \"Start\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\necho \"PFA the backup logs for pilot system (158.144.44.198)\" | mail -A ${BACKUP_LOG_FILE} -A ${ERROR_LOG_FILE} -A /root/backup.log -s \"Backup File - pilot\" [email protected]\necho \"PFA the backup logs for pilot system (158.144.44.198)\" | mail -A ${BACKUP_LOG_FILE} -A ${ERROR_LOG_FILE} -A /root/backup.log -s \"Backup File - pilot\" [email protected]\n#echo \"End\"\n#echo \"End\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\nif [ \"$?\" == \"0\" ]; then\n\techo \"Backup (Mail sending) - Successfully completed. \"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (Mail sending) - Successfully completed. \" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \n#\texit 1; \nelse\n\techo \"Backup (Mail sending) - Failed. Seems like some issue.\"; # Mrunal : 26-06-2014 12:04:AM \n\techo \"Backup (Mail sending) - Failed. Seems like some issue.\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE} ; # Mrunal : 26-06-2014 12:04:AM \nfi\n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\necho \"++++++++++++++++++++++++++ Mail sending - Ended +++++++++++++++++++++++++++++++++\" # Mrunal : 26-06-2014 12:04:AM \necho \"++++++++++++++++++++++++++ Mail sending - Ended +++++++++++++++++++++++++++++++++\" | sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\n\necho\necho \"\"| sed -e \"s/^/$(date -R) /\" 1>> ${BACKUP_LOG_FILE} 2>> ${ERROR_LOG_FILE}\nexit 0\n"
},
{
"alpha_fraction": 0.5621677041053772,
"alphanum_fraction": 0.5778378844261169,
"avg_line_length": 49.09812927246094,
"blob_id": "751dcf8a84b25781c30287e210541077883a2902",
"content_id": "d285ed5879c2f792809b2e4f38059524b686e99a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 10721,
"license_type": "no_license",
"max_line_length": 291,
"num_lines": 214,
"path": "/scripts/build-docker-image.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n{\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : build-docker.sh\n# File creation : gnowgi\n# Description :\n# git clone\n# Build Docker-Image via docker build command (using Dockerfile)\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM\n# Description : \n# Logs directory check and creation\n# Prerequisites - Checking for OS version and architecture\n# Checking type of user and permission\n# Internet checking\n# Checking wget package\n# Docker application / package checking and installation\n# Creating local copy of replica code via git clone or update via git pull \n# Build Docker-Image via docker build command (using Dockerfile)\n# Verify image creation\n# Start the Docker-container via docker run command (using newly created docker image)\n# Copy host logs(pre-install logs) inside docker container \n# Verify initialization of docker-container and display message of completion\n#--------------------------------------------------------------------------------------------------------------#\n\n#-----------------------------------------------------------------------\n# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM : Old code\n#git clone https://github.com/gnowledge/gstudio.git\n#docker build -t gnowgi/gstudio .\n#-----------------------------------------------------------------------\n\n\n# shell \nsh_c=\"sh -c\"\n\n# bfilename is base file name, extension is file extension and filename is just file name without path and extension\nbfilename=$(basename \"$0\")\nextension=\"${filename##*.}\"\nfilename=\"${filename%.*}\"\n#echo \"Here ----------- :$filename:$extension:$filename:$0:\";\n\n\n# Mrunal : Set HOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\n#echo \"FIle1 - $file1 and File2 - $file2----------------------------------------\"\nsed -e \"/hHOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\nmore $file1/confs/deploy.conf | grep hHOME; \n\n\n#--------------------------------------------------------------------#\n# Log file details...\n#--------------------------------------------------------------------#\nLOG_DIR=\"$(pwd)/Pre-install_Logs\";\nINSTALL_LOG=\"build-docker-image-$(date +%Y%m%d-%H%M%S).log\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nINSTALL_LOG_FILE=\"$LOG_DIR/$INSTALL_LOG\"; # Mrunal : Fri Aug 28 17:38:35 IST 2015 : used for redirecting Standard_output(Normal msg)\nHOME=\"\";\nmkdir $LOG_DIR\ntouch $INSTALL_LOG_FILE\n\nlog1=`echo $LOG_DIR | sed -e 's/\\//\\\\\\\\\\//g'` ;\nlog2=`echo $INSTALL_LOG_FILE | sed -e 's/\\//\\\\\\\\\\//g'` ;\n\n\n#echo \"MLOG:$LOG_DIR : $file1/confs/deploy.conf --------------- $filename1\"\nsed -e \"/LOG_DIR/ s/=.*;/=$log1;/\" -i $file1/confs/deploy.conf;\nsed -e \"/INSTALL_LOG/ s/=.*;/=$INSTALL_LOG;/\" -i $file1/confs/deploy.conf;\nsed -e \"/INSTALL_LOG_FILE/ s/=.*;/=$log2;/\" -i $file1/confs/deploy.conf;\n# ---------------- Log files variable def ends here -----------------\n\n\nsource $file1/confs/deploy.conf\nsource $file1/scripts/internet-check.sh\n\n\n\npwd\n#sg docker -c 'pwd'\ndocker ps\necho -e \"\\n\"\nif [[ $? != 0 ]]; then\n echo -e \"\\nCaution-msg : Please check the docker installation Or install docker and restart system to take effect. Try again later after. 
$?\\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit;\nfi\n\n# if [[ $1 == \"\" ]]; then\n# echo \"Please provide the image name with complete path.(\\path_to_the_dirctory\\file_name)\" ;\n# echo \"(For example '/home/docker/code/school-server_mongokit_v1-20160330-134534' must be the default file name and hit Enter key of Keyboard)\" ;\n# read dock_img_file ;\n# elif [[ -f $dock_img_file ]]; then\n# dock_img_file=$1;\n# elif [[ ! -f $dock_img_file ]]; then\n# echo -e \"Info-msg : Docker image file does not exist. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} \n# else\n# echo -e \"\\nCaution-msg : Something went wrong.\\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# exit;\n# fi\n# echo \"Docker image file name entered is $dock_img_file .\" ;\n\n\n\n# Checking for the interent connections\nsource $file1/scripts/internet-check.sh\n\n# We are checking the gstudio repo. If the directory exists then take git pull or else take clone of online repo\necho -e \"\\nInfo-msg : Checking gstudio repo local directory. If the directory exists then take git pull or else take clone of online repo \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n# docker image and container related variables\n_repo_branch=\"\";\n\n\necho -e \"\\nInfo-msg : Please give branch name of online repo \" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\necho -e -n \"\\n${red}Branch name: ${reset}\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nread -t 60 _repo_branch\n\nif [ \"$_repo_branch\" == \"\" ]; then\n echo -e \"\\nInfo-msg : No value provided. So applying default value as replica. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n _repo_branch=\"replica\";\nfi \n\n# git branch -r | cut -d/ -f2- | grep -v HEAD | grep $_repo_branch > /dev/null 2>&1\n\n# if [ $? -eq 0 ]; then\n# echo -e \"\\nInfo-msg : Value provided is $_repo_branch. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# else\n# echo -e \"\\nInfo-msg : Value provided is $_repo_branch. Input is invalid \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# exit\n# fi\n\nif [ -d \"gstudio\" ]; then\n cd gstudio\n\n git branch -r --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If internet connection is available\n\techo -e \"\\nInfo-msg : Value is correct hence continuing the procedure. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\techo -e \"\\nInfo-msg : No value provided. So applying default value as replica. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_repo_branch=\"replica\";\n fi\t\n\n # Pull the gstudio code from github online repo\n echo -e \"\\nInfo-msg : Pull the gstudio latest code from github online repo and $_repo_branch branch as gstudio directory already exist.\\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n git branch --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? 
# git branch -r | cut -d/ -f2- | grep -v HEAD | grep $_repo_branch > /dev/null 2>&1\n\n# if [ $? -eq 0 ]; then\n# echo -e \"\\nInfo-msg : Value provided is $_repo_branch. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# else\n# echo -e \"\\nInfo-msg : Value provided is $_repo_branch. Input is invalid \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n# exit\n# fi\n\nif [ -d \"gstudio\" ]; then\n cd gstudio\n\n git branch -r --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If the branch exists in the remote branch list\n\techo -e \"\\nInfo-msg : Value is correct hence continuing the procedure. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n else\n\techo -e \"\\nInfo-msg : Branch $_repo_branch not found in the remote list. So applying default value replica. \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\t_repo_branch=\"replica\";\n fi\t\n\n # Pull the gstudio code from github online repo\n echo -e \"\\nInfo-msg : Pull the gstudio latest code from github online repo and $_repo_branch branch as gstudio directory already exists.\\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n \n git branch --list | grep $_repo_branch > /dev/null 2>&1 # Mrunal : No redirections here please\n if [ $? -eq 0 ]; then # If the branch already exists locally\n\tgit checkout $_repo_branch # Switch to branch \n else\n\tgit checkout -b $_repo_branch # Create and switch to branch\n fi\t\n #git pull https://github.com/gnowledge/gstudio.git\n git pull origin $_repo_branch | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE} # Mrunal-uncomment it - kedar ebuddy issue quick fix\n cd ..\nelse \n # Clone the gstudio code from github online repo\n echo -e \"\\nInfo-msg : Clone the gstudio code from github online repo : $_repo_branch \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n git clone https://github.com/gnowledge/gstudio.git -b $_repo_branch | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n\necho -e 'Please select the database for storing the user credentials: \\n 1. sqlite \\n 2. postgresql';\nread _OPTION ;\n\necho -e \"$HOME/scripts/local_settings_changes.sh\"\n\n# Mrunal : for applying comments for sqlite3/postgresql\nbash $HOME/scripts/local_settings_changes.sh $_OPTION $_repo_branch\n\n\n\n# Checking for the internet connections\nsource $file1/scripts/internet-check.sh\n\n\n# Build the docker image (via instructions in Docker file)\necho -e \"\\nInfo-msg : Build the docker image (via instructions in Docker file). This process may take a long time {depends on the internet speed; approx. 45 mins - 1 hr 45 mins} . docker build -t $dock_img_name . \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\ndocker build -t $dock_img_name .\n\ndocker images | grep -q \"$dock_img_name\" # Mrunal : docker images exits 0 even when no image matches, so grep for the image name instead\nif [ $? -eq 0 ]; then\n # Docker-Image created successfully\n echo -e \"Info-msg : Docker-Image created successfully. Now initiating the Docker-Container with created docker image($dock_img_name).\\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n bash $HOME/scripts/start-new-container.sh $dock_img_name\nelse\n # Docker-Image creation Failed\n echo -e \"Caution-msg : Docker-Image creation Failed. Please try again. (Error code :$?) \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n exit\nfi\n\n# Mrunal : start the container\n#bash $HOME/scripts/start-new-container.sh $_OPTION $_repo_branch\n\necho -e \"\\nInfo-msg : copy pre-install logs to docker system \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nsudo docker cp ${INSTALL_LOG_FILE} $(docker inspect -f '{{.Id}}' $(docker ps -q --filter=ancestor=$dock_img_name)):/root/DockerLogs/ | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\necho -e \"\\nInfo-msg : Verify the copy process and existence of the file \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nsudo ls /var/lib/docker/aufs/mnt/$(docker inspect -f '{{.Id}}' $(docker ps -q --filter=ancestor=$dock_img_name))/root/DockerLogs/ | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\n\n\n# Mrunal : docker ps exits 0 even with no matching container, so test for non-empty output instead\nif [ -n \"$(docker ps -q --filter=ancestor=$dock_img_name)\" ]; then\n # Installation completed\n echo -e \"Info-msg : Docker image built successfully. Just enter your ipaddress:port in address bar of your internet browser.\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nelse\n # Installation Failed\n echo -e \"Caution-msg : Docker image building failed. Please try again. 
(Error code :$?) \\n\" | sed -e \"s/^/$(date +%Y%m%d-%I%M%S%p) $ /\" 2>&1 | tee -a ${INSTALL_LOG_FILE}\nfi\n\n\n# ----------------------------- Shell file code ends here ------------------------------------------\n\n}\n"
},
{
"alpha_fraction": 0.6102752089500427,
"alphanum_fraction": 0.621100902557373,
"avg_line_length": 36.84722137451172,
"blob_id": "01fbc4912d533167a3b05c1718f6d325fac2c2ab",
"content_id": "4e6c9d1455a18fe39907c6f84cc211e288306aee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5450,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 144,
"path": "/confs/local_settings.py.default",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#------------------------------------------ Changing the website instances(Logo and database etc) -------------------------------------------------\nDEBUG = False\nALLOWED_HOSTS = [\"127.0.0.1\", \"*\"]\n\nimport os\n\n# Authentication related and Error reporting emails\nEMAIL_USE_TLS = \"\"\nACCOUNT_ACTIVATION_DAYS = 2\nEMAIL_HOST = 'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\nLOGIN_REDIRECT_URL = '/'\nEMAIL_SUBJECT_PREFIX='[clix-ss-error-reporting]'\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\nEMAIL_PORT = \"\"\nADMINS = (\n)\n\n\n# strength of a password\nPASSWORD_MIN_LENGTH = 6\nPASSWORD_COMPLEXITY = { # You can ommit any or all of these for no limit for that particular set\n \"LOWER\": 1, # Lowercase\n# \"UPPER\": 1, # Uppercase\n# \"DIGITS\": 1, # Digits\n}\n\nGSTUDIO_SITE_NAME = \"clix\"\nGSTUDIO_SITE_LANDING_TEMPLATE = \"ndf/landing_page_clix.html\"\nGSTUDIO_SITE_LOGO = \"/static/ndf/css/themes/clix/logo.svg\"\nGSTUDIO_SITE_SECONDARY_LOGO = \"/static/ndf/images/clix-header-icon.png\"\nGSTUDIO_SITE_FAVICON = \"/static/ndf/images/favicon/clix-favicon.png\"\nGSTUDIO_SITE_HOME_PAGE = \"/welcome\"\nGSTUDIO_RESOURCES_EDUCATIONAL_LEVEL = [\"Grade 8\", \"Grade 9\"]\nGSTUDIO_RESOURCES_EDUCATIONAL_SUBJECT = [\"English\", \"Mathematics\", \"Science\", \"Values\", \"Research\", \"Digital Literacy\"]\nGSTUDIO_IMPLICIT_ENROLL = True\nGSTUDIO_USERNAME_SELECTION_WIDGET = True\n\nGSTUDIO_CAPTCHA_VISIBLE = False\n\n# Changes given by dev - Racahna\nGSTUDIO_ENABLE_USER_DASHBOARD = False\nGSTUDIO_SECOND_LEVEL_HEADER = False\nGSTUDIO_MY_GROUPS_IN_HEADER = False\nGSTUDIO_MY_COURSES_IN_HEADER = False\nGSTUDIO_MY_DASHBOARD_IN_HEADER = False\nGSTUDIO_BUDDY_LOGIN = True\nGSTUDIO_OID_HELP = \"5a57065f69602a015794c973\"\n\nGSTUDIO_EDUCATIONAL_SUBJECTS_AS_GROUPS = True\nGSTUDIO_SOCIAL_SHARE_RESOURCE = False\nGSTUDIO_REGISTRATION = False\n\nGSTUDIO_PRIMARY_COURSE_LANGUAGE = u'en'\nGSTUDIO_WORKSPACE_INSTANCE = True\nGSTUDIO_DOC_FOOTER_TEXT = 'Connected Learning Initiative (CLIx), Tata Institute of Social Sciences'\n\n# SESSION_COOKIE_AGE = 30 * 60\n# SESSION_SAVE_EVERY_REQUEST = True\n# SESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n# Deatils related to database\nMEDIA_ROOT = '/data/media/'\nGSTUDIO_DATA_ROOT = os.path.join('/data/')\n\nGSTUDIO_LOGS_DIRNAME = 'gstudio-logs'\nGSTUDIO_LOGS_DIR_PATH = os.path.join('/data/', GSTUDIO_LOGS_DIRNAME)\n\nRCS_REPO_DIRNAME = 'rcs-repo'\nRCS_REPO_DIR = os.path.join('/data/', RCS_REPO_DIRNAME)\n\nGSTUDIO_MAIL_DIRNAME = 'MailClient'\nGSTUDIO_MAIL_DIR_PATH = os.path.join('/data/', GSTUDIO_MAIL_DIRNAME)\n\n#SQLITE3_DBNAME = 'example-sqlite3.db' # Used for sqlite3 db \n#SQLITE3_DB_PATH = os.path.join('/data/', SQLITE3_DBNAME) # Used for sqlite3 db \n\nMONGODB_DBNAME = 'gstudio-mongodb'\n\n# We have 2 database (postgres and sqlite3) connection details here. Ensure the part of connection details of database not in use is commented. 
\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\t\t# Used for postgres db \n 'NAME': 'gstudio_psql',\t\t# Used for postgres db \n 'USER': 'glab',\t\t# Used for postgres db \n 'PASSWORD':'Gstudi02)1^',\t\t# Used for postgres db \n 'HOST':'localhost',\t\t# Used for postgres db \n 'PORT':'',\t\t# Used for postgres db \n\n# 'ENGINE': 'django.db.backends.sqlite3',\t\t# Used for sqlite3 db \n# 'NAME': SQLITE3_DB_PATH,\t\t# Used for sqlite3 db \n },\n 'mongodb': {\n 'ENGINE': 'django_mongokit.mongodb',\t\t# Used for mongo db \n 'NAME': MONGODB_DBNAME,\t\t# Used for mongo db \n 'USER': '',\t\t# Used for mongo db \n 'PASSWORD': '',\t\t# Used for mongo db \n 'HOST': '', \t\t# Used for mongo db \n 'PORT': '',\t\t# Used for mongo db \n },\n}\n\n#--------------------------------------------- Replication -----------------------------------------------------\n\n# SMTP setting for sending mail (Using gmail SMTP server)\n#EMAIL_USE_TLS = True\n#EMAIL_HOST = 'your_email_id'\n#EMAIL_PORT = 587\n#EMAIL_HOST_USER = 'your_email_id' # mrunal4888@\n#EMAIL_HOST_PASSWORD = 'your_password' \n\n# The following variables are for the email id and password of the email account which will be used for receiving SYNCDATA mails\n#SYNCDATA_FETCHING_EMAIL_ID = 'your_email_id'\n#SYNCDATA_FETCHING_EMAIL_ID_PASSWORD = 'your_password'\n#SYNCDATA_FETCHING_IMAP_SERVER_ADDRESS = 'imap_address_of_server'\n\n# Mailing-list ID (i.e. to this id syncdata mails will be sent)\n#SYNCDATA_SENDING_EMAIL_ID = (\n# 'mailing_list_email_id',\n#) # Mailing list\n\n# While sending syncdata mails the from field of the mail is set by this variable\n#SYNCDATA_FROM_EMAIL_ID ='Display_name <your_email_id>' \n# sample: 'Gstudio <[email protected]>'\n\n# This is the duration (in secs) at which send_syncdata and fetch_syncdata scripts will be run\n# SYNCDATA_DURATION = 60\n\n# SIGNING KEY Pub. Fill the pub of the key with which to sign syncdata mails here\n#SYNCDATA_KEY_PUB = 'gpg_public_key'\n\n\n# ----------------------------------------------------------------------------\n# the following has to be last\n# just put everything above it\n\ntry:\n from server_settings import *\n # print \"Server settings applied\"\nexcept:\n # print \"Default settings applied\"\n pass\n\n# ========= nothing to be added below this line ===========\n"
},
{
"alpha_fraction": 0.7599999904632568,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 23.83333396911621,
"blob_id": "bc4c640bbbfc97344a03addcecc27e4ec4ebc4e2",
"content_id": "11ce838459248b1bc0c4c35cdd4a9268d8f10fae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 6,
"path": "/scripts/nltk-initialization.py",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport nltk\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('maxent_treebank_pos_tagger')\nnltk.download('punkt')\n\n"
},
{
"alpha_fraction": 0.6328734755516052,
"alphanum_fraction": 0.6773815751075745,
"avg_line_length": 37.81312942504883,
"blob_id": "5bd89ae96781d4d5106bcb773d49af3236cd1573",
"content_id": "d1324f805e7e987f7630db0c47f031cf05f0a9e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7684,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 198,
"path": "/README-update.md",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "CLIx Platform release patch 2.1, version 17.12.r1, Notes (for CG/RJ/TS)\nState: Chhattisgarh, Rajasthan and Telangana \nRelease date: 29 December 2017\n\n# Instruction to apply the update patch\n\n## Download following files:\n1. https://clixplatform.tiss.edu/softwares/clix-schoolserver-updates/update_patch-beb6af2-r2.1-20171229.tar.gz\n2. https://clixplatform.tiss.edu/softwares/clix-schoolserver-updates/update_patch-beb6af2-r2.1-20171229.tar.gz.md5sum\n3. https://clixplatform.tiss.edu/softwares/clix-schoolserver-updates/patch-r2.1.sh\n4. https://clixplatform.tiss.edu/softwares/clix-schoolserver-updates/README-update.md\n\n\n## Copy content (above downloaded files) in Pendrive (not inside any directory please - directly inside the pendrive {root of pendrive})\n\n## Check md5sum for file checksum:\n\tCommand : ``` md5sum update_patch-beb6af2-r2.1-20171229.tar.gz ```\n Ensure that the alphanumeric code (output of the above command) is matching with the content of update_patch-beb6af2-r2.1-20171229.tar.gz.md5sum (which you have downloaded from the server)\n\n## Now we have files in pendrive. Insert pendrive in the School server\n\n## Check the connection of drive with lsblk command\n\tCommand : ``` lsblk ````\n\tExpected output:\n\t\t```\n\t\tcore@clixserver ~ $ lsblk \n\t\tNAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\n\t\tsda 8:0 0 931.5G 0 disk \n\t\t|-sda4 8:4 0 1G 0 part /usr\n\t\t|-sda2 8:2 0 2M 0 part \n\t\t|-sda9 8:9 0 929.2G 0 part /\n\t\t|-sda7 8:7 0 64M 0 part \n\t\t|-sda3 8:3 0 1G 0 part \n\t\t|-sda1 8:1 0 128M 0 part /boot\n\t\t`-sda6 8:6 0 128M 0 part /usr/share/oem\n\t\tNAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\n\t\tsdb 8:0 0 931.7G 0 disk \n\t\t`-sdb1 8:1 0 16G 0 part \n\t\t```\n\n## Became root user:\n\tCommand : ``` sudo su ```\n\n## create /mnt/pd:\n\tCommand : ``` mkdir /mnt/pd ```\n\n## Mount the pendrive:\n\tCommand : ``` mount <device> /mnt/pd ```\n\tExample : ``` mount /dev/sdb1 /mnt/pd ```\n\n## Change the directory to /mnt\n\tCommand : ``` cd /mnt/pd ```\n\tExpected output:\n\t\t```\n\t\tcore@clixserver ~ $ cd /mnt/pd\n\t\tcore@clixserver /mnt/pd $ \n\t\t```\n\n## Update command\t\t\t(After the patch is applied it will display the message \"Patch 4 update finished.\")\n\tCommand : ``` bash patch-r2.1.sh ```\n\n## Shutdown command\t\t\t(After the patch is finished. Shutdown the system with following command\")\n\tCommand : ``` shutdown now ```\n\n======================================================================================================# \n\n\n# This file will contain the details about the update patch. 
## Now we have the files on the pendrive. Insert the pendrive in the School server\n\n## Check the connection of the drive with the lsblk command\n\tCommand : ``` lsblk ```\n\tExpected output:\n\t\t```\n\t\tcore@clixserver ~ $ lsblk \n\t\tNAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\n\t\tsda 8:0 0 931.5G 0 disk \n\t\t|-sda4 8:4 0 1G 0 part /usr\n\t\t|-sda2 8:2 0 2M 0 part \n\t\t|-sda9 8:9 0 929.2G 0 part /\n\t\t|-sda7 8:7 0 64M 0 part \n\t\t|-sda3 8:3 0 1G 0 part \n\t\t|-sda1 8:1 0 128M 0 part /boot\n\t\t`-sda6 8:6 0 128M 0 part /usr/share/oem\n\t\tNAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\n\t\tsdb 8:0 0 931.7G 0 disk \n\t\t`-sdb1 8:1 0 16G 0 part \n\t\t```\n\n## Become root user:\n\tCommand : ``` sudo su ```\n\n## Create /mnt/pd:\n\tCommand : ``` mkdir /mnt/pd ```\n\n## Mount the pendrive:\n\tCommand : ``` mount <device> /mnt/pd ```\n\tExample : ``` mount /dev/sdb1 /mnt/pd ```\n\n## Change the directory to /mnt/pd\n\tCommand : ``` cd /mnt/pd ```\n\tExpected output:\n\t\t```\n\t\tcore@clixserver ~ $ cd /mnt/pd\n\t\tcore@clixserver /mnt/pd $ \n\t\t```\n\n## Update command\t\t\t(After the patch is applied it will display the message \"Patch 4 update finished.\")\n\tCommand : ``` bash patch-r2.1.sh ```\n\n## Shutdown command\t\t\t(After the patch is finished, shutdown the system with the following command)\n\tCommand : ``` shutdown now ```\n\n======================================================================================================# \n\n\n# This file contains the details about the update patch. This file also includes how the update patch is prepared.\n\n\n## Update-patch number: (One commit no back of gstudio-docker)\n\tSyntax \t\t: \t``` update_patch-<gstudio-docker-repo-commit-no>-r<release-no>-<yyyymmdd> ```\n\tExact Number \t: \t``` update_patch-beb6af2-r2.1-20171229 ```\n\n## Version number: r1\n\n## Commit number\n### gstudio-docker:\t\t\t(https://github.com/mrunal4/gstudio-docker.git - master)\n\tShort\t\t\t:\t``` beb6af2 ```\n\tLong\t\t\t: \t``` beb6af265bd62b6dc34bb0acdfcdcedb6b2bccd0 ```\n\n### gstudio: \t\t\t(https://github.com/gnowledge/gstudio.git - master)\n\tShort\t\t\t:\t``` 225cf7b ```\n\tLong\t\t\t: \t``` 225cf7b5b8c11b916ee33488c5fc2e82ceaffa5d ```\n\n### qbank-lite:\t\t\t\t(https://github.com/gnowledge/qbank-lite.git - clixserver)\n\tShort\t\t\t:\t``` 23e2113 ```\n\tLong\t\t\t: \t``` 23e21133c51be72534868e6b1f29f5c38ad217ef ```\n\n### OpenAssessmentsClient:\t(https://github.com/gnowledge/OpenAssessmentsClient.git - clixserver)\n\tShort\t\t\t:\t``` 976b5fc ```\n\tLong\t\t\t: \t``` 976b5fca5a3cf1f058b242b897024e13d67b7c58 ```\n\n\n\n------------------------------------------------------------------------------------------------------\n\nRelease notes: CLIx Platform patch 2.1 \nver 17.12.r1\nRelease date: 29 December 2017\n\nABOUT\nThis release note pertains to the Connected Learning Initiative (CLIx) student platform aka CLIx platform patch 2.1 (henceforth, patch 2.1). \nThis patch should be applied in the states of CG, RJ and TS.\nThe release 2 patch-r2.1 should be applied on top of the CLIx platform set up by installing platform ver 17.06.r1 followed by ver 17.09.r1. That is,\n\n1) Install the CLIx platform using the installer (ver 17.06.r1)\n2) Apply release 2 (patch-r2) (ver 17.09.r1)\n3) Apply patch-r2.1 (ver 17.12.r1)\n\nDOWNLOAD\n1. Patch 2.1 link: https://clixplatform.tiss.edu/softwares/clix-schoolserver-updates/\n2. Open the `README-update.md` file as a text file and follow the instructions given to apply the patch\n\n\nDETAILS\nPlease find below details of fixes and features included in patch 2.1:\nCLIx Platform patch 2.1: RJ/CG/TS\n\n**Gstudio-Docker Features:**\n- Issue of adding the SSL-certificate exception multiple times on client machines (browsers) is fixed.\n\n---\n\n\n**Gstudio Features:**\n\n- LMS:\n - Module Enroll:\n - Provision for enrollment at Module level. Now with module level enrollment, all the units under it get enrolled.\n - Handled use cases with/without (varying) buddies.\n - Provided *Enroll* button on unit card.\n - Activity player header:\n - Added new buttons *Next Lesson*, *Previous Lesson*.\n - Unit name: provided tooltip, truncating to 25 characters.\n - Tooltips added for all actions and CSS updated.\n - Activity Player:\n - Made `Enroll` button more prominent from a visibility point of view.\n - Lesson state save:\n - In Lessons listing, the state of the content tree is saved when you leave the page and is retrieved till the next state change. 
This includes logout/browser-close use cases also\n - Keyboard support bound to traverse Lessons\n- My Desk:\n - Showing `Explore` button if no courses are enrolled.\n- Progress Report:\n - Display unit/group-name\n- Buddy:\n - Buddy selection widget: Arranged buddies to choose in alphabetical order\n- Datatables version updated:\n - Group rows for Quiz Responses\n - Export table content to csv and pdf formats\n\n---\n\n**Bug Fixes:**\n- LMS:\n - Enabled browser/default context-menu (right-click event) in course content tree (which was earlier overridden by jqtree)\n - Write Note: issue of opening the editor on redirection resolved.\n- Banner image made full width without distorting aspect ratio.\n- Tools: Policequad, logging issue with buddies fixed.\n- Accessibility issues:\n - Unit header label colors modified\n - Activity player header, including truncating Unit name\n- Login: Trimming username input field.\n- Quiz: bug of not saving the number of attempts resolved.\n\n---\n\n**gstudio/technical updates:**\n- Added `fab` commands to backup and restore the sql db.\n - `fab backup_psql_data`\n - `fab restore_psql_data:<backup file name with path>`\n- Release Scripts:\n - Created a new folder `doc/release-scripts/` to hold all release scripts.\n - Copied existing release scripts under it.\n - Added a new release script for patch 2.1: `release2-1_nov17.py`\n - This script will update misspelled usernames (both in `Author` and `User` objects).\n - Added a new `user-tokens.json` containing all tokens used for CLIx username generation.\n- Updated `bower.json` for the `datatables` plugin.\n- Implemented initial phase of Sphinx documentation for gstudio.\n - Created necessary files and folders under `doc/`.\n - Added pip dependencies in `requirements.txt` for same.\n\n---\n\n**Scripts to run after taking the pull:**\n- `pip install -r requirements.txt`\n- `bower install`\n- Execute the following within the python manage.py shell:\n - `execfile('../doc/deployer/release2-1_nov17.py')`\n\n---\n\nGit commit and file summary: https://github.com/gnowledge/gstudio/compare/17.10.r1...master\nIn case of any queries or to report an issue please contact [email protected]\n\n\n\n------------------------------------------------------------------------------------------------------"
},
{
"alpha_fraction": 0.5848193764686584,
"alphanum_fraction": 0.6716963648796082,
"avg_line_length": 48.70454406738281,
"blob_id": "4d1bbe2f0cb77d2e5507447d4365221ebbc094f5",
"content_id": "85193ea3b92d058c298af487594236edbac90339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2187,
"license_type": "no_license",
"max_line_length": 497,
"num_lines": 44,
"path": "/scripts/Backup-removal-script.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# touch /tmp/gstudio-20160118-1822.tar.bz2 /tmp/gstudio-20160119-1822.tar.bz2 /tmp/gstudio-20160120-1822.tar.bz2 /tmp/gstudio-20160121-1822.tar.bz2 /tmp/gstudio-20160122-1822.tar.bz2 /tmp/gstudio-20160123-1822.tar.bz2 /tmp/gstudio-20160124-1822.tar.bz2 /tmp/gstudio-20160125-1822.tar.bz2 /tmp/gstudio-20160126-1822.tar.bz2 /tmp/gstudio-20160127-1820.tar.bz2 /tmp/gstudio-20160127-1822.tar.bz2 /tmp/gstudio-20160128-1820.tar.bz2 /tmp/gstudio-20160129-1820.tar.bz2 /tmp/gstudio-20160129-1822.tar.bz2\n\nFILE_NAMES=( `ls /tmp/gstudio-*.tar.bz2 ` );\nDateTime_STAMP=$(date +%Y%m%d);\n#echo \"File names are: ${FILE_NAMES[@]} and Timestamp: $DateTime_STAMP\";\n((DateTime_STAMP1 = DateTime_STAMP - 3)); # : Today' s timestamp - No of days \necho \"Today' s Timestamp: $DateTime_STAMP and keep files between : $DateTime_STAMP and $DateTime_STAMP1\";\n\n# : Extract datetime-timestamp and create an array\ndeclare -a DT_FILE_NAMES=();\nfor i in \"${FILE_NAMES[@]}\";\ndo\n# echo \"i = $i\"; # : Testing purpose printing\n SUB=$(echo \"$i\" | cut -d'-' -f 2 );\n DT_FILE_NAMES=( \"${DT_FILE_NAMES[@]}\" \"$SUB\" );\ndone\n\n\n#echo \"Substring : ${DT_FILE_NAMES[@]}\"; # : Testing purpose printing\n#echo \"array : Len of array ${#FILE_NAMES[@]} : Len of first ${#FILE_NAMES} : Array ${FILE_NAMES[@]}\"; # : Testing purpose printing\n#echo \"array : Len of array ${#DT_FILE_NAMES[@]} : Len of first ${#DT_FILE_NAMES} : Array ${DT_FILE_NAMES[@]}\"; # : Testing purpose printing\n\n# : Remove element from array for deletion and create a new / final array for deletion of files\nfor i in \"${DT_FILE_NAMES[@]}\";\ndo\n# echo \"i = $i\"; # : Testing purpose printing\n if [ $i -gt $DateTime_STAMP1 ]; then\n\tFILE_NAMES=(${FILE_NAMES[@]/*$i*/} );\n#\techo \"New array of deletion: ${FILE_NAMES[@]}\";\n#\techo \"Remove element from array for deletion\"; # : Testing purpose printing\n# else\n#\techo \"No action\"; # : Testing purpose printing\n fi\ndone\n\n# : Final printing\necho \"File names selected for deletion: ${FILE_NAMES[@]}\"\necho \"File names before deletion :\" && ls /tmp/gstudio-*.tar.bz2 \nrm -rf \"${FILE_NAMES[@]}\";\necho \"File names after deletion :\" && ls /tmp/gstudio-*.tar.bz2 \necho \"Here:\"\nls /tmp/*$(date +%Y)*\n"
},
{
"alpha_fraction": 0.5988805890083313,
"alphanum_fraction": 0.7350746393203735,
"avg_line_length": 33.934783935546875,
"blob_id": "b2585bf24bc5b44786b10908e95106a94c199b9d",
"content_id": "7ae1063ce5c963ff24d6c0ed4bc778c0d7251119",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1608,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 46,
"path": "/scripts/images-copy.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n# for filename \n\n#filename=$(basename $(ls -dr /home/docker/code/patch-*/ | head -n 1));\n#patch=\"${filename%.*.*}\"; \n#patch=\"patch-7a6c2ac-r5-20190221\"; #earlier patch\n#patch=\"patch-26eaf18-r5-20190320\"; #latest patch\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n# Code to fix the image link started\necho -e \"\\n${cyan}change the directory to /data/ ${reset}\";\ncd /data/;\n\n#code to copy the images inside media started\n\necho -e \"\\n${cyan}Copying the images ${reset}\";\nsudo rsync -avPhz data-updates/05dcae904d485b9750d7fde5f4c05579259ed39e7195525913372f05270ef.png media/6/6/1;\nsudo rsync -avPhz data-updates/7162a4d5f721315b4ca4d9b304ccbacf9c5ac6584d9c5ce80d273cc0d03c4.png media/c/2/8;\nsudo rsync -avPhz data-updates/aa678b02f2c3a95bd6e44be64c6f27bb395e0a9c5960b83fa6670dad29d37.jpg media/c/2/9;\n\n#code to copy the images inside media ended\n\n#changing the directory\n\necho -e \"\\n${cyan}change the directory to /home/docker/code/gstudio/gnowsys-ndf/ ${reset}\";\ncd /home/docker/code/gstudio/gnowsys-ndf;\n\n# running the python file inside the python shell \n\necho -e \"\\n${cyan}running 'fix_absolute_imagelinks.py' file in the python shell\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/fix_absolute_imagelinks.py')\" | python manage.py shell;\n\n#Code to fix the image link ended\n\n"
},
{
"alpha_fraction": 0.520667314529419,
"alphanum_fraction": 0.548804759979248,
"avg_line_length": 28.711111068725586,
"blob_id": "ff6efcbeb94f8ee0c97f6a8084c5a03bfd3a88cd",
"content_id": "d61e7883035135a421726943f8772ff2e1017517",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4016,
"license_type": "no_license",
"max_line_length": 201,
"num_lines": 135,
"path": "/scripts/bulk-User-creation-csv.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n\n# Mrunal : Set HOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\nsed -e \"/HOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\nmore $file1/confs/deploy.conf | grep HOME; \n\n\nsource $file1/confs/deploy.conf\n\nOPTION=\"$1\";\nif [[ \"$1\" == \"\" ]]; then\n echo -e 'Please select the type of the users credentials: \\n 1. students \\n 2. teachers \\n';\n read OPTION ;\nfi\n \necho -e \"USER input : $OPTION\";\nif [[ \"$OPTION\" == \"\" ]]; then\n echo \"No input\";\n exit\nelif [[ \"$OPTION\" == \"1\" ]]; then\n echo -e \"\\nGenerating student details\";\n OPTION=\"students\";\nelif [[ \"$OPTION\" == \"2\" ]]; then\n echo -e \"\\nGenerating teachers details\";\n OPTION=\"teachers\";\nelse\n echo \"Invalid input\";\n exit\nfi\n\n# Mrunal : 20160131-2130 : Take user input as School id (small letter of initials of state and school no in 3 digit)\necho \"Please provide the id of School\" ;\necho \"(For example Rajasthan state and school 001 'r001' must be entered and hit Enter key of Keyboard){if need for all school please leave it empty}\" ;\nread sch_id_i ;\necho \"School id entered is $sch_id_i\" ;\n\n# Max no of school as per states\ncl=200; # chhattisgarh\nml=50; # mizoram\nrl=300; # rajasthan\nsl=150; # special interest ( for GNs , testing and clixs team use)\ntl=300; # telangana\n\n# Mrunal : 20160131-2130 : \nif [[ \"${sch_id_i}\" =~ ^[ct,mz,rj,tg,sp]{2}[0-9]{1}$ ]] || [[ \"${sch_id_i}\" =~ ^[ct,mz,rj,tg,sp]{2}[0-9]{2}$ ]] || [[ \"${sch_id_i}\" =~ ^[ct,mz,rj,tg,sp]{2}[0-9]{3}$ ]] || [[ \"${sch_id_i}\" = \"\" ]]; then\n echo \"School id matches the criteria. Continuing the process.\" ;\nelse\n echo \"School id doesn't match the criteria. Hence exiting please restart / re-run the script again.\" ;\n exit;\nfi\n\nfor (( states=1; states<5; states++ ));\ndo\n \n if [[ $states == 1 ]] && [[ \"$sch_id_i\" != \"\" ]]; then\n \tsch_id=\"$sch_id_i\";\n\tnuml=2;\n elif [[ $states == 2 ]] && [[ \"$sch_id_i\" != \"\" ]]; then\n \texit;\n elif [[ $states == 1 ]]; then\n\tnuml=$cl ;\n\tstat=\"ct\";\n elif [[ $states == 2 ]]; then\n\tnuml=$ml ;\n\tstat=\"mz\";\n elif [[ $states == 3 ]]; then\n\tnuml=$rl ;\n\tstat=\"rj\";\n elif [[ $states == 4 ]]; then\n\tnuml=$tl ;\n\tstat=\"tg\";\n elif [[ $states == 5 ]]; then\n\tnuml=$sl ;\n\tstat=\"sp\";\n fi\n \n for (( num=0; num<$numl; num++ ));\n do\n\t# Mrunal : 20160131-2130 : Take username and password from file and add the user. (username as \"username from file\"-\"school id\") \n\t#[ ! -f $INPUT ] && { echo \"$INPUT file not found\"; exit 99; }\n\tINPUT_FILE=\"${OPTION}-credentials-input.csv\" ;\n\tIFS=',' ;\n\tColor=();\n\tAnimal=();\n\tif [[ \"$sch_id_i\" == \"\" ]]; then\n#\t sch_id=\"$stat$(printf \"%03d\" $num)\";\n\t sch_id=\"$stat$num\";\n\tfi\n\ti=0 ;\n\n\twhile read Col1 ;\n\tdo\n\t Col1=${Col1// }; #remove leading spaces\n\t Col1=${Col1%% }; #remove trailing spaces\n\t if [[ \"${Col1}\" != \"\" ]]; then\n\t\tColor[$i]=$Col1;\n#\t\techo \"Color - ${Color[$i]}\" ;\n\t fi\n\t i=$((i+1))\n#\t echo \"i is ${i}\"\n\tdone < $INPUT_FILE\n\t\n#\techo \"Color : ${Color[@]} :\"\n\ti=0;\n\tfor (( c=0; c<${#Color[@]}; c++ ));\n\tdo\n\t\tUname=\"${Color[$c]}-$sch_id\";\n\t\techo \"Username-${i} - $Uname\" ;\n\t\tUPass=$(bash $HOME/scripts/gen-rand-passwd.sh);\n\t\techo \"Password - $UPass\" ;\n\t\tif [ ! 
-d $HOME/user-details ]; then\n\t\t mkdir $HOME/user-details;\n\t\t echo \"$HOME/user-details\"\n\t\tfi\n\t\techo \"$sch_id;$Uname;$UPass\" >> $HOME/user-details/$sch_id-$OPTION-details.csv;\n\t\tif [[ \"$sch_id_i\" == \"\" ]]; then\n\t\t echo \"$sch_id;$Uname;$UPass\" >> $HOME/user-details/all-$OPTION-details.csv;\n\t\tfi\n\t\t# \techo \"[run] create superuser $1\" ;\n\t\t# \techo \"from django.contrib.auth.models import User ;\n\t\t# if not User.objects.filter(username='$Uname').count():\n\t\t# User.objects.create_user('$Uname', '', '$UPass') \n\t\t# \" | python manage.py shell\n\t\ti=$((i+1))\n\t done\n\tdone\n done\ndone \nexit ;\n \n"
},
{
"alpha_fraction": 0.5205128192901611,
"alphanum_fraction": 0.5743589997291565,
"avg_line_length": 24.866666793823242,
"blob_id": "1ec13fb700a30e7b934a39fb950d515c5499171a",
"content_id": "0a4b5d9fc79ed9d14784f118e17009e574e7570f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 15,
"path": "/scripts/gen-rand-passwd.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Ref : http://www.cyberciti.biz/faq/linux-random-password-generator/\n# http://www.howtogeek.com/howto/30184/10-ways-to-generate-a-random-password-from-the-command-line/\ngenpasswd() {\n tr -dc a-z0-9 < /dev/urandom | head -c ${l} | xargs\n}\n\nif [[ \"$1\" == \"\" || \"$1\" == \"0\" || $1 -lt -25 || $1 -gt 25 ]]; then\n l=6;\nelse\n l=$1;\nfi\n#echo \"l=$1\";\ngenpasswd;\nexit;\n\t\n"
},
{
"alpha_fraction": 0.47830018401145935,
"alphanum_fraction": 0.4972875118255615,
"avg_line_length": 22.04166603088379,
"blob_id": "e9ea7f65f9572a63b0a0e5519385568ae9f04893",
"content_id": "583e522740f977da9d08b37e9bb0782ad2a52cf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1106,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 48,
"path": "/scripts/day-week-year.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nFILE_NAMES=();\n# Code for calculating 7 days --- days\nday=$(date --date=\"today\" +\"%Y%m%d\");\necho -e \"\\n$day ------ Days\";\necho;\ni=0;\nfor (( i=7; i>=1; i-- )); do\n day1=$(date --date=\"$i days ago\" +\"%Y%m%d\");\n echo \"i is $i... $day1\";\n PRESERVE_LIST+=(\"$day1\");\ndone \n\necho;\necho;\n\n# Code for calculating 7 wed --- weeks\nday=$(date --date=\"today\" +\"%Y%m%d\");\necho -e \"\\n$day ------ Weeks\";\necho;\nday=$(date --date=\"this Wed\" +\"%Y%m%d\");\necho \"This Wed: $day\";\necho;\ni=0;\nfor (( i=7; i>=1; i-- )); do\n day1=$(date --date=\"this Wed \"$i\" week ago\" +\"%Y%m%d\");\n echo \"i is $i... $day1\";\n PRESERVE_LIST+=(\"$day1\");\ndone \n\necho;\necho;\n\n# Code for calculating 7 years --- years\nday=$(date --date=\"today\" +\"%Y%m%d\");\necho -e \"\\n$day ------ Years\";\necho;\nday=$(date --date=\"today\" +\"%Y\");\necho \"This year: $day\";\necho;\ni=0;\nfor (( i=7; i>=1; i-- )); do\n day1=$(date --date=\"this year \"$i\" year ago\" +\"%Y\");\n echo \"i is $i... $day1\";\n PRESERVE_LIST+=(\"$day1\");\ndone \necho \"New array of preserving: ${PRESERVE_LIST[@]}\";\n"
},
{
"alpha_fraction": 0.6854731440544128,
"alphanum_fraction": 0.7090182304382324,
"avg_line_length": 46.86170196533203,
"blob_id": "9c007a8e54f62a3a31866f562054e806f6f4c305",
"content_id": "66e971b54621b4761f9fd3011eab0990f9019e17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4502,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 94,
"path": "/scripts/python-files-exec.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#This script is used to execute various \".py\" files in python shell \n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n##code for setting default language started\n\n# get server id (Remove single quote {'} and Remove double quote {\"})\nss_id=`echo $(echo $(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`;\n#ss_id=$(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | sed -n '/.*=/{p;q;}' | sed 's/.*= //g' | sed \"s/'//g\" | sed 's/\"//g')\n\n# Trim leading whitespaces \nss_id=$(echo ${ss_id##*( )});\n# Trim trailing whitespaces \nss_id=$(echo ${ss_id%%*( )});\n\n\necho -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\";\ncd /home/docker/code/gstudio/gnowsys-ndf/;\n\n# Variables related to \"set_language\" function (setting default language)\nstate_code=${ss_id:0:2};\nlanguage=\"No Idea\";\nif [ ${state_code} == \"ct\" ] || [ ${state_code} == \"rj\" ]; then\n echo -e \"\\n${cyan}State code is ${state_code}. Hence setting hi as language.${reset}\"\n language=\"hi\";\nelif [ ${state_code} == \"mz\" ]; then\n echo -e \"\\n${cyan}State code is ${state_code}. Hence setting en as language.${reset}\"\n language=\"en\";\nelif [ ${state_code} == \"tg\" ]; then\n echo -e \"\\n${cyan}State code is ${state_code}. Hence setting te as language.${reset}\"\n language=\"te\";\nelse\n echo -e \"\\n${red}Error: Oops something went wrong. Contact system administrator or CLIx technical team - Mumbai. 
($directoryname)${reset}\" ;\nfi \nsed -e \"/GSTUDIO_PRIMARY_COURSE_LANGUAGE/ s/=.*/= u'${language}'/\" -i /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py;\n\n\n##code for setting default language ended\n\n\n##code for execution of python files started\n\necho -e \"\\n${cyan} changing the directory to /home/docker/code/gstudio/gnowsys-ndf ${reset}\";\ncd /home/docker/code/gstudio/gnowsys-ndf;\n\n# Executing rectify_faultyassessment_iframetags.py\necho -e \"\\n${cyan}running 'rectify_faultyassessment_iframetags.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/deployer/rectify_faultyassessment_iframetags.py')\" | python manage.py shell;\n\n# Executing release2-1_nov17.py\necho -e \"\\n${cyan}running 'release2-1_nov17.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/release2-1_nov17.py')\" | python manage.py shell;\n\n# Executing delete_duplicate_authors.py\necho -e \"\\n${cyan}running 'delete_duplicate_authors.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/deployer/delete_duplicate_authors.py')\" | python manage.py shell;\n\n#code for syncing sp99, sp100 and cc user-csvs started\n\necho -e \"\\n${cyan}To sync sp99,sp100 and cc user csvs ${reset}\";\npython manage.py sync_users /home/docker/code/user-csvs/sp99_users.csv; #syncing sp99 user csvs\npython manage.py sync_users /home/docker/code/user-csvs/sp100_users.csv; #syncing sp100 user csvs\npython manage.py sync_users /home/docker/code/user-csvs/cc_users.csv; #syncing cc user csvs\n\n#code for syncing sp99, sp100 and cc user-csvs ended\n\n# Executing fix_for_multipletagid_toggler_modf.py\necho -e \"\\n${cyan}running 'fix_for_multipletagid_toggler_modf.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/fix_for_multipletagid_toggler_modf.py')\" | python manage.py shell;\n\n# Executing fix_stunted_transcript.py\necho -e \"\\n${cyan}running 'fix_stunted_transcript.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/fix_stunted_transcript.py')\" | python manage.py shell;\n\n# Executing fix_505error_of_enotes_upload.py\necho -e \"\\n${cyan}running 'fix_505error_of_enotes_upload.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/fix_505error_of_enotes_upload.py')\" | python manage.py shell;\n\n# Executing fix_notabletodraw_painturl.py\necho -e \"\\n${cyan}running 'fix_notabletodraw_painturl.py' file in the python shell ${reset}\";\necho \"execfile('/home/docker/code/gstudio/doc/release-scripts/fix_notabletodraw_painturl.py')\" | python manage.py shell;\n\n##code for execution of python file ended\n\n\n\n"
},
{
"alpha_fraction": 0.7345132827758789,
"alphanum_fraction": 0.7345132827758789,
"avg_line_length": 21.799999237060547,
"blob_id": "f7951e1af19c4ffd578d5cfd1e1f6fd58ac38a9e",
"content_id": "e8a2991b4b132370c54524314d3bed4846b1f1c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 5,
"path": "/scripts/start-qbank.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"[run] run qbank-lite app\"\ncd /home/docker/code/gstudio/gnowsys-ndf/qbank-lite\npython main.py &"
},
{
"alpha_fraction": 0.5664395689964294,
"alphanum_fraction": 0.5760263204574585,
"avg_line_length": 42.52757263183594,
"blob_id": "06c9bc2cb259df5136e79b973f3e925cd6033d40",
"content_id": "d667befc430f3e7f8d0b47e5634550598ef4841c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 29207,
"license_type": "no_license",
"max_line_length": 309,
"num_lines": 671,
"path": "/scripts/mrulogger.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#--------------------------------------------------------------------#\n# Logging and useful functions for bash \n# File name : mrulogger.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : Thu Jan 18 00:54:21 IST 2018\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for having logging and useful functions for bash.\n# Important : 1. Change / Add / Fill your detail in \"Fill in your details here\" block.\n# 2. Color code are used. Add more to it if needed\n# 3. LOG_LEVEL is set to 5 by default to have everything logged in.\n# 4. Watchout for different parameters for LOG functions\".\n# Future scope : 1. Documentation improvements.\n# 2. Cutdown the on number of lines.\n# 3. Have more no of valiadation code.\n# 4. Handle usage / command line arguments crisply\n# References : 1. \n# 2. http://www.goodmami.org/2011/07/04/Simple-logging-in-BASH-scripts.html\n#--------------------------------------------------------------------#\n\nexec 3>&2 # logging stream (file descriptor 3) defaults to STDERR\n\n################ Colored output details here ################ \n# Following variables are used to store the color codes for displaying the content on terminal\n#Ref: https://misc.flogisoft.com/bash/tip_colors_and_formatting\nreset=\"\\e[0m\" ;\nblack=\"\\e[38;5;0m\" ;\nred=\"\\e[38;5;1m\" ;\ngreen=\"\\e[38;5;2m\" ;\nyellow=\"\\e[38;5;3m\" ;\nblue=\"\\e[38;5;4m\" ;\npurple=\"\\e[38;5;5m\" ;\ncyan=\"\\e[38;5;6m\" ;\ngrey=\"\\e[38;5;245m\" ;\nwhite=\"\\e[38;5;256m\" ;\nbrown=\"\\e[38;5;94m\" ;\nlight_green=\"\\e[38;5;119m\" ;\npink=\"\\e[38;5;199m\" ;\norange=\"\\e[38;5;208m\" ;\nolive=\"\\e[38;5;100m\" ;\norange=\"\\e[38;5;208m\" ;\n#############################################################\n\n\n################# Fill in your details here ################# \n# Levels defined for different type of messages\nCRITICAL_LEVEL=0; \nERROR_LEVEL=1;\nWARNING_LEVEL=2;\nINFO_LEVEL=3;\nFUNCTION_NAME_LEVEL=4;\nSCRIPT_NAME_LEVEL=5;\n\n# Set Type of logs to print on screen as well as log in file\n# Highest numebr will have all types of logs and lowest will have only mentioned\n# if LOG_LEVEL=0, then CRITICAL_LEVEL\n# if LOG_LEVEL=1, then CRITICAL_LEVEL + ERROR_LEVEL\n# if LOG_LEVEL=2, then CRITICAL_LEVEL + ERROR_LEVEL + WARNING_LEVEL\n# if LOG_LEVEL=3, then CRITICAL_LEVEL + ERROR_LEVEL + WARNING_LEVEL + INFO_LEVEL\n# if LOG_LEVEL=4, then CRITICAL_LEVEL + ERROR_LEVEL + WARNING_LEVEL + INFO_LEVEL + FUNCTION_NAME_LEVEL\n# if LOG_LEVEL=5, then CRITICAL_LEVEL + ERROR_LEVEL + WARNING_LEVEL + INFO_LEVEL + FUNCTION_NAME_LEVEL + SCRIPT_NAME_LEVEL\nLOG_LEVEL=5;\n#############################################################\n\n\ndeclare selected_usb_disk; \ndeclare i n;\n\n################ Log file details are here ################## \n# Set the filename\nSCRIPT_LOG=$(pwd)/$0.log;\n# Set the filename\nif [ -f $SCRIPT_LOG ]; then\n echo -e \"${green}$SCRIPT_LOG already exist.${reset}\"; # Use echo here as it is called at startup\nelse\n touch $SCRIPT_LOG;\n echo -e \"${green}$SCRIPT_LOG doesn't exist. 
Hence created a blank file.${reset}\"; # Use echo here as it is called at startup\nfi\n#############################################################\n\n\n################ Functions written are here ################# \nfunction SCRIPT_ENTRY(){\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`; \n script_name=\"{SCRIPT name:\"\"${script_name%.*}\"\"}\";\n timeAndDate=`date`;\n log_msg=\"\\n${grey}[$timeAndDate] [SCRIPT_ENTRY] [ $script_name $source_filename] ${reset}\";\n LOG_PRINT $SCRIPT_NAME_LEVEL $log_msg;\n}\n\nfunction SCRIPT_EXIT(){\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n script_name=\"{SCRIPT name:\"\"${script_name%.*}\"\"}\";\n log_msg=\"\\n${grey}[$timeAndDate] [SCRIPT_EXIT] [ $script_name $source_filename] ${reset}\";\n LOG_PRINT $SCRIPT_NAME_LEVEL $log_msg;\n}\n\n# --- x ---\n\nfunction FUNCTION_ENTRY(){\n local function_name=\"{FUNCTION name: ${FUNCNAME[1]}}\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n script_name=\"{SCRIPT name:\"\"${script_name%.*}\"\"}\";\n timeAndDate=`date`;\n log_msg=\"\\n${grey}[$timeAndDate] [FUNCTION_ENTRY] [ $script_name $function_name $source_filename] ${reset}\";\n LOG_PRINT $FUNCTION_NAME_LEVEL $log_msg;\n}\n\nfunction FUNCTION_EXIT(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n script_name=\"{SCRIPT name:\"\"${script_name%.*}\"\"}\";\n timeAndDate=`date`;\n log_msg=\"\\n${grey}[$timeAndDate] [FUNCTION_EXIT] [ $script_name $function_name $source_filename] ${reset}\";\n LOG_PRINT $FUNCTION_NAME_LEVEL $log_msg;\n}\n\n# --- x ---\n\nfunction INFO(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local msg=\"$1\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n timeAndDate=`date`;\n if [ \"$3\" == \"\" ] || [ \"$#\" -le \"2\" ]; then\n font_color=\"olive\"; \n else\n eval font_color=$3;\n fi\n# log_msg=\"\\n${cyan}[$timeAndDate] [INFO MSG] [$script_name $function_name] $msg $source_filename ${reset}\" \n log_msg=\"\\n${grey}[$timeAndDate] [INFO MSG] [ $script_name $function_name $source_filename] \\n${!font_color}$msg ${reset}\";\n LOG_PRINT $INFO_LEVEL $log_msg;\n}\n\nfunction WARNING(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local msg=\"$1\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n timeAndDate=`date`;\n log_msg=\"\\n${grey}[$timeAndDate] [WARNING MSG] [ $script_name $function_name $source_filename] \\n${yellow}$msg ${reset}\";\n LOG_PRINT $WARNING_LEVEL $log_msg\n}\n\nfunction ERROR(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local msg=\"$1\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n timeAndDate=`date`;\n log_msg=\"\\n${grey}[$timeAndDate] [ERROR MSG] [ $script_name $function_name $source_filename] \\n${orange}$msg ${reset}\";\n LOG_PRINT $ERROR_LEVEL $log_msg;\n}\n\nfunction CRITICAL(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local msg=\"$1\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n script_name=`basename \"$0\"`;\n timeAndDate=`date`;\n 
log_msg=\"\\n${grey}[$timeAndDate] [CRITICAL MSG] [ $script_name $function_name $source_filename] \\n${red}$msg ${reset}\";\n LOG_PRINT $CRITICAL_LEVEL $log_msg;\n}\n\n# --- x ---\n\nfunction LOG_PRINT(){\n local function_name=\"{FUNCTION name: ${FUNCNAME[1]}}\";\n if [ \"$LOG_LEVEL\" == \"\" ]; then\n LOG_LEVEL=0;\n fi\n if [ \"$LOG_LEVEL\" -ge \"$1\" ]; then\n # Expand escaped characters, wrap at 70 chars, indent wrapped lines\n# echo -e \"$log_msg\" | fold -w70 -s | sed '2~1s/^/ /' ;\n echo -e \"$same_line_input_echo_argument\" \"$log_msg\" | tee -a $SCRIPT_LOG ;\n# echo -e \"$log_msg\" >&3; #Working - only prints in log file\n fi\n}\n\n# --- x ---\n\nfunction GET_INPUTS(){\n local function_name=\"; {FUNCTION name: ${FUNCNAME[1]}}\";\n local msg=\"$1\";\n local source_filename=\"; {SOURCE SCRIPT name: $2} \";\n CHECK_FOR_EMPTY_SOURCE;\n timeAndDate=`date`;\n if [ \"$3\" == \"\" ] || [ \"$#\" -le \"2\" ]; then\n font_color=\"olive\"; \n else\n eval font_color=$3;\n fi\n log_msg=\"\\n${grey}[$timeAndDate] [GET_INPUTS] [ $script_name $function_name $source_filename] \\n${!font_color}$msg ${brown}\";\n# log_msg=\"\\n${olive}[$timeAndDate] [GET_INPUTS] [ $script_name $function_name $source_filename] \\n$msg ${reset}\";\n same_line_input_echo_argument=\"-n\";\n LOG_PRINT $INFO_LEVEL $log_msg;\n read input\n echo -e ${reset};\n same_line_input_echo_argument=\"\";\n CHECK_COMMAND_STATUS;\n if [ \"${command_status_value}\" == \"Successful\" ]; then\n if [ \"${input}\" == \"\" ]; then\n CRITICAL \"Invalid input. Value entered is '$input'. Please try again.\";\n exit;\n else\n INFO \"Input has some value. Value entered is '$input'\" \"\" \"green\"; \n fi\n else\n ERROR \"Oops something went wrong.\";\n fi\n}\n\n# --- x ---\n\n# Check if source is empty \nfunction CHECK_FOR_EMPTY_SOURCE(){\n if [[ \"$source_filename\" == \"; {SOURCE SCRIPT name: } \" ]]; then\n source_filename=\"\";\n fi\n}\n\n# Check for command execution status (Correct execution returns 0 else the error code. Hence print sucess/failed message with return value{response code})\nfunction CHECK_COMMAND_STATUS(){\n cmd_status_val=$?;\n if [ $cmd_status_val -eq 0 ]; then\n# \tlocal function_name=\"{FUNCTION name: ${FUNCNAME[1]}}\";\n INFO \"Command execution successful. (Execution value: $cmd_status_val)\" \"\" \"green\";\n command_status_value=\"Successful\"; \n else\n ERROR \"Command execution failed. (Execution value: $cmd_status_val)\";\n command_status_value=\"Failed\";\n fi\n# local function_name=\"{FUNCTION name: ${FUNCNAME[1]}}\";\n\n} \n\n# Fetch all connected USB disknames\nfunction GET_USB_DISKNAMES(){\n i=0;\n \n # Store list of disknames in an arrary\n all_disknames=($(lsblk | grep disk | awk '{print $1}'));\n\n # for each disknames\n for ((d=0; d<${#all_disknames[@]}; d++));\n do\n # Find connected disknames from all disk names 1 by 1\n find /dev/disk/by-id/ -lname \"*${all_disknames[$d]}\" | grep usb ; \n if [ $? -eq 0 ]; then\n # Store list of USB disknames in an arrary\n usb_disknames[$i]=\"${all_disknames[$d]}\";\n ((i++));\n fi\n done\n}\n# Check whether \"/\" is mounted from inserted disk or not...\nfunction CHECK_IF_ROOT_MOUNTED_FROM_USB_DISK(){\n # Fetch all connected USB disknames\n GET_USB_DISKNAMES;\n\n # for each USB disknames\n for ((d=0; d<${#usb_disknames[@]}; d++));\n do\n df -h | grep -w / | grep \"${usb_disknames[$d]}\"; \n if [ $? 
-eq 0 ]; then\n GET_INPUTS \"'\\' (root patition) is mounted from connected USB disk.\\nIdeally it should not be the case.\\nPlease consult before going ahead.\\n\\nDo you want to continue?(Y/N)\";\n eval continue_root_mounted_from_usb_disk=$input;\n if [[ \"$continue_root_mounted_from_usb_disk\" == \"\" ]]; then\n CRITICAL \"No input. Hence exiting. Please try again later.\\n\" \"\";\n exit;\n elif [[ \"$continue_root_mounted_from_usb_disk\" == \"n\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"N\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"no\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"No\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"NO\" ]]; then\n INFO \"Input is '$continue_root_mounted_from_usb_disk'. Hence exiting. Thank you.\\n\" \"\" \"pink\";\n exit;\n elif [[ \"$continue_root_mounted_from_usb_disk\" == \"y\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"Y\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"yes\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"Yes\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"YES\" ]]; then\n INFO \"Input is '$continue_root_mounted_from_usb_disk'. Continuing the process.\\n\" \"\" \"green\";\n break;\n else\n CRITICAL \"Input is '$continue_root_mounted_from_usb_disk'. Oops!!! Something went wrong.\\n\" \"\";\n exit;\n fi\n fi\n done \n}\n\n# Check whether inserted disk is correct or not...\nfunction CHECK_CORRECT_USB_DISK(){\n if [ ! -z \"$selected_usb_disk\" ]; then\n lsblk | grep part | grep \"$selected_usb_disk\";\n if [ $? -eq 0 ]; then\n MOUNT_PARTITION \"/dev/$selected_usb_disk\" \"/mnt/\";\n if [ -d \"/mnt/home/core/data\" ] && [ -d \"/mnt/home/core/setup-software\" ]; then\n correct_usb_disk=\"Found\";\n INFO \"We found that you have already selected ${usb_disknames[$d]}$i partition and also detected required files for installations in the same. Hence continuing the process.\" \"\" \"green\";\n return 0; # To directly exit this function\n fi\n fi\n fi\n\n # Fetch all connected USB disknames\n GET_USB_DISKNAMES;\n\n if [ ${#usb_disknames[@]} -eq 0 ]; then\n for (( n=1; n<=5; n++ )); \n do\n GET_USB_DISKNAMES;\n if [ ${#usb_disknames[@]} -eq 0 ]; then\n sleep 5;\n INFO \"Waiting for the installer (pen drive / portable HDD). (Try $n of 5)\" \"\" \"orange\";\n if [[ $n == 5 ]]; then\n CRITICAL \"Installer (pen drive / portable HDD) not found. Retry installation.\";\n disk_status=\"Not_found\";\n #exit; # For testing comment here\n fi\n elif [ ${#usb_disknames[@]} -gt 0 ]; then\n INFO \"Disk found. Verifying the disk.\";\n disk_status=\"Found\";\n break\n fi\n done\n fi \n correct_usb_disk=\"NotFound\";\n # for each USB disknames\n for ((d=0; d<${#usb_disknames[@]}; d++));\n do\n for i in {1,2,3,9}; \n do\n # Check whether partition exist\n lsblk | grep part | grep ${usb_disknames[$d]}$i;\n if [ $? -eq 0 ]; then\n MOUNT_PARTITION \"/dev/${usb_disknames[$d]}$i\" \"/mnt/\";\n if [ -d \"/mnt/home/core/data\" ] && [ -d \"/mnt/home/core/setup-software\" ]; then\n correct_usb_disk=\"Found\";\n INFO \"We have detected required files for installations in ${usb_disknames[$d]}$i partition of ${usb_disknames[$d]} disk.\" \"\" \"green\";\n GET_INPUTS \"Is this your installer disk?(Y/N)\";\n eval continue_root_mounted_from_usb_disk=$input;\n if [[ \"$continue_root_mounted_from_usb_disk\" == \"\" ]]; then\n CRITICAL \"No input. 
Hence exiting and continuing with the process.\" \"\";\n elif [[ \"$continue_root_mounted_from_usb_disk\" == \"n\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"N\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"no\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"No\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"NO\" ]]; then\n INFO \"Input is '$continue_root_mounted_from_usb_disk'. Hence exiting. Thank you.\" \"\" \"pink\";\n elif [[ \"$continue_root_mounted_from_usb_disk\" == \"y\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"Y\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"yes\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"Yes\" ]] || [[ \"$continue_root_mounted_from_usb_disk\" == \"YES\" ]]; then\n INFO \"Input is '$continue_root_mounted_from_usb_disk'. Continuing the process.\" \"\" \"green\";\n #UMOUNT_PARTITION \"/dev/${usb_disknames[$d]}$i\" \"/mnt/\";\n correct_usb_disk=\"FoundAndContinue\";\n selected_usb_disk=\"${usb_disknames[$d]}$i\";\n break;\n else\n CRITICAL \"Input is '$continue_root_mounted_from_usb_disk'. Oops!!! Something went wrong.\" \"\";\n fi\n fi\n UMOUNT_PARTITION \"/dev/${usb_disknames[$d]}$i\" \"/mnt/\";\n fi\n done\n done\n if [ \"$correct_usb_disk\" == \"Found\" ]; then\n CRITICAL \"Disk was having required files for installations. But you have denied for continuing the installation process with this disk.\\nHence please insert correct disk and try again later.\";\n correct_usb_disk=\"NotFound\";\n exit;\n elif [ \"$correct_usb_disk\" == \"FoundAndContinue\" ]; then\n echo \"\";\n else\n CRITICAL \"This disk is not disk we are looking for. Please insert correct disk and try again.\";\n correct_usb_disk=\"NotFound\";\n exit;\n fi\n}\n\nfunction MOUNT_PARTITION(){\n UMOUNT_PARTITION \"$1\" \"$2\" \"$3\";\n if [ -z mount_options ]; then\n mount_options=\"\";\n fi\n INFO \"mounting $1 in $2\" \"\" \"cyan\"; \n# sudo mount $mount_options $mount_source $mount_destination;\n sudo mount $3 $1 $2;\n}\nfunction UMOUNT_PARTITION(){\n if [ -z mount_options ]; then\n mount_options=\"\";\n fi\n INFO \"unmounting $1 in $2\" \"\" \"cyan\"; \n# sudo umount $mount_options $mount_source $mount_destination;\n sudo umount $3 $1 $2;\n}\n\nfunction CHECK_FILE_EXISTENCE(){\n local filename=\"$1\";\n if [[ -f $filename ]]; then\n INFO \"File ($filename) already exists.\" \"\" \"green\";\n file_existence_status=\"Present\";\n elif [[ ! -f $filename ]]; then\n INFO \"File ($filename) doesn't exists.\" \"\" \"yellow\";\n file_existence_status=\"Not_Present\";\n if [[ \"$2\" == \"create\" ]]; then\n touch $filename;\n if [ \"$?\" == \"0\" ]; then\n INFO \"File ($filename) doesn't exists. Got signal to create the same. Hence created successfully.\" \"\" \"green\";\n directory_existence_status=\"Present\";\n else\n INFO \"File ($filename) doesn't exists. Got signal to create the same. Unfortunately failed to create.\" \"\" \"yellow\";\n fi\n fi\n else\n ERROR \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($filename)\" \"\" \"orange\";\n fi \n}\n\nfunction CHECK_DIRECTORY_EXISTENCE(){\n directoryname=\"$1\";\n if [[ -d $directoryname ]]; then\n INFO \"Directory ($directoryname) already exists.\" \"\" \"green\";\n directory_existence_status=\"Present\";\n elif [[ ! 
-d $directoryname ]]; then\n INFO \"Directory ($directoryname) doesn't exists.\" \"\" \"yellow\";\n directory_existence_status=\"Not_Present\";\n if [[ \"$2\" == \"create\" ]]; then\n mkdir -p $directoryname;\n if [ \"$?\" == \"0\" ]; then\n INFO \"Directory ($directoryname) doesn't exists. Got signal to create the same. Hence created successfully.\" \"\" \"green\";\n directory_existence_status=\"Present\";\n else\n INFO \"Directory ($directoryname) doesn't exists. Got signal to create the same. Unfortunately failed to create.\" \"\" \"yellow\";\n fi\n fi\n else\n ERROR \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($directoryname)\" \"\" \"orange\";\n fi \n}\n\n# This function will check the type of content (file, directory or No idea)\nfunction TYPE_OF_CONTENT(){\n # Type of content\n # D=Directory\n # F=File\n # L=Soft link\n # N=No idea\n content=\"$1\";\n if [[ -d $content ]]; then\n INFO \"$content is a directory\" \"\" \"green\";\n content_type=\"D\";\n elif [[ -f $content ]]; then\n INFO \"$content is a file\" \"\" \"green\";\n content_type=\"F\";\n elif [[ -L $content ]]; then\n INFO \"$content is a soft link\" \"\" \"green\";\n content_type=\"L\";\n else\n INFO \"$content is not valid\" \"\" \"green\";\n content_type=\"N\";\n #exit 1;\n fi\n}\n\n\n# This function will validate, copy the content increamentally from source to destination. (Can take care partial copy)\n# + Directory existence \n# + Data integrity\n# + In case of partial copy rsync will handle it\nfunction RSYNC_CONTENT(){\n CHECK_DIRECTORY_EXISTENCE \"$destination_path\" \"create\"\n \n if [ \"$directory_existence_status\" == \"Present\" ]; then\n INFO \"Destination directory exists. Hence proceeding to check for the source content.\" \"\" \"green\";\n\t\n TYPE_OF_CONTENT \"$source_path\";\n \tif [ \"$content_type\" == \"F\" ] || [[ \"$source_path\" =~ \".\" ]] ; then\n \n\t CHECK_FILE_EXISTENCE \"$source_path\"\n \n \t if [ \"$file_existence_status\" == \"Present\" ]; then\n INFO \"Source file exists. Hence proceeding to copy the content.\" \"\" \"green\";\n\t \n INFO \"copy clix-server data and necessary files from $source_path to $destination_path. \\nThis may take time, please be patient. (Approx 15-30 min depending on the system performance)\" \"\" \"green\";\n sudo rsync -avPh \"$1\" \"$source_path\" \"$destination_path\"; # For testing comment here\n \t CHECK_COMMAND_STATUS;\n elif [ \"$file_existence_status\" == \"Not_Present\" ]; then\n CRITICAL \"Source file doesn' t exists. Hence skipping the process of copying the content and continuing with the process\";\n exit 1; \t\t # For continuing please comment herer \n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n \telif [ \"$content_type\" == \"D\" ] || [[ \"$source_path\" != *\".\"* ]]; then\n \n\t CHECK_DIRECTORY_EXISTENCE \"$source_path\"\n \n \t if [ \"$directory_existence_status\" == \"Present\" ]; then\n INFO \"Source directory exists. Hence proceeding to copy the content.\" \"\" \"green\";\n\t \n INFO \"copy clix-server data and necessary files from $source_path to $destination_path. \\nThis may take time, please be patient. 
(Approx 15-30 min depending on the system performance)\" \"\" \"green\";\n sudo rsync -avPh \"$1\" \"$source_path\" \"$destination_path\"; # For testing comment here\n \t CHECK_COMMAND_STATUS;\n elif [ \"$directory_existence_status\" == \"Not_Present\" ]; then\n CRITICAL \"Source directory doesn' t exists. Hence skipping the process of copying the content and continuing with the process\";\n exit 1;\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n\telse\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n elif [ \"$directory_existence_status\" == \"Not_Present\" ]; then\n CRITICAL \"Destination directory doesn' t exists. Hence skipping the process of copying the content and continuing with the process\";\n exit 1;\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n}\n\nfunction CHECK_FOR_ALREADY_LOADED_DOCKER_IMAGE(){ #docker_load_validation\n #echo \"docker_image_name:$docker_image_name\" # For testing uncomment here\n docker images | grep $docker_image_grep_name;\n CHECK_COMMAND_STATUS;\n if [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"$docker_image_name docker image already loaded.\" \"\" \"green\";\n elif [ \"${command_status_value}\" == \"Failed\" ]; then\n CRITICAL \"$docker_image_name docker image is not loaded.\" \"\";\n LOADING_DOCKER_IMAGE;\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n}\n\nfunction LOADING_DOCKER_IMAGE(){ #docker_load\n CHECK_FILE_EXISTENCE $docker_image_path;\n if [ \"$file_existence_status\" == \"Present\" ]; then\n INFO \"Loading $docker_image_name docker image\" \"\" \"green\";\n WARNING \"caution : it may take long time\";\n docker load < $docker_image_path; # For testing comment here\n CHECK_COMMAND_STATUS;\n if [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"$docker_image_name docker image loaded successfully.\" \"\" \"green\";\n else\n CRITICAL \"$docker_image_name docker image could not be loaded.\\nPlease try again\" \"\";\n exit 1;\n fi\n elif [ \"$file_existence_status\" == \"Not_Present\" ]; then\n CRITICAL \"$docker_image_path docker image tar file could not be located/found.\\nPlease try again\" \"\";\n echo \"0\" > $setup_progress_status_filename;\n exit 1;\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n}\n\nfunction CHECK_FOR_ALREADY_STARTED_DOCKER_CONTAINER(){\n docker ps -a | grep $docker_container_name; # >> /dev/null\n CHECK_COMMAND_STATUS;\n if [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"$docker_image_name docker container already started.\" \"\" \"green\";\n elif [ \"${command_status_value}\" == \"Failed\" ]; then\n STARTING_DOCKER_CONTAINER;\n else\n CRITICAL \"Error: Oops something went wrong. 
Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n}\n\nfunction STARTING_DOCKER_CONTAINER(){\n INFO \"Running $docker_container_name docker container\" \"\" \"green\";\n WARNING \"caution : it may take long time\";\n# docker run $docker_flag $docker_volumes $docker_ports --name=\"$docker_container_name\" $docker_image_name; # For testing comment here\n CHECK_FILE_EXISTENCE $docker_compose_filename;\n if [ \"$file_existence_status\" == \"Present\" ]; then\n INFO \"Loading $docker_container_name docker container\" \"\" \"green\";\n WARNING \"caution : it may take long time\";\n docker-compose -f $docker_compose_filename up -d;\n CHECK_COMMAND_STATUS;\n if [ \"${command_status_value}\" == \"Successful\" ]; then\n INFO \"$docker_container_name docker container started successfully.\" \"\" \"green\";\n else\n CRITICAL \"$docker_container_name docker container could not be started.\\nPlease try again\" \"\";\n exit 1;\n fi\n elif [ \"$file_existence_status\" == \"Not_Present\" ]; then\n CRITICAL \"$docker_compose_filename docker-compose file could not be located/found.\\nPlease try again\" \"\";\n echo \"0\" > $setup_progress_status_filename;\n exit 1;\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai.\";\n exit 1;\n fi\n \n}\n\nfunction GET_SETUP_PROGRESS(){\n CHECK_FILE_EXISTENCE $1 \"create\";\n setup_progress_status=$(more $1);\n INFO \"Setup progress status value:${setup_progress_status}\" \"\" \"blue\";\n}\nfunction SET_SETUP_PROGRESS(){\n CHECK_FILE_EXISTENCE $1 \"create\";\n echo -e \"$2\" > $1;\n setup_progress_status=$(more $1);\n INFO \"Setup progress status value:${setup_progress_status}\" \"\" \"blue\";\n}\n\nfunction SET_LANGUAGE(){\n state_code=\"$1\";\n if [ ${state_code} == \"ct\" ] || [ ${state_code} == \"rj\" ]; then\n INFO \"State code is ${state_code}. Hence setting hi as language.\" \"\" \"cyan\"\n language=\"hi\";\n elif [ ${state_code} == \"mz\" ]; then\n INFO \"State code is ${state_code}. Hence setting en as language.\" \"\" \"cyan\"\n language=\"en\";\n elif [ ${state_code} == \"tg\" ]; then\n INFO \"State code is ${state_code}. Hence setting te as language.\" \"\" \"cyan\"\n language=\"te\";\n else\n CRITICAL \"Error: Oops something went wrong. Contact system administator or CLIx technical team - Mumbai. ($directoryname)\";\n exit 1;\n fi \n}\n\n\n#############################################################\n\n\n\n################ Usage block details are here ############### \n# This is for handling command line arguments(in this case OPTIONS) passed while executing the file\n# usage message will be displayed when invalid argument is passed or invalid value for the argument is passed to provide help text.\nusage() {\n echo -e \"${cyan}Usage:\\n $0 [OPTIONS] \\nOptions: \\n -h : display this help message \\n -l : log / verbosity level (0-5) \\n -f FILE : redirect logging to FILE instead of STDERR. Please provide full path${reset}\"\n}\n\nwhile getopts \":hl:loglevel:f:\" opt; do\n case \"$opt\" in\n h) usage; exit 0 ;;\n l) LOG_LEVEL=$OPTARG ;;\n f) SCRIPT_LOG=$OPTARG ;;\n# l) exec 3>>$OPTARG ;;\n \\?) 
echo -e \"${red}Invalid options: -$OPTARG ${reset}\"; usage; exit 1 ;;\n :) echo -e \"${red}Invalid options argument(value): -$OPTARG requires an argument ${reset}\"; usage; exit 1 ;;\n \\*) echo -e \"${red}Invalid options: $1 ${reset}\"; usage; exit 1 ;;\n esac\ndone\nshift $((OPTIND -1)) # Here i need to come back : https://dustymabe.com/2013/05/17/easy-getopt-for-a-bash-script/ \nwhile getopts \":hl:loglevel:f:\" opt; do\n case \"$opt\" in\n h) usage; exit 0 ;;\n \"l\"|\"loglevel\") LOG_LEVEL=$OPTARG ;;\n f) SCRIPT_LOG=$OPTARG ;;\n# l) exec 3>>$OPTARG ;;\n \\?) echo -e \"${red}Invalid options: -$OPTARG ${reset}\"; usage; exit 1 ;;\n :) echo -e \"${red}Invalid options argument(value): -$OPTARG requires an argument ${reset}\"; usage; exit 1 ;;\n \\*) echo -e \"${red}Invalid options: $1 ${reset}\"; usage; exit 1 ;;\n esac\ndone\nshift $((OPTIND -1)) # This \n\n#############################################################\n"
},
{
"alpha_fraction": 0.6648606657981873,
"alphanum_fraction": 0.7263931632041931,
"avg_line_length": 50.63999938964844,
"blob_id": "6b8740c474160350f12a48c31b5500f762cd96ec",
"content_id": "fecfeb8118afb663e5fc4dd374300d72a208c5b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2584,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 50,
"path": "/scripts/tools-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n\n#filename=$(basename $(ls /mnt/update_*.tar.gz | head -n 1));\n#update_patch=\"${filename%.*.*}\";\n#update_patch=\"update_patch-beb6af2-r2.1-20171229\"\n#patch=$(basename $(tar -tf /mnt/patch-*.tar.gz | head -n 1));\npatch=\"update-patch-c0463c5-r6-20190718\";\n\n#echo -e \"\\n${cyan}copy updated patch from /mnt/${update_patch}/tools-updates/setup-software/* to /home/core/setup-software/ ${reset}\";\n#sudo rsync -avzPh /mnt/${update_patch}/tools-updates/setup-software/* /home/core/setup-software/;\n\n#echo -e \"\\n${cyan}remove /home/core/setup-software/Tools/biomechanic ${reset}\";\n#sudo rm -rf /home/core/setup-software/Tools/biomechanic;\n\n# code to execute script git-offline-tools-update.sh inside the container\necho -e \"\\n${cyan}Update various tools offline ${reset}\";\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /home/docker/code/${patch}/tools-updates/git-offline-tools-update.sh\";\n\n\n#copying the chrome version 69 and firefox version 65.0.1 for windows and ubuntu\necho -e \"\\n${cyan}Copying the chromev69 folder from patch folder to setup-software ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/tools-updates/chrome_v69 /home/core/setup-software/i2c-softwares/Browsers/ ;\n\necho -e \"\\n${cyan}Copying the firefox_v65.0.1 folder from patch folder to setup-software ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/tools-updates/firefox_v65.0.1 /home/core/setup-software/i2c-softwares/Browsers/ ;\n\n#copying the libre office version 6.1.5.2 for ubuntu\necho -e \"\\n${cyan}Copying the LibreOffice_6.1.5.2_Linux_x86-64_deb folder from patch folder to setup-software ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/tools-updates/LibreOffice_6.1.5.2_Linux_x86-64_deb /home/core/setup-software/i2c-softwares/LibreOffice/ ;\n\n#copying the scratch version 1.4.0.6 for ubuntu\necho -e \"\\n${cyan}Copying the scratch_1.4.0.6_dfsg1-5_all.deb package from patch folder to setup-software ${reset}\";\nsudo rsync -avPhz /mnt/update-patch-r6/${patch}/tools-updates/scratch_1.4.0.6_dfsg1-5_all.deb /home/core/setup-software/i2c-softwares/Scratch/ ;\n\n#installing google chrome version 69 \necho -e \"\\n${cyan}Installing Google Chrome Version 69 ${reset}\";\nsudo dpkg -i /home/core/setup-software/i2c-softwares/Browsers/chrome_v69/google-chrome-stable_v69_amd64.deb;\n\n\n"
},
{
"alpha_fraction": 0.6940988898277283,
"alphanum_fraction": 0.6979266405105591,
"avg_line_length": 54,
"blob_id": "668967494a2e9104bb2f9b782b6a6182f3142347",
"content_id": "92cf66ff9746f265bc9422e1338aa846ea074940",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3135,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 57,
"path": "/scripts/patch-rollback/2.1/rollback-patch-2.1-host.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# File to be triggered from host system\n\npatch_no=\"2.1\";\n\n# get server id (Remove single quote {'} and Remove double quote {\"})\nss_id=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\nss_id=`tr -dc '[[:print:]]' <<< \"$ss_id\"`\n\n# get state code\nstate_code=${ss_id:0:2};\n\n# get server code (Remove single quote {'} and Remove double quote {\"})\nss_code=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID_SECONDARY | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\nss_code=`tr -dc '[[:print:]]' <<< \"$ss_code\"`\n\n# get server name (Remove single quote {'} and Remove double quote {\"})\n#ss_name=`docker exec -it gstudio bash -c \"more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_NAME | sed 's/.*=//g' | sed \\\"s/'//g\\\" | sed 's/\\\"//g'\"`\nss_name=`tr -dc '[[:print:]]' <<< \"$ss_name\"`\n\n# dir name\ndir_name=${patch_no}-${ss_id}-$(date +%Y%m%d-%H%M%S);\n\nif [[ ! -d /mnt/${dir_name}/ ]]; then\n mkdir -p /mnt/${dir_name}/\nfi\n\necho -e \"copy rollback-patch-2.1-container.sh from /mnt/ in /home/core/setup-software/\"\nrsync -avzPh /mnt/rollback-patch-2.1-container.sh /mnt/git-log-details.sh /home/core/setup-software/;\n\necho -e \"Trigger/execute git-log-details.sh inside gstudio container (before)\"\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /softwares/git-log-details.sh > /softwares/git-log-details-before.log\";\n\necho -e \"\\nBackup oac-index.html(/home/core/setup-software/oac/index.html) in /mnt/${dir_name}/index-oac-before.html \\n\" \nrsync -avzPh /home/core/setup-software/oac/index.html /mnt/${dir_name}/index-oac-before.html\n\necho -e \"Trigger/execute rollback-patch-2.1-container.sh inside gstudio container\"\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /softwares/rollback-patch-2.1-container.sh\";\n\necho -e \"rename oac and oat dir inside /home/core/setup-software/ to oac-cs.tiss.edu and oat-cs.tiss.edu respectively\"\nmv /home/core/setup-software/oac /home/core/setup-software/oac-cs.tiss.edu;\nmv /home/core/setup-software/oat /home/core/setup-software/oat-cs.tiss.edu;\n\necho -e \"rsync/copy new oac and oat from /mnt/oac-oat to /home/core/setup-software/\"\nrsync -avzPh /mnt/oac-oat/oac oac-oat/oat /home/core/setup-software/;\n\necho -e \"Trigger/execute git-log-details.sh inside gstudio container (after)\"\ndocker exec -it gstudio /bin/sh -c \"/bin/bash /softwares/git-log-details.sh > /softwares/git-log-details-after.log\";\n\necho -e \"\\nBackup oac-index.html(/home/core/setup-software/oac/index.html) in /mnt/${dir_name}/index-oac-after.html \\n\" \nrsync -avzPh /home/core/setup-software/oac/index.html /mnt/${dir_name}/index-oac-after.html\n\n\n\n# move new oac and oat from /home/core/setup-software/ to /mnt/${dir_name}/ \nrsync -avzPh /home/core/setup-software/git-log-details-before.log /home/core/setup-software/git-log-details-after.log /home/core/data/server_settings.py /home/core/setup-software/git-log-details-before.log /mnt/${dir_name}/;\n"
},
{
"alpha_fraction": 0.5740803480148315,
"alphanum_fraction": 0.5788052678108215,
"avg_line_length": 35.58024597167969,
"blob_id": "6c8e1861f73b0c492c892fd076c5c27223060f8d",
"content_id": "38931652c342ba1e6d7f7d6043785a374f0af722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2963,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 81,
"path": "/scripts/install-to-disk.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsource ./mrulogger.sh;\n\nSCRIPT_ENTRY ;\n\nfunction install-to-disk(){\n\n sudo dmesg -n1;\n\n # Mrunal : below part is added by Mrunal as suggested by Nagarjuna\n INFO \"\\n\\n\\n*** CLIx Server Installer - Release Version May 2018 ***\\n\\n\n School server installation \\n\\n\n Note : \\nThis installation is a one-time or infrequently used process.\n It uses a terminal-console which may show many details during installation.\n This is normal. Please let the installation proceed.\\n\" \"$BASH_SOURCE\" \"cyan\";\n \n sleep 5\n \n INFO \"\\n${cyan}Starting clix ${reset}\" \"$BASH_SOURCE\" \"cyan\";\n docker start gstudio\n INFO \"\\n\\n\\n\" \"$BASH_SOURCE\" \"cyan\";\n \n sleep 5\n\n check_disk_h=`lsblk | grep TYPE`\n check_disk_d=`lsblk | grep disk`\n GET_INPUTS \"Name the disk where do you want to install the server?\n (For example 'sda' or 'sdb' or 'sdc')\n {if you are not sure and want to exit simply type enter}\n \\n$check_disk_h \\n$check_disk_d\n \\ndisk name (with help of above information) :\" \"$BASH_SOURCE\" \"olive\";\n\n eval disk_i=$input;\n if [[ \"$disk_i\" == \"\" ]]; then\n\n CRITICAL \"No input. Hence exiting. Please try again later.\" \"$BASH_SOURCE\";\n exit;\n\n else \n\n INFO \"Disk entered is $disk_i \\n\" \"$BASH_SOURCE\" \"green\";\n check_disk=`lsblk | grep $disk_i | grep disk | wc -l`\n if [[ \"$check_disk\" != \"1\" ]]; then\n CRITICAL \"Invalid input. Hence exiting. Please try again later.\" \"$BASH_SOURCE\";\n exit;\n fi\n\n GET_INPUTS \"\\nCaution: \\nIt will format $disk_i disk \\nAre you sure you want to proceed?\\nY/N :\" \"$BASH_SOURCE\" \"red\";\n\n eval part_format_i=$input;\n if [[ \"$part_format_i\" == \"\" ]]; then\n\n CRITICAL \"No input. Hence exiting. Please try again later.\\n\" \"$BASH_SOURCE\";\n exit;\n\n elif [[ \"$part_format_i\" == \"n\" ]] || [[ \"$part_format_i\" == \"N\" ]] || [[ \"$part_format_i\" == \"no\" ]] || [[ \"$part_format_i\" == \"No\" ]] || [[ \"$part_format_i\" == \"NO\" ]]; then\n\n INFO \"Input is '$part_format_i'. Hence exiting. Thank you.\\n\" \"$BASH_SOURCE\" \"pink\";\n exit;\n\n elif [[ \"$part_format_i\" == \"y\" ]] || [[ \"$part_format_i\" == \"Y\" ]] || [[ \"$part_format_i\" == \"yes\" ]] || [[ \"$part_format_i\" == \"Yes\" ]] || [[ \"$part_format_i\" == \"YES\" ]]; then\n\n INFO \"Input is '$part_format_i'. Continuing the process.\\n\" \"$BASH_SOURCE\" \"green\";\n\n else\n\n CRITICAL \"Input is '$part_format_i'. Oops!!! Something went wrong.\\n\" \"$BASH_SOURCE\";\n exit;\n fi\n \n fi\n\n INFO \"Installing coreos, the host operating system to /dev/$disk_i \\n\" \"$BASH_SOURCE\" \"cyan\";\n sudo /home/core/setup-software/coreos/coreos-install -d /dev/$disk_i -C stable -c /home/core/setup-software/coreos/cloud-config.yaml -V 1010.5.0 -b http://localhost/softwares/coreos/mirror ;\n\n}\n\ninstall-to-disk | tee install-to-disk.log;\nSCRIPT_EXIT;\nexit;\n"
},
{
"alpha_fraction": 0.6304070949554443,
"alphanum_fraction": 0.642175555229187,
"avg_line_length": 24.560976028442383,
"blob_id": "f3666a530fb601c104163c6ebc9eaf0309d2f649",
"content_id": "c6c7cb0e4fe05ee70fb777aa72e4cd0f2e2f34f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3144,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 123,
"path": "/scripts/patch-rollback/2.1/git-log-details.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n#--------------------------------------------------------------------#\n# Backup of gstudio \n# File name : git-log-details.sh\n# File version : 1.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : 26-06-2014 12:04:AM\n# Modified by : None\n# Modified on : Not yet\n# Description : This file is used for taking backup of gstudio\n# 1. Capture git log details\n#\t\t\t\t 2. Take backup of rcs via cp (copy -rv) command\n#\t\t\t\t 3. Take backup of mongodb via mongodbdump command\n#\t\t\t\t 4. Create a compressed file (TAR File - tar.bz2)\n#\t\t\t\t 5. Optional - Move the backup directory to /tmp/ after successful creation of tar.bz2 file\n#--------------------------------------------------------------------#\n\n\n# log commit details - started\necho -e \"\\nDate : $(date) \\n\" \n\necho -e \"\\n\\nDetails of gstudio-docker \\n\" \ncd /home/docker/code/\n\necho -e \"\\ngstudio-docker : $(pwd) \\n\" \necho -e $(pwd)\n\necho -e \"\\ngstudio-docker : git branch \\n\" \ngit branch \n\necho -e \"\\ngstudio-docker : git log - latest 10 commits \\n\" \ngit log -n 10 \n\necho -e \"\\ngstudio-docker : git status \\n\" \ngit status \n\necho -e \"\\ngstudio-docker : git diff \\n\" \ngit diff \n\n\necho -e \"\\n\\nDetails of gstudio \\n\" \ncd /home/docker/code/gstudio/\n\necho -e \"\\ngstudio : $(pwd) \\n\" \necho -e $(pwd)\n\necho -e \"\\ngstudio : git branch \\n\" \ngit branch \n\necho -e \"\\ngstudio : git log - latest 10 commits \\n\" \ngit log -n 10 \n\necho -e \"\\ngstudio : git status \\n\" \ngit status \n\necho -e \"\\ngstudio : git diff \\n\" \ngit diff \n\n\necho -e \"\\n\\nDetails of OpenAssessmentsClient \\n\" \ncd /home/docker/code/OpenAssessmentsClient/\n\necho -e \"\\n'OpenAssessmentsClient' - strategy adopted for updating oac and oat is as follows: \\n\n- Building 'oac' and 'oat' locally from 'gnowledge/OpenAssessmentsClient' with 'clixserver' branch. \\n\n- Testing it locally and packaging oac, oat as a replacement. \\n\n- This decision is taken because building oac and oat is network dependent operation and sometimes build doesn't happen smoothly. \\n\\n\" \n\n# echo -e \"\\nOpenAssessmentsClient : $(pwd) \\n\" \n# echo -e $(pwd)\n\n# echo -e \"\\nOpenAssessmentsClient : git branch \\n\" \n# git branch \n\n# echo -e \"\\nOpenAssessmentsClient : git log - latest 10 commits \\n\" \n# git log -n 10 \n\n# echo -e \"\\nOpenAssessmentsClient : git status \\n\" \n# git status \n\n# echo -e \"\\nOpenAssessmentsClient : git diff \\n\" \n# git diff \n\n\necho -e \"\\n\\nDetails of qbank-lite \\n\" \ncd /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/\n\necho -e \"\\nqbank-lite : $(pwd) \\n\" \necho -e $(pwd)\n\necho -e \"\\nqbank-lite : git branch \\n\" \ngit branch \n\necho -e \"\\nqbank-lite : git log - latest 10 commits \\n\" \ngit log -n 10 \n\necho -e \"\\nqbank-lite : git status \\n\" \ngit status \n\necho -e \"\\nqbank-lite : git diff \\n\" \ngit diff \n\n\n\necho -e \"\\npip freeze \\n\" \npip freeze\n\necho -e \"\\nps aux\\n\"\nps aux\n\necho -e \"\\nFiles inside /data/updates_archives \\n\" \nls -ltrh /data/updates_archives\n\n\necho -e \"\\nDate : $(date) \\n\" \n# log commit details - ended\n\n\n\n\necho -e \"\\nBackup server_settings.py(/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py) in /data/ \\n\" \nrsync -avzPh /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py /data/\n"
},
{
"alpha_fraction": 0.6155388355255127,
"alphanum_fraction": 0.6611528992652893,
"avg_line_length": 34,
"blob_id": "176cd52185e744bd4a0b14a6b409e8294d929a6e",
"content_id": "74e78794494cd64db7de8db26d54eb7ceeb8e585",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1995,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 57,
"path": "/scripts/ss-gpg-setup.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# # Following variables are used to store the color codes for displaying the content on terminal\n# red=\"\\033[0;91m\" ;\n# green=\"\\033[0;32m\" ;\n# brown=\"\\033[0;33m\" ;\n# blue=\"\\033[0;34m\" ;\n# cyan=\"\\033[0;36m\" ;\n# reset=\"\\033[0m\" ;\n# echo='echo -e' ;\n\n\n\n# $echo \"\\n${blue}Enter your Full name. (Example: Mrunal Nachankar) ${reset}\" ;\n# read install_user_fullname\n\n# $echo \"\\n${blue}Enter your State name. (Example: Maharashtra) ${reset}\" ;\n# read install_state_name\n\n# $echo \"\\n${blue}Enter your School name. (Example: Kendriya Vidyalaya Karanja) ${reset}\" ;\n# read install_school_name\n\n# $echo \"\\n${blue}Enter your School id. (Example: 001, 002 or 0025)${reset}\" ;\n# read install_school_id\n\n# # Assembling full school id\n# install_state_name_init=${install_state_name,,} # ${a,,} - lowercase and ${a^^} - uppercase \n# install_state_name_init=${install_state_name_init:0:3}\n# $echo \"${install_state_name_init}\"\n# install_full_school_id=${install_state_name_init}${install_school_id}\n# $echo \"${install_full_school_id}\"\n\n# $echo \"\\n${blue}Enter your Full School id. (Example: m001, m002 or m0025)${reset}\" ;\n# read install_full_school_id\n\n# $echo \"\\n${blue}Enter your email id. (Example: [email protected]) ${reset}\" ;\n# read install_email_id\n\n# $echo \"\\n${blue} $install_user_fullname : $install_state_name : $install_school_name : $install_school_id : $install_email_id ${reset}\" ;\n\n\n\n# exit\n\n# # Mrunal : 20160131-2130 : Take user input as School id (small letter of initials of state and school no in 3 digit)\n# echo \"Please provide the id of School\" ;\n# echo \"(For example Rajasthan state and school 001 'r001' must be entered and hit Enter key of Keyboard)\" ;\n# read sch_id ;\n# echo \"School id entered is $sch_id\" ;\n\n# # Mrunal : 20160131-2130 : \n# if [[ \"${sch_id}\" =~ [a-z]{1}[0-9]{3} ]]; then\n# echo \"School id doesn't match the criteria. Hence exiting please restart / re-run the script again.\" ;\n# exit ;\n# else\n# echo \"School id matches the criteria. Continuing the process.\" ;\n# fi\n"
},
{
"alpha_fraction": 0.5680751204490662,
"alphanum_fraction": 0.5795187950134277,
"avg_line_length": 46.33333206176758,
"blob_id": "1fdadb3160cc2773fb7712d4913821c79e9ca55f",
"content_id": "d7e97549da7b7d2ca06c34bf218cab802b6bda94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3408,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 72,
"path": "/scripts/local_settings_changes.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Ref : http://stackoverflow.com/questions/17998763/sed-commenting-a-line-matching-a-specific-string-and-that-is-not-already-comme\n# http://unix.stackexchange.com/questions/89913/sed-ignore-line-starting-whitespace-for-match\n#sed -e '/Used for postgres db/ s/^#*/#/' -i $dHOME/confs/local_settings.py.default \n\n# Mrunal : Set dHOME variable in deploy.conf\nfile=`readlink -e -f $0`\nfile1=`echo $file | sed -e 's/\\/scripts.*//'` ; \nfile2=`echo $file1 | sed -e 's/\\//\\\\\\\\\\//g'` ;\n# file3=`echo $file1 | sed -e 's:/:\\\\\\/:g'` ;\nsed -e \"/hHOME/ s/=.*;/=$file2;/\" -i $file1/confs/deploy.conf;\nmore $file1/confs/deploy.conf | grep hHOME; \n\n\nsource $file1/confs/deploy.conf\n\ndHOME=\"/home/docker/code\"\n\nOPTION=\"$1\";\nif [[ \"$1\" == \"\" ]]; then\n echo -e 'Please select the database for storing the users credentials: \\n 1. sqlite \\n 2. postgresql';\n read OPTION ;\nfi\n \necho -e \"USER input : $OPTION\";\nif [[ \"$OPTION\" == \"\" ]]; then\n echo \"No input\";\nelif [[ \"$OPTION\" == \"1\" ]]; then\n echo \"Used for sqlite db\";\n sed -e '/Used for sqlite3 db/ s/^\\s*#*//' -i $hHOME/confs/local_settings.py.default; \n sed -e '/Used for postgres db/ s/^\\s*#*/#/' -i $hHOME/confs/local_settings.py.default; \n #sed -e '/Used for postgres db/ s/^\\s*#*/#/' -i $hHOME/scripts/initialize.sh;\n #sed -e '/echo[[:space:]]\\+\"psql*/,+7 s/^\\s*#*/#/' -i $hHOME/scripts/initialize.sh;\n# sed -e '/echo[[:space:]]\\+\"psql*/,+7 s/^\\s*#*/#/' -i scripts/initialize.sh;\n# sed -e '/echo[[:space:]]\\+\"from[[:space:]]\\+django.contrib.auth.models[[:space:]]\\+import[[:space:]]\\+User*/,+3 s/^\\s*#*//' -i scripts/initialize.sh ;\n# sed -e '/[[:space:]]\\+User.objects.create_superuser*/ s/^\\s*#*/ /' -i scripts/initialize.sh;\nelif [[ \"$OPTION\" == \"2\" ]]; then\n echo \"Used for postgres db\";\n sed -e '/Used for sqlite3 db/ s/^\\s*#*/#/' -i $hHOME/confs/local_settings.py.default; \n sed -e '/Used for postgres db/ s/^\\s*#*/ /' -i $hHOME/confs/local_settings.py.default; \n #sed -e '/Used for postgres db/ s/^\\s*#*/ /' -i $hHOME/scripts/initialize.sh;\n #sed -e '/echo[[:space:]]\\+\"psql*/,+7 s/^\\s*#*/ /' -i $hHOME/scripts/initialize.sh;\n# sed -e '/echo[[:space:]]\\+\"from[[:space:]]\\+django.contrib.auth.models[[:space:]]\\+import[[:space:]]\\+User*/,+3 s/^\\s*#*/#/' -i scripts/initialize.sh;\n# sed -e '/echo[[:space:]]\\+\"psql*/,+7 s/^\\s*#*//' -i scripts/initialize.sh;\n# sed -e '/[[:space:]]\\+User.objects.create_superuser*/ s/^\\s*#*/# /' -i scripts/initialize.sh;\nelse\n echo \"Invalid input\";\nfi\n\n# site_name=\"$2\";\n# if [[ \"$2\" == \"\" ]]; then\n# echo -e 'Please provide the site name of this instance (example clix):';\n# read site_name ;\n# fi\n \n# echo -e \"USER input : $site_name\";\n# if [[ \"$site_name\" == \"\" ]]; then\n# echo \"No input\";\n# elif [[ \"$site_name\" == \"clix\" ]]; then\n# echo \"Used for clix\";\n# sed -e '/GSTUDIO_SITE_NAME/ s/=*/= \"clix\"/' -i $dHOME/confs/local_settings.py.default; \n# sed -e '/GSTUDIO_SITE_LANDING_TEMPLATE/ s/^\\s*#*//' -i $dHOME/confs/local_settings.py.default; \n# elif [[ \"$site_name\" == \"metastudio\" ]]; then\n# echo \"Used for metastudio\";\n# sed -e '/GSTUDIO_SITE_NAME/ s/=*/= \"$site_name\"/' -i $dHOME/confs/local_settings.py.default; \n# sed -e '/GSTUDIO_SITE_LANDING_TEMPLATE/ s/^\\s*#*/#/' -i $dHOME/confs/local_settings.py.default; \n# else\n# echo \"Invalid input\";\n# fi\n\nexit;\n"
},
{
"alpha_fraction": 0.5973496437072754,
"alphanum_fraction": 0.7059123516082764,
"avg_line_length": 39.06122589111328,
"blob_id": "c1456927784749955f1477fc3736ca910598d9e4",
"content_id": "a6039e8536e359672f8afd2d856b2ee68fbecd6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1962,
"license_type": "no_license",
"max_line_length": 362,
"num_lines": 49,
"path": "/scripts/course-import-and-export-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\necho -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\"\ncd /home/docker/code/gstudio/gnowsys-ndf/\n\n# echo -e \"\\n${cyan}Purge module : 590c048ea31c74012efaddb4 ${reset}\";\n# python manage.py purge_node 590c048ea31c74012efaddb4 y\n\necho -e \"\\n${cyan}Import module(s) or unit(s)${reset}\";\nmodule_and_units=('pre-clix-survey_2017-09-15_13-13' 'english-beginner_2017-09-15_12-25' 'english-elementary_2017-09-15_12-28' 'basic-astronomy_2017-09-15_12-34' 'linear-equations_2017-09-15_12-53' 'health-and-disease_2017-09-15_12-57' 'sound_2017-09-15_13-03' 'ecosystem_2017-09-15_13-07' 'atomic-structure_2017-09-15_13-09' 'post-clix-survey_2017-09-15_13-15')\nfor m_or_u_name in \"${module_and_units[@]}\"\ndo\n echo -e \"\\n${cyan}Import module/unit: ${m_or_u_name} ${reset}\";\n python manage.py group_import /data/data_export/${m_or_u_name} y y y\n rm /home/docker/code/gstudio/gnowsys-ndf/5*\ndone\n\n# Code - schema update scripts - started\necho -e \"\\n${cyan}change the directory to /home/docker/code/gstudio ${reset}\"\ncd /home/docker/code/gstudio/gnowsys-ndf/\n\necho -e \"\\n${cyan}apply fab update_data ${reset}\"\nfab update_data\n\necho -e \"\\n${cyan}execute python manage.py unit_assessments https://clixserver y ${reset}\"\npython manage.py unit_assessments https://clixserver y\n\necho -e \"\\n${cyan}execute release2_sept17.py ${reset}\"\necho \"execfile('../doc/deployer/release2_sept17.py')\" | python manage.py shell\n\necho -e \"\\n${cyan}updating teacher' s agency type ${reset}\"\npython manage.py teacher_agency_type_update\n\necho -e \"\\n${cyan}collectstatic ${reset}\"\necho yes | python manage.py collectstatic\n\n# Code - schema update scripts - ended"
},
{
"alpha_fraction": 0.6076310276985168,
"alphanum_fraction": 0.6301094889640808,
"avg_line_length": 63.32083511352539,
"blob_id": "5a7491f1ce3c603778e82e8c3403c3a0b96749eb",
"content_id": "b349e7c23f4cbb2777c914be3b3b12d81b8b3236",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 15437,
"license_type": "no_license",
"max_line_length": 616,
"num_lines": 240,
"path": "/Dockerfile",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "# set the base image to Ubuntu 14.04\nFROM ubuntu:14.04 \n\n# file Author / Maintainer to GN \nMAINTAINER [email protected], [email protected], [email protected]\n\n# file adding auser \"docker\" \n# -m : home directory\n# -s : shell (user's login shell)\nRUN useradd -ms /bin/bash docker\n\nRUN su docker\n\n# Setting the path for the installation directory\nRUN export DATE_LOG=`echo $(date \"+%Y%m%d-%H%M%S\")`\nENV LOG_DIR_DOCKER=\"/root/DockerLogs\" \nENV LOG_INSTALL_DOCKER=\"/root/DockerLogs/$(DATE_LOG)-gsd-install.log\"\n\nRUN echo \"PATH=\"$LOG_DIR_DOCKER \\\n && mkdir -p $LOG_DIR_DOCKER \\\n && touch ${LOG_INSTALL_DOCKER} \\\n && ls ${LOG_INSTALL_DOCKER} \\\n && echo \"Logs driectory and file created\" | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \n\n# update the repository sources list\nRUN apt-get update | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# install nginx\n# add all the repositories (nginx, ffmpeg and ffmpeg2theora, nodejs{bower} and yarn)\nRUN apt-get install -y python-software-properties | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && apt-get install -y software-properties-common python-software-properties | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && add-apt-repository -y ppa:nginx/stable | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && add-apt-repository -y ppa:mc3man/trusty-media | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && apt-get install -y curl \\\n && curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash - \\\n && curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - \\\n && echo \"deb https://dl.yarnpkg.com/debian/ stable main\" | sudo tee /etc/apt/sources.list.d/yarn.list \\\n && apt-get install apt-transport-https -y --force-yes\n\n\n# update the keys and repository sources list\nRUN apt-key update && apt-get update | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# install packages related application and ( ssh, mail {for mailbox / mail relaying}, sqlite and postgresql, ffmpeg and ffmpeg2theora, bash {commands} auto completion and crontab, SCSS/SAAS stylesheets {ruby}, nodejs{bower}, wget, duplicity, rabbitmq-server, yarn and cffi dependency{libssl-dev and libffi-dev}, and lxml dependency {libxml2-dev and libxslt1-dev})\nRUN apt-get install -y dialog net-tools build-essential git python python-pip python-setuptools python-dev rcs emacs24 libjpeg-dev memcached libevent-dev libfreetype6-dev zlib1g-dev nginx supervisor curl g++ make openssh-client openssh-server mailutils postfix sqlite3 libpq-dev postgresql postgresql-contrib python-psycopg2 ffmpeg gstreamer0.10-ffmpeg ffmpeg2theora bash-completion cron ruby ruby-dev nodejs wget duplicity rabbitmq-server yarn libssl-dev libffi-dev libxml2-dev libxslt1-dev | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\nRUN easy_install pip | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# install uwsgi now because it takes a little while\nRUN pip install uwsgi nodeenv | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# for editing SCSS/SAAS stylesheets {compass}\nRUN gem install compass | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# for nodejs{bower}\nRUN npm install -g bower | sed -e 
\"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n\n# create code directory as it can't find dirctory while coping\nRUN mkdir -p /home/docker/code/ \\\n && mkdir -p /data/db \\\n && mkdir -p /data/rcs-repo \\\n && mkdir -p /data/media \\\n && mkdir -p /data/heartbeats \\\n && mkdir -p /backups/incremental \\\n && mkdir -p /softwares \\\n && echo \"code, data, rcs-repo, media, benchmark-dump, heartbeats, backups and softwares driectories are created\" | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n\n# install our code\n# ADD . /home/docker/code/ \n\n# change the working directory to \"/home/docker/code\"\nWORKDIR \"/home/docker/code/\" \n\n# install gstudio docker code\nRUN git clone https://[email protected]/mrunal4/gstudio-docker.git \nRUN mv gstudio-docker/* . && rm -rf gstudio-docker\n\n# install gstudio app code\nRUN git clone -b dlkit https://[email protected]/gnowledge/gstudio.git \nRUN cd gstudio && git reset --hard $commitid && cd ..\n\nRUN wget http://103.36.84.69:9001/static.tgz\nRUN tar -xvzf static.tgz && rm -rf static.tgz\n\n# RUN pip install to install pip related required packages as per requirements.txt\nRUN pip install -r /home/docker/code/gstudio/requirements.txt | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# Clone dlkit repos at manage.py level\nRUN cd /home/docker/code/gstudio/gnowsys-ndf/ \\\n && git clone https://bitbucket.org/cjshaw/dlkit_runtime.git \\\n && git clone https://bitbucket.org/cjshaw/dlkit.git \\\n && cd dlkit \\\n && git submodule update --init --recursive\n\n# Clone qbank repos at manage.py level\nRUN cd /home/docker/code/gstudio/gnowsys-ndf/ \\\n && git clone https://github.com/gnowledge/qbank-lite.git \\\n && cd qbank-lite \\\n# && pip install -r /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/requirements.txt \\\n && git submodule update --init --recursive \n# && python main.py\n\n# Clone OpenAssessmentsClient repos at gstudio level\nRUN cd /home/docker/code/ \\\n && git clone https://github.com/gnowledge/OpenAssessmentsClient.git \\\n && cd OpenAssessmentsClient \\\n && yarn \\\n && yarn build \\\n && mkdir /softwares/oac /softwares/oat \\\n && cp -av /home/docker/code/OpenAssessmentsClient/build/prod/* /softwares/oac/ \\\n && cp -av /home/docker/code/OpenAssessmentsClient/build/prod/* /softwares/oat/ \\\n && cp -av /softwares/oat/author.html /softwares/oat/index.html\n\n#bower install\nRUN cd /home/docker/code/gstudio/gnowsys-ndf/ \\\n && bower install --allow-root | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# backup all scripts, confs and restore schema dump (restore factory dump)\nRUN mkdir /root/.backup_defaults \\\n && cp -av /home/docker/code/confs /root/.backup_defaults/ \\\n && cp -av /home/docker/code/scripts /root/.backup_defaults/ \\\n && cp -av /home/docker/code/Dockerfile /home/docker/code/AUTHORS /home/docker/code/README.md /root/.backup_defaults/ \\\n && rm -rf /data/db /data/media /data/rcs-repo \\\n && cp -av /home/docker/code/schema_dump/data/* /data/ \\\n && rm -rf /home/docker/code/schema_dump/data\n \n\n# checking the present working directory and copying of configfiles : {copying the '.emacs' file in /root/ } , {copying the 'maintenance' files in /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/templates/ } , {copying wsgi file to appropriate location}, {copying postgresql conf file to appropriate location} , \nRUN pwd | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a 
${LOG_INSTALL_DOCKER} \\\n && cp -v /home/docker/code/confs/emacs /root/.emacs | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && cp -v /home/docker/code/maintenance/maintenance* /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/templates/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && cp -v /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/wsgi.py /home/docker/code/gstudio/gnowsys-ndf/wsgi.py | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && rm /etc/postgresql/9.3/main/postgresql.conf | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && cp -v /home/docker/code/confs/local_settings.py.default /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py \\\n && cat /home/docker/code/confs/bash_compl >> /root/.bashrc | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \n \n# setup all the configfiles (nginx, supervisord and postgresql)\nRUN echo \"daemon off;\" >> /etc/nginx/nginx.conf \\\n && rm /etc/nginx/sites-enabled/default | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && mv -v /etc/nginx/nginx.conf /tmp/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/nginx.conf /etc/nginx/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/nginx-app.conf /etc/nginx/sites-enabled/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/supervisor-app.conf /etc/supervisor/conf.d/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/postgresql.conf /etc/postgresql/9.3/main/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && mv -v /etc/mailname /tmp/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/mailname /etc/mailname | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && mv -v /etc/postfix/main.cf /tmp/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/main.cf /etc/postfix/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/sasl_passwd /etc/postfix/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && ln -s /home/docker/code/confs/sasl_passwd.db /etc/postfix/ | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \n\nRUN echo \"Size of deb packages files : \" du -hs /var/cache/apt/archives/ \\\n && ls -ltr /var/cache/apt/archives/ \\\n && du -hs /var/cache/apt/archives/* \\\n && rm -rf /var/cache/apt/archives/*.deb\n\n\n# mongodb installation\n\n# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added\nRUN groupadd -r mongodb \\\n && useradd -r -g mongodb mongodb | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\nRUN apt-get update \\\n && apt-get install -y --no-install-recommends \\\n ca-certificates \\\n numactl | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n# && rm -rf /var/lib/apt/lists/*\n\n# grab gosu for easy step-down from root\n#RUN gpg 
--keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n#RUN curl -o /usr/local/bin/gosu -SL \"https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture)\" \\\n# && curl -o /usr/local/bin/gosu.asc -SL \"https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture).asc\" \\\n# && gpg --verify /usr/local/bin/gosu.asc \\\n# && rm /usr/local/bin/gosu.asc \\\n# && chmod +x /usr/local/bin/gosu | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# gpg: key 7F0CEB10: public key \"Richard Kreuter <[email protected]>\" imported\nRUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys 492EAFE8CD016A07919F1D2B9ECBEC467F0CEB10 | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# Mrunal : 12012016 : Changed the source for mongodb from \"http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.1 multiverse\" to \"http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse\" \n# install uwsgi now because it takes a little while\nRUN echo \"deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse\" > /etc/apt/sources.list.d/mongodb-org.list | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# Mrunal : 12012016 : Get the stable packages instead of unstable eg.- \"mongodb-org-unstable\" to \"mongodb-org\" \n# Mrunal : 01022016 : Install specific version of mongodb : apt-get install mongodb-org-unstable=3.1.5 mongodb-org-unstable-shell=3.1.5 mongodb-org-unstable-mongos=3.1.5 mongodb-org-unstable-tools=3.1.5\n \nRUN set -x \\\n && apt-get update \\\n && apt-get install -y --force-yes\\\n mongodb-org=3.2.4 \\\n mongodb-org-server=3.2.4 \\\n mongodb-org-shell=3.2.4 \\\n mongodb-org-mongos=3.2.4 \\\n mongodb-org-tools=3.2.4 \\\n# && rm -rf /var/lib/apt/lists/* \\\n && rm -rf /var/lib/mongodb \\\n && mv /etc/mongod.conf /etc/mongod.conf.orig | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\nRUN mkdir -p /data/db && chown -R mongodb:mongodb /data/db | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\nVOLUME /data \nVOLUME /backups\nVOLUME /softwares\nVOLUME /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore/repository/AssetContent\n\n# Exposing the ports - {ssh} , {smtp} , {https (with ssl)} , {http} , {for developement user (Developer)} , {smtpd command (to test mail machanism locally)} , {imap : gnowledge} , {smtp : gnowledge} , {mongodb}\nRUN echo \"EXPOSE 22 25 443 80 8000 1025 143 587\" | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\nEXPOSE 22 25 443 80 8000 1025 143 587 27017 8080 5555\n\n# {change this line for your timezone} and {nltk installation and building search base} and {creation of schema_files directory}\nRUN ln -sf /usr/share/zoneinfo/Asia/Kolkata /etc/localtime | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && pip install -U pyyaml nltk | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && /home/docker/code/scripts/nltk-initialization.py | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER} \\\n && mkdir /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/management/commands/schema_files | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n\n# Setting data 
directory for \"collectstatic\"\nRUN cd /data/ \\\n && wget http://clixplatform.tiss.edu/softwares/initial-schema-dump-clixplatform/clean-data-kedar-mrunal-20170324-clixplatform.tar.bz2 \\\n && tar xvjf clean-data-kedar-mrunal-20170324-clixplatform.tar.bz2 \n\n# Restore default postgres database\nRUN /etc/init.d/postgresql start \\\n && echo \"psql -f /data/pgdata.sql;\" | sudo su - postgres \\\n && crontab /home/docker/code/confs/mycron \\\n && rm /etc/rc.local \\\n && /etc/init.d/rc.local start \\\n && ln -s /home/docker/code/confs/rc.local /etc/ \\\n && /etc/init.d/postgresql start\n\n# fab update\nRUN cd /home/docker/code/gstudio/gnowsys-ndf/ \\\n && pip install flower \\\n && pip install Fabric==1.12.0\n\n# Perform collectstatic\nRUN echo yes | /usr/bin/python /home/docker/code/gstudio/gnowsys-ndf/manage.py collectstatic\n\nCMD /home/docker/code/scripts/initialize.sh | sed -e \"s/^/$(date +%Y%m%d-%H%M%S) : /\" 2>&1 | tee -a ${LOG_INSTALL_DOCKER}\n"
},
{
"alpha_fraction": 0.5678040385246277,
"alphanum_fraction": 0.6524496674537659,
"avg_line_length": 38.58260726928711,
"blob_id": "7f1973c5232d5e812423ea18b9bc05e41676ee81",
"content_id": "546e9ad80ec9508a85bfb1ca8b185e0a96149e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4572,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 115,
"path": "/scripts/patch/patch-r6.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#Created On March 2019\n#This script file \"patch-r6.sh\" should be run using \"bash patch-r6.sh\"\n\n#Following are the git commit numbers of repositories that have been modified:\n\n # Inside the code folder--\n # gstudio-docker= \"c0463c5a55a92629edbca0ee34b8c7cbba161d3a\"\n # gstudio= \"235eb4e9818a333e132595664838a22c8e4b4d11\"\n # qbank-gstudio-scripts=\"002cbdff2e596f2dab6f0b2c14efd5a561b3dae0\"\n\n # Inside the tools folder--\n # Astroamer_Planet_Trek_Activity= \"39f1cc7cb1cd567f69477b20830bf7f9b89be4d6\"\n # Motions_of_the_Moon_Animation= \"c4feb76dbb784e6c4bb86c76c02d3ff73353d107\"\n # Rotation_of_Earth_Animation= \"2c070c5b54550b519ed4429f82cc9c7358e38b18\"\n # food_sharing_tool= \"dfa73432caedb121c567f2f3484bc7d8cfd39f1a\"\n # sugarizer= \"239b9d716c0b0686f1389610cea31b91e58665c2\"\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\n#ss_id stores the school ID \nss_id=`echo $(echo $(more /home/core/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`;\n\n# Trim leading whitespaces \nss_id=$(echo ${ss_id##*( )});\n# Trim trailing whitespaces \nss_id=$(echo ${ss_id%%*( )});\n\nfunction apply_patch() {\n #For filename\n\n #patch=$(basename $(tar -tf /mnt/patch-*.tar.gz | head -n 1));\n #patch=\"patch-7a6c2ac-r5-20190221\"; #earlier patch\n #patch=\"patch-26eaf18-r5-20190320\"; #latest patch\n patch=\"update-patch-c0463c5-r6-20190718\";\n\n echo -e \"\\n${cyan}The folder named \"$patch\" is present which has all the code, data and tools updates ${reset}\";\n\n echo -e \"\\n${cyan}Changing the Directory ${reset}\";\n cd /mnt/update-patch-r6/;\n \n #code for checking the md5sum for file checksum started\n\n echo \"\\n${cyan}File integrity check started. Please wait\";\n \n #if [ -f patch-26eaf18-r5-20190320.tar.gz.md5sum ]; then\n if [ -f ${patch}.tar.gz.md5sum ]; then\n #echo \"\\n${cyan}patch-26eaf18-r5-20190320.tar.gz.md5sum file is present ${reset}\"\n echo \"\\n${cyan} ${patch}.tar.gz.md5sum file is present ${reset}\"\n\n #if md5sum --status -c patch-26eaf18-r5-20190320.tar.gz.md5sum && echo OK; then\n if md5sum --status -c ${patch}.tar.gz.md5sum && echo OK; then\n echo \"\\n${cyan}File integrity check was successful ${reset}\";\n else\n #echo \"\\n${cyan}File integrity Check Failed. Please download the correct patch-26eaf18-r5-20190320.tar.gz file ${reset}\";\n echo \"\\n${cyan}File integrity Check Failed. Please download the correct ${patch}.tar.gz file ${reset}\";\n exit;\n fi\n else\n #echo \"\\n${cyan}patch-26eaf18-r5-20190320.tar.gz.md5sum file not present. Please Download It ${reset}\";\n echo \"\\n${cyan} ${patch}.tar.gz.md5sum file not present. 
Please Download It ${reset}\";\n exit;\n fi\n\n #code for checking the md5sum for file checksum ended\n \n echo -e \"\\n${cyan}Extracting the files ${reset}\";\n sudo tar -xvzf ${patch}.tar.gz;\n\n #code for triggering various scripts to update code,data and tools started\n\n echo -e \"\\n${cyan}Applying code updates ${reset}\";\n \tsudo bash ${patch}/code-updates/code-update.sh;\n\n #echo -e \"\\n${cyan}Step 1 successfully completed ${reset}\";\n\n \techo -e \"\\n${cyan}Applying data updates ${reset}\";\n \tsudo bash ${patch}/data-updates/data-update.sh;\n\n #echo -e \"\\n${cyan}Step 2 successfully completed ${reset}\";\n\n \techo -e \"\\n${cyan}Applying tools updates ${reset}\";\n \tsudo bash ${patch}/tools-updates/tools-update.sh;\n\n #echo -e \"\\n${cyan}Step 3 successfully completed ${reset}\"; \n\n #code for triggering various scripts to update code,data and tools ended \n\n #code to remove the patch folder created by extraction of tar file\n echo -e \"\\n${cyan}Removing the ${patch} folder ${reset}\";\n cd /mnt/update-patch-r6/ ;\n sudo rm -rf ${patch}; \n \n echo -e \"\\n${cyan}Congratulations!!! Patch is applied successfully. ${reset}\";\n\n} \n\napply_patch | tee update-patch-r6-${ss_id}.log && rsync -avPhz update-patch-r6-${ss_id}.log /home/core/; # logs are stored in this file\n\n\n#for restarting the system\n\necho -e \"\\n${cyan}School server will be restarting in 10 sec ${reset}\";\nsleep 10;\nsudo reboot;\n\n\n \n \n\n\n \n"
},
{
"alpha_fraction": 0.6061791181564331,
"alphanum_fraction": 0.6093077659606934,
"avg_line_length": 37.164180755615234,
"blob_id": "8527e67935f22244d915b991151bbfb1f54803eb",
"content_id": "df9e6ccf19b8b9fb23fc41df38e15eb38df8ca13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5114,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 134,
"path": "/scripts/cleaning-up-qbank-hardcoded-file-names.py",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "# This script is meant to fix a bug introduced by the CLIx authoring\n# tool. Somehow 5 image links are being reported as hardcoded in\n# the field, so they cannot load (still point to MIT production\n# authoring site). This script:\n# 1) iterates through all items,\n# 2) checks all question text / choices / answer feedbacks\n# 3) replaces any instance of \"https://qbank-clix.mit.edu...\" with\n# the appropriate \"AssetContent:<guid>\" source instead.\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\n\nDLKIT_DATABASES = ['assessment']\nCOLLECTIONS = ['Item']\n\nMC = MongoClient()\n\nMEDIA_REGEX = re.compile('(https://)')\nCURRENT_ID = ''\nITEMS_UPDATED = []\nNUM_ITEMS = 0\n\ndef grab_matching_asset(source_url, files_map):\n \"\"\"\n From a given files_map, like {\n <guid>: {\n assetId: \"\",\n assetContentId: \"\",\n assetContentTypeId: \"\"\n }\n }\n We need to return the <guid> corresponding to the\n assetContentId at the end of the source_url.\n\n source_url format will be\n https://hostname/api/v1/repositories/:id/assets/:id/contents/:id/stream\n\n We need that last :id to match to the files_map\n \"\"\"\n if not source_url.endswith('/stream'):\n return None\n asset_content_id = source_url.split('/')[-2]\n for label, asset_map in files_map.items():\n if asset_map['assetContentId'] == asset_content_id:\n return label\n return None\n\ndef replace_url_in_display_text(potential_display_text, dict_files_map):\n if ('text' in potential_display_text and\n potential_display_text['text'] is not None and\n 'https://' in potential_display_text['text']):\n # assume markup? Wrap this in case it's not a valid XML doc\n # with a single parent object\n wrapped_text = '<wrapper>{0}</wrapper'.format(potential_display_text['text'])\n soup = BeautifulSoup(wrapped_text, 'xml')\n media_file_elements = soup.find_all(src=MEDIA_REGEX)\n media_file_elements += soup.find_all(data=MEDIA_REGEX)\n for media_file_element in media_file_elements:\n print('Found one invalid source in item {0}'.format(CURRENT_ID))\n if 'src' in media_file_element.attrs:\n media_key = 'src'\n else:\n media_key = 'data'\n\n invalid_source = media_file_element[media_key]\n\n # Now need to find the label corresponding to this source\n if not invalid_source.endswith('/stream'):\n # this points somewhere else on the Internet? 
Not a qbank URL\n continue\n\n media_label = grab_matching_asset(invalid_source, dict_files_map)\n\n if media_label is not None:\n print('Replaced with AssetContent:{0}'.format(media_label))\n if str(CURRENT_ID) not in ITEMS_UPDATED:\n ITEMS_UPDATED.append(str(CURRENT_ID))\n media_file_element[media_key] = 'AssetContent:{0}'.format(media_label)\n potential_display_text['text'] = soup.wrapper.renderContents().decode('utf-8')\n else:\n for new_key, value in potential_display_text.items():\n if isinstance(value, list):\n new_files_map = dict_files_map\n if 'fileIds' in potential_display_text:\n new_files_map = potential_display_text['fileIds']\n potential_display_text[new_key] = check_list_children(value, new_files_map)\n return potential_display_text\n\ndef check_list_children(potential_text_list, list_files_map):\n updated_list = []\n for child in potential_text_list:\n if isinstance(child, dict):\n files_map = list_files_map\n if 'fileIds' in child:\n files_map = child['fileIds']\n updated_list.append(replace_url_in_display_text(child, files_map))\n elif isinstance(child, list):\n updated_list.append(check_list_children(child, list_files_map))\n else:\n updated_list.append(child)\n return updated_list\n\n\nfor db_name in DLKIT_DATABASES:\n db = MC[db_name]\n\n collections = db.collection_names()\n for collection_name in collections:\n if collection_name not in COLLECTIONS:\n continue\n\n collection = MC[db_name][collection_name]\n\n for document in collection.find():\n NUM_ITEMS += 1\n CURRENT_ID = str(document['_id'])\n original_files_map = {}\n if 'fileIds' in document:\n original_files_map = document['fileIds']\n\n for key, data in document.items():\n if isinstance(data, dict):\n document[key] = replace_url_in_display_text(data, original_files_map)\n elif isinstance(data, list):\n document[key] = check_list_children(data, original_files_map)\n collection.save(document)\n\nprint('Updated {0} items out of {1}'.format(len(ITEMS_UPDATED),\n str(NUM_ITEMS)))\nprint ITEMS_UPDATED\nprint('Done!')\n"
},
{
"alpha_fraction": 0.34719911217689514,
"alphanum_fraction": 0.34803107380867004,
"avg_line_length": 39.9886360168457,
"blob_id": "1a9a63959b59b2c6615627d9230063e30b9c754e",
"content_id": "e37170d93be8e0e39952fb69f31d23aadae0a8f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3606,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 88,
"path": "/scripts/system-heartbeat.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nfunction system-heartbeat() {\n\n echo -e \"Info-msg : Checking directory existence. \\n\" \n if [[ -d /data/system-heartbeat/ ]]; then\n echo \"/data/system-heartbeat/ directory exists\"\n else\n mkdir -p /data/system-heartbeat/\n echo \"/data/system-heartbeat/ directory created successfully\"\n fi\n\n echo -e \"Info-msg : hostname of server (hostname). \\n\" \n hostname \n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n # echo -e \"Info-msg : School server name (from server settings). \\n\" \n # more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py \n\n # echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : internal ip addresses of the system (ip address). \\n\" \n intfs=($(ifconfig -a | sed 's/[ \\t].*//;/^\\(\\)$/d'));\n ips=($(ifconfig -a | awk '/inet addr/{print substr($2,6)}'));\n for (( a=0; a<${#ips[@]}; a++ ));\n do\n if [[ ${intfs[$a]} != \"lo\" ]]; then\n echo \"${intfs[$a]}- ${ips[$a]}\" ;\n fi\n done\n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : external ip addresses of the system (public ip address). \\n\" \n ips_pub=$(wget http://ipecho.net/plain -O - -q ; echo)\n echo \"$ips_pub\" \n \n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : RAM details (free -h). \\n\" \n free -h \n \n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : HDD details (df -h). \\n\" \n df -h \n \n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : Size description in '/data/'. \\n\" \n du -hs /data/*\n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : No of directories and files in '/data/media/'. \\n\" \n echo -e \"No of directories : $(find /data/media/ -type d | wc -l) \\n\"\n echo -e \"No of files : $(find /data/media/ -type f | wc -l) \\n\"\n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : uptime (uptime). \\n\" \n uptime\n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n echo -e \"Info-msg : current process (uptime). \\n\" \n ps aux\n\n echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n # echo -e \"Info-msg : docker images (docker images). \\n\" \n # docker images\n\n # echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n # echo -e \"Info-msg : docker all containers (docker ps -a). \\n\" \n # docker ps -a\n\n # echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n\n # echo -e \"Info-msg : docker running containers (docker ps). \\n\" \n # docker ps\n \n # echo -e \" \\n\\n------------------------------------------------------------------------------------ \\n\\n\" \n} \n\nsystem-heartbeat | tee /data/system-heartbeat/system-heartbeat-$(date +\\%Y\\%m\\%d-\\%H\\%M\\%S).log;"
},
{
"alpha_fraction": 0.8214285969734192,
"alphanum_fraction": 0.8214285969734192,
"avg_line_length": 27,
"blob_id": "e9d2f700191390b40097ae517dac30f2561f654b",
"content_id": "c06cca28c785fa139a93d181d887101eacc05e9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/confs/drop_database.sql",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "drop database gstudio_psql;\n"
},
{
"alpha_fraction": 0.7010309100151062,
"alphanum_fraction": 0.7010309100151062,
"avg_line_length": 24.866666793823242,
"blob_id": "e1ea379d5baafe32ec8e57c87883410a3f514718",
"content_id": "d653c26fd374f056be4433d7f0331b921b0da7e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 15,
"path": "/scripts/restore_schema_dump.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"[run] restoring schema_dump starting\"\n\necho \"[run] restore rcs_repo\"\ncp -v /home/docker/code/schema_dump/rcs_repo/* /data/rcs_repo/\n\necho \"[run] restore mongo data\"\ncp -v /home/docker/code/schema_dump/db/* /data/db/\n\necho \"[run] restore mongo data\"\necho \"psql\n\tpostgres_restore < postgres_dump.sql;\" | sudo su - postgres ; \n\necho \"[run] restoring schema_dump ending\"\n"
},
{
"alpha_fraction": 0.5765822529792786,
"alphanum_fraction": 0.6696202754974365,
"avg_line_length": 31.9375,
"blob_id": "b717e29e8df83935c3009dafeaf54f6123a31724",
"content_id": "8b2e07088c5cdd7af173a005ac56031144d65da1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1580,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 48,
"path": "/scripts/patch/patch-r2-tg-teachers.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Following variables are used to store the color codes for displaying the content on terminal\nblack=\"\\033[0;90m\" ;\nred=\"\\033[0;91m\" ;\ngreen=\"\\033[0;92m\" ;\nbrown=\"\\033[0;93m\" ;\nblue=\"\\033[0;94m\" ;\npurple=\"\\033[0;95m\" ;\ncyan=\"\\033[0;96m\" ;\ngrey=\"\\033[0;97m\" ;\nwhite=\"\\033[0;98m\" ;\nreset=\"\\033[0m\" ;\n\nfunction apply_patch() {\n\n\t# fetch the filename (patch name)\n\t#filename=$(basename $(ls /mnt/update_*.tar.gz | head -n 1));\n\t#update_patch=\"${filename%.*.*}\";\n\tupdate_patch=\"update_patch-1597e41-r2-20170929\"\n\n\techo -e \"\\n${cyan}patch directory name : ${update_patch} and this update shell file name is $(readlink -f $0) ${reset}\"\n\n\techo -e \"\\n${cyan}change directory /mnt/pd ${reset}\"\n\tcd /mnt/pd\n\n\techo -e \"\\n${cyan}copy from /mnt/pd/patch-r2* /mnt/pd/update_patch-1597e41-r2-20170929.tar.gz* /mnt/pd/update_patch-06a676e-r2.1-20171013.tar.gz /mnt/pd/README-update.md to /mnt/ ${reset}\"\n\trsync -avzPh /mnt/pd/patch-r2* /mnt/pd/update_patch-1597e41-r2-20170929.tar.gz* /mnt/pd/update_patch-06a676e-r2.1-20171013.tar.gz /mnt/pd/README-update.md /mnt/\n\n\techo -e \"\\n${cyan}change directory /mnt/ ${reset}\"\n\tcd /mnt/\n\n\techo -e \"\\n${cyan}Applying code updates - patch-r2 ${reset}\"\n\tsudo bash patch-r2.sh\n\n echo -e \"\\n${cyan}Patch 2 update finished ${reset}\"\n\n\techo -e \"\\n${cyan}Applying code updates ${reset}\"\n\tsudo bash ${update_patch}/code-updates/code-update.sh\n\n\techo -e \"\\n${cyan}Applying code updates - patch-r2.1 ${reset}\"\n\tsudo bash patch-r2.1.sh\n\n\techo -e \"\\n${cyan}Patch 2.1 update finished ${reset}\"\n\n} \n\napply_patch | tee /mnt/patch-r2-tg-teachers.log;"
},
{
"alpha_fraction": 0.6092867851257324,
"alphanum_fraction": 0.6350135803222656,
"avg_line_length": 47.543148040771484,
"blob_id": "ed0b17c6f8fd946c1854bb5bf216822c1cb91538",
"content_id": "510b5bcdfa31286f889b31c4c6aa9594d84708d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 9562,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 197,
"path": "/scripts/initialize.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#--------------------------------------------------------------------------------------------------------------#\n# File name : git-pull.sh\n# File creation : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n# Description :\n# get updated additional schema STs, ATs and RTs\n#\t\t\t\tstart mongod\n#\t\t\t\tsyncdb\n#\t\t\t\tcreate superuser\n#\t\t\t\tcreate or update gstudio schema in mongodb\n#\t\t\t\tCheck git diff\n#\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n#\t\t\t\tproperty_order_reset\n#\t\t\t\tSync_existing\n#\t\t\t\tSnapshot\n#\t\t\t\tgit pull\n#\t\t\t\tsmtpd - localhost\n# Last Modification : Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n#\t\t\t\tstarting most of the services - mongodb, cron, postgresql, postfix, ssh\n#\t\t\t\tcreate deafult database and user to access postgres\n#\t\t\t\tReplaced username='admin' with username='administrator' \n#\t\t\t\tgit pull\n#\t\t\t\tsmtpd - localhost\n# Last Modification : Mrunal M. Nachankar : Mon, 20-11-2017 12:15:PM \n#\t\t\t\tmove the places of logs\n#--------------------------------------------------------------------------------------------------------------#\n\n# Executing script as sourcing because we need to set GNUM_ARC variable from /home/docker/code/scripts/numa-arch-check.sh. Soc that we can use it here\n. /home/docker/code/scripts/numa-arch-check.sh ;\necho \"numa arch : \" $GNUM_ARC ;\n\nif [ $GNUM_ARC == \"NO\" ]; then\n echo \"[run] start mongod\";\n mongod --config /home/docker/code/confs/mongod.conf & \nelif [ $GNUM_ARC == \"YES\" ]; then\n echo \"[run] start mongod with numactcl\";\n numactl --interleave=all mongod --config /home/docker/code/confs/mongod.conf & \nelse\n echo \"No idea about arch hence starting normally.\";\n echo \"[run] start mongod\";\n mongod --config /home/docker/code/confs/mongod.conf & \nfi\n#sleep 60;\n\necho \"Starting cron service {Crontab}\" ;\n/usr/sbin/cron ;\n\necho \"[run] start postgresql\" ; # Used for postgres db\n/etc/init.d/postgresql start ; # Used for postgres db\n#/etc/init.d/postgresql status ; # Used for postgres db\n\necho \"[run] start postfix\" ;\n/etc/init.d/postfix start ;\n\necho \"[run] start ssh\" ;\n/etc/init.d/ssh start ;\n\necho \"[run] start memcache\" ;\n/etc/init.d/memcached start\n\necho \"[run] start rabbitmq-server\" ;\n/etc/init.d/rabbitmq-server start \n\necho \"[run] go to the code folder\" ;\ncd /home/docker/code/gstudio/gnowsys-ndf/ ;\n\necho \"[run] start celery\"\nexport C_FORCE_ROOT=\"true\"\npython manage.py celeryd -f /var/log/celeryd.log -l INFO &\n\n# echo \"[run] git-pull started\" ;\t\t\t\t\t \t\t\t# Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n# #bash /home/docker/code/git-pull.sh ;\t\t\t\t\t\t# Mrunal M. 
Nachankar : Mon, 07-09-2015 12:15:AM \n# echo \"[run] git-pull completed\" ;\n\n# echo \"[run] create deafult database and user to access postgres\" ;\n# echo \"psql\n# CREATE DATABASE gstudio_psql;\n# CREATE USER glab WITH PASSWORD 'Gstudi02)1^';\n# ALTER ROLE glab SET client_encoding TO 'utf8';\n# ALTER ROLE glab SET default_transaction_isolation TO 'read committed';\n# ALTER ROLE glab SET timezone TO 'UTC';\n# GRANT ALL PRIVILEGES ON DATABASE gstudio_psql TO glab;\n# \" | sudo su - postgres ; \n\n# sleep 60;\n\n# echo \"[run] syncdb\" ;\n# python manage.py syncdb --noinput ;\n\n# echo \"[run] create superuser\" ;\n# echo \"from django.contrib.auth.models import User\n# if not User.objects.filter(username='administrator').count():\n# User.objects.create_superuser('administrator', '[email protected]', 'changeit')\n# \" | python manage.py shell ;\n\n# # the above script is suggested by\n# # https://github.com/novapost/docker-django-demo\n\n# echo \"[run] get updated additional schema STs, ATs and RTs\" ;\n# cp -v /home/docker/code/gstudio/doc/schema_directory/* /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/ndf/management/commands/schema_files/ ;\n\n# echo \"[run] create or update gstudio schema in mongodb\" ;\n# python manage.py filldb ;\n# python manage.py create_schema STs_run1.csv ; \n# python manage.py create_schema ATs.csv ;\n# python manage.py create_schema RTs.csv ; \n# python manage.py create_schema STs_run2.csv ;\n\n# echo \"[run] property_order_reset\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n# echo \"execfile('property_order_reset.py')\" | python manage.py shell ;\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\n# echo \"[run] create_auth_objs.py\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n# echo \"execfile('../doc/deployer/create_auth_objs.py')\" | python manage.py shell ;\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\n# echo \"[run] Sync_existing\" ;\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n# python manage.py sync_existing_documents ;\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\n\necho \"[run] logout_all_users\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon Apr 9 16:32:09 IST 2018 \necho \"execfile('../doc/deployer/logout_all_users.py')\" | python manage.py shell ;\t # Mrunal M. Nachankar : Mon Apr 9 16:32:09 IST 2018 \n\necho \"[run] smtpd.sh\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \nbash /home/docker/code/scripts/smtpd.sh ; \t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\necho \"[run] generate-self-certified-certificate-ssl..sh\" ;\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \nbash /home/docker/code/scripts/generate-self-certified-certificate-ssl.sh ; \t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \n\necho \"[run] school server gpg setup\" ;\nbash /home/docker/code/scripts/ss-gpg-setup.sh\n\necho \"[run] start qbank-lite\" ;\t\t\t\t\t\t\t # Mrunal M. Nachankar : Mon, 07-09-2015 12:15:AM \nbash /home/docker/code/scripts/start-qbank.sh ; \t\t\t\t\t\t # Mrunal M. 
Nachankar : Mon, 07-09-2015 12:15:AM \n\n# applying host entry (clixserver)\n/usr/bin/rsync -avzPh /etc/hosts /tmp/\n/bin/sed -i ' 1 s/.*/& clixserver/' /tmp/hosts\n#/usr/bin/rsync -avzPh /tmp/hosts /etc/\n/bin/cp /tmp/hosts /etc/\n\n# nginx-app logs\nif [[ -f /var/log/nginx/school.server.org.error.log ]] && [[ -f /var/log/nginx/school.server.org.access.log ]] ; then\n # files present in new location (/data/nginx-logs/) copy files appending \".old\" to the filenames \n if [[ -f /data/nginx-logs/school.server.org.error.log ]] && [[ -f /data/nginx-logs/school.server.org.access.log ]] ; then\n echo -e \"\\nWarning: (nginx-app logs) - Files found on both the locations. Please check nginx-app.conf for the logs paths.\" ;\n mv -v /var/log/nginx/school.server.org.error.log* /data/nginx-logs/school.server.org.error.log.old\n mv -v /var/log/nginx/school.server.org.access.log* /data/nginx-logs/school.server.org.access.log.old\n # files absent in new location (/data/nginx-logs/) copy files with the exact filenames\n elif [[ ! -f /data/nginx-logs/school.server.org.error.log ]] && [[ ! -f /data/nginx-logs/school.server.org.access.log ]] ; then\n echo -e \"\\nInfo: (nginx-app logs) - File found in old location and hence moving it to new location.\" ;\n if [[ -d /data/nginx-logs ]]; then\n echo -e \"\\nInfo: (nginx-app logs) - Directory already exists\" ; \n elif [[ ! -d /data/nginx-logs ]]; then\n echo -e \"\\nInfo: (nginx-app logs) - Directory doesn't exists. Hence creating it.\" ;\n mkdir -p /data/nginx-logs ;\n fi\n mv -v /var/log/nginx/school.server.org.error.log* /data/nginx-logs/\n mv -v /var/log/nginx/school.server.org.access.log* /data/nginx-logs/\n else\n echo -e \"\\nError: (nginx-app logs) - Oops something went wrong (/data/nginx-logs/*.logs). Contact system administator or CLIx technical team - Mumbai.\" ;\n fi\nelif [[ ! -f /var/log/nginx/school.server.org.error.log ]] && [[ ! -f /var/log/nginx/school.server.org.access.log ]] ; then\n echo -e \"\\nInfo: (nginx-app logs) - Files doesn't exists. No action taken.\" ;\nelse\n echo -e \"\\nError: (nginx-app logs) - Oops something went wrong (/var/log/nginx/*.logs). Contact system administator or CLIx technical team - Mumbai.\" ;\nfi\n\n# nginx logs\nif [[ -f /var/log/nginx/error.log ]] && [[ -f /var/log/nginx/access.log ]] ; then\n # files present in new location (/data/nginx-logs/) copy files appending \".old\" to the filenames \n if [[ -f /data/nginx-logs/error.log ]] && [[ -f /data/nginx-logs/access.log ]] ; then\n echo -e \"\\nWarning: (nginx logs) - Files found on both the locations(error.log and access.log). Please check nginx-app.conf for the logs paths.\" ;\n mv -v /var/log/nginx/error.log* /data/nginx-logs/error.log.old\n mv -v /var/log/nginx/access.log* /data/nginx-logs/access.log.old\n # files absent in new location (/data/nginx-logs/) copy files with the exact filenames\n elif [[ ! -f /data/nginx-logs/error.log ]] && [[ ! -f /data/nginx-logs/access.log ]] ; then\n echo -e \"\\nInfo: (nginx logs) - File found in old location and hence moving it to new location.\" ;\n if [[ -d /data/nginx-logs ]]; then\n echo -e \"\\nInfo: (nginx logs) - Directory already exists\" ; \n elif [[ ! -d /data/nginx-logs ]]; then\n echo -e \"\\nInfo: (nginx logs) - Directory doesn't exists. Hence creating it.\" ;\n mkdir -p /data/nginx-logs ;\n fi\n mv -v /var/log/nginx/error.log* /data/nginx-logs/\n mv -v /var/log/nginx/access.log* /data/nginx-logs/\n else\n echo -e \"\\nError: (nginx logs) - Oops something went wrong (/data/nginx-logs/*.logs). 
Contact system administator or CLIx technical team - Mumbai.\" ;\n fi\nelif [[ ! -f /var/log/nginx/error.log ]] && [[ ! -f /var/log/nginx/access.log ]] ; then\n echo -e \"\\nInfo: (nginx logs) - Files doesn't exists. No action taken.\" ;\nelse\n echo -e \"\\nError: (nginx logs) - Oops something went wrong (/var/log/nginx/*.logs). Contact system administator or CLIx technical team - Mumbai.\" ;\nfi\n\necho \"[run] supervisord\" ;\nsupervisord -n ;"
},
{
"alpha_fraction": 0.5598907470703125,
"alphanum_fraction": 0.6166601777076721,
"avg_line_length": 37.83333206176758,
"blob_id": "716d2832f531c5bee42c05a138a11be34f6cf096",
"content_id": "99726a7473473fdabb19030716b40b793fc372ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 5126,
"license_type": "no_license",
"max_line_length": 255,
"num_lines": 132,
"path": "/scripts/execute-docker-ps-startup.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# ---------------------------------------------\n# Help text starts here\n\n#Ref : - http://www.cyberciti.biz/faq/how-do-i-add-jobs-to-cron-under-linux-or-unix-oses/\n\n# List all your cron jobs\n#crontab -l\n\n# List all your cron jobs of specific user\n#crontab -u username -l\n\n# Remove / delete all your cron jobs\n#crontab -d\n\n# Remove / delete all your cron jobs of specific user\n#crontab -u username -d\n\n# * * * * * command to be executed\n# - - - - -\n# | | | | |\n# | | | | ----- Day of week (0 - 7) (Sunday=0 or 7)\n# | | | ------- Month (1 - 12)\n# | | --------- Day of month (1 - 31)\n# | ----------- Hour (0 - 23)\n# ------------- Minute (0 - 59)\n\n# Your cron job looks as follows for system jobs:\n# 1 2 3 4 5 USERNAME /path/to/command arg1 arg2\n# OR\n# 1 2 3 4 5 USERNAME /path/to/script.sh\n\n\n# Few examples\n\n# To run /path/to/command five minutes after midnight, every day, enter:\n# 5 0 * * * /path/to/command\n\n# Run /path/to/script.sh at 2:15pm on the first of every month, enter:\n# 15 14 1 * * /path/to/script.sh\n\n# Run /scripts/phpscript.php at 10 pm on weekdays, enter:\n# 0 22 * * 1-5 /scripts/phpscript.php\n\n# Run /root/scripts/perl/perlscript.pl at 23 minutes after midnight, 2am, 4am ..., everyday, enter:\n# 23 0-23/2 * * * /root/scripts/perl/perlscript.pl\n\n# Run /path/to/unixcommand at 5 after 4 every Sunday, enter:\n# 5 4 * * sun /path/to/unixcommand\n# How do I use operators?\n\n# An operator allows you to specifying multiple values in a field. There are three operators:\n\n# The asterisk (*) : This operator specifies all possible values for a field. For example, an asterisk in the hour time field would be equivalent to every hour or an asterisk in the month field would be equivalent to every month.\n# The comma (,) : This operator specifies a list of values, for example: \"1,5,10,15,20, 25\".\n# The dash (-) : This operator specifies a range of values, for example: \"5-15 days\" , which is equivalent to typing \"5,6,7,8,9,....,13,14,15\" using the comma operator.\n# The separator (/) : This operator specifies a step value, for example: \"0-23/\" can be used in the hours field to specify command execution every other hour. 
Steps are also permitted after an asterisk, so if you want to say every two hours, just use */2.\n\n\n\n\n# Special stringMeaning\n# @reboot Run once, at startup.\n# @yearly Run once a year, \"0 0 1 1 *\".\n# @annually (same as @yearly)\n# @monthly Run once a month, \"0 0 1 * *\".\n# @weekly Run once a week, \"0 0 * * 0\".\n# @daily Run once a day, \"0 0 * * *\".\n# @midnight (same as @daily)\n# @hourly Run once an hour, \"0 * * * *\".\n\n# Help text completes here\n# ---------------------------------------------\n\n\n#write out current crontab\ncrontab -l > mycron\n\n\n# print the existing cron\necho -e \"\\n-------------------existing cron is starting here---------------------\\n\"\nmore mycron\necho -e \"\\n-------------------existing cron is ending here---------------------\\n\"\n\n\nCommand='docker ps';\nDate_time='@reboot'; # * * * * * or @daily @weekly @monthly \nOutput_redirections=' >> /tmp/cron-job-custom.log'\nFile=`readlink -e -f mycron`;\necho \"File name : $File \"\n\n# check for no of matched line with our commands\ngot_match=$(sed -n \"/$Command/p\" $File | wc -l)\n\nif [[ \"$got_match\" != \"0\" ]] && [ -f $File ] ; then \n sed -e \"/$Command/ s/^\\s*#*/#/\" -i $File # Example for path (log file path) search and uncomment line\n #sed -e '/\\/tmp\\/t.log/ s/^\\s*#*/#/' -i mycron # Example for path (log file path) search and comment line\n #sed -e '/hello/ s/^\\s*#*//' -i mycron # Example for word search and uncomment line\n #echo \"Mrunal-$got_match---00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"\n\n #remove matched line\n # got=$(sed -i.bak \"/$Command/d\" $File ) # Delete lines in a text file that containing a specific string - with backup file\n #echo \"Mrunal-$got---00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"\n #sed '/pattern to match/d' mycron # Delete lines in a text file that containing a specific string - without backup file\n #sed -i.bak '/pattern to match/d' mycron # Delete lines in a text file that containing a specific string - with backup file\nelif [[ -f $File ]] ; then\n #echo new cron into cron file\n echo \"$Date_time $Command $Output_redirections\" >> $File # Delete lines in a text file that containing a specific string - without backup file\nelif [[ ! -f $File ]] ; then\n #echo file does not exist\n echo -e \"File does not exist - $File\" \nfi\n\n\n# print the new cron changes\necho -e \"\\n-------------------new cron changes is starting here---------------------\\n\"\nmore $File\necho -e \"\\n-------------------new cron changes is ending here---------------------\\n\"\n\n\n# install / apply new cron file\ncrontab mycron\n\n\n# print the new cron jobs\necho -e \"\\n-------------------new cron jobs is starting here---------------------\\n\"\ncrontab -l\necho -e \"\\n-------------------new cron jobs is ending here---------------------\\n\"\n\n# remove the mycron file\n#rm $File\n"
},
{
"alpha_fraction": 0.7109375,
"alphanum_fraction": 0.7135416865348816,
"avg_line_length": 30.83333396911621,
"blob_id": "5d2e7be54cc5f442414d92da8d8e9c186f3d3ce7",
"content_id": "ca2a3c558f0ec31de90d9aca3f278a39f1280be1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 12,
"path": "/scripts/rsync-update.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/bash\n\n#Mrunal : Take git pull\necho -e \"Process started\"\n\n# Git pull docker code\ndocker exec -it gstudio /bin/sh -c \"cd /home/docker/code/ && git pull https://github.com/mrunal4/gstudio-docker.git\"\n\n# Git pull gstudio app code\ndocker exec -it gstudio /bin/sh -c \"cd /home/docker/code/gstudio/ && git pull https://github.com/gnowledge/gstudio.git\"\n\necho -e \"Process ended\"\n\n\n"
},
{
"alpha_fraction": 0.6412344574928284,
"alphanum_fraction": 0.6543077826499939,
"avg_line_length": 47.432525634765625,
"blob_id": "bdc1ff7155e76a032cfbfbeeca6ac0404c677bbc",
"content_id": "f61c5b0917dbb704168f5af8b9828fdcf644b9ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 13998,
"license_type": "no_license",
"max_line_length": 356,
"num_lines": 289,
"path": "/scripts/backup.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n#--------------------------------------------------------------------#\n# Backup of gstudio \n# File name : Backup-script-mrunal.sh\n# File version : 2.0\n# Created by : Mr. Mrunal M. Nachankar\n# Created on : 26-06-2014 12:04:AM\n# Modified by : Mr. Mrunal M. Nachankar\n# Modified on : Sun Jun 3 20:00:46 IST 2018\n# Description : This file is used for taking backup of gstudio\n# 1. Check for backup directory - If don't exist please create the same.\n#\t\t\t\t\t1.1\tBackup directory : /home/glab/rcs-db-backup/<yyyy-mm-dd> i.e for 26th June 2015 it will be \"/home/glab/rcs-db-backup/2015-06-26\"\n#\t\t\t\t\t1.2 In backup directory we will have 2 sub directories \"rcs\" for rcs repo backup and \"mongodb\" for mongodb database backup (mongodb dump)\n#\t\t\t\t 2. Take backup of rcs via cp (copy -rv) command\n#\t\t\t\t 3. Take backup of mongodb via mongodbdump command\n#\t\t\t\t 4. Create a compressed file (TAR File - tar.bz2)\n#\t\t\t\t 5. Optional - Move the backup directory to /tmp/ after successful creation of tar.bz2 file\n#--------------------------------------------------------------------#\n\nsleep 60; # To start mongo\n\n# get current year\ncur_year=`date +\"%Y\"`\n\n# platform name\nplatform=\"gstudio\"\n\n# get server id (Remove single quote {'} and Remove double quote {\"})\nss_id=`echo $(echo $(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`\n\n# get state code\nstate_code=${ss_id:0:2};\n\n# get server code (Remove single quote {'} and Remove double quote {\"})\nss_code=`echo $(echo $(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_ID_SECONDARY | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`\n\n# get server name (Remove single quote {'} and Remove double quote {\"})\nss_name=`echo $(echo $(more /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py | grep -w GSTUDIO_INSTITUTE_NAME | sed 's/.*=//g')) | sed \"s/'//g\" | sed 's/\"//g'`\n\nsyncthing_base_directory=\"/backups/syncthing\";\nsyncthing_year_directory=\"${syncthing_base_directory}/${cur_year}\";\nsyncthing_variable_directory=\"${cur_year}/${state_code}/${ss_code}-${ss_id}/${platform}\";\nsyncthing_sync_content_source=\"/data/gstudio-exported-users-analytics-csvs /data/gstudio_tools_logs /data/activity-timestamp-csvs /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py /data/git-commit-details.log /data/system-heartbeat /data/qbank/qbank_data.tar.gz\"\nsyncthing_sync_content_destination=\"${syncthing_base_directory}/${syncthing_variable_directory}\";\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nPostgres backup file exist. So performing incremental backup \\n\".\nif [ ! 
-d /data/postgres-dump ]; then\n mkdir /data/postgres-dump\nfi\necho \"pg_dumpall > pg_dump_all.sql;\" | sudo su - postgres ; \n\nmv /var/lib/postgresql/pg_dump_all.sql /data/postgres-dump/\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nBackup /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore/studentResponseFiles in /data/assessment-media/ \\n\" \nrsync -avzPh /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore/studentResponseFiles /data/assessment-media/\n\necho -e \"\\nBackup local_settings.py(/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py) and server_settings.py(/home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py) in /data/ \\n\" \nrsync -avzPh /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/local_settings.py /home/docker/code/gstudio/gnowsys-ndf/gnowsys_ndf/server_settings.py /data/\n\n# ---------------------------------- x ---------------------------------- \n\n# Add soft link for analytics progressCSV file\nif [[ ! -L /softwares/gstudio-exported-users-analytics-csvs ]]; then\n ln -s /data/gstudio-exported-users-analytics-csvs /softwares/gstudio-exported-users-analytics-csvs\nfi\n\n# Add soft link for gstudio_tools_logs file\nif [[ ! -L /softwares/gstudio_tools_logs ]]; then\n ln -s /data/gstudio_tools_logs /softwares/gstudio_tools_logs\nfi\n\n# Add soft link for activity-timestamp-csvs file\nif [[ ! -L /softwares/activity-timestamp-csvs ]]; then\n ln -s /data/activity-timestamp-csvs /softwares/activity-timestamp-csvs\nfi\n\n# Add soft link for assessment-media file\nif [[ ! -L /softwares/assessment-media/studentResponseFiles ]]; then\n if [[ ! -d /softwares/assessment-media ]]; then\n mkdir -p /softwares/assessment-media\n fi\n ln -s /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/webapps/CLIx/datastore/studentResponseFiles /softwares/assessment-media/studentResponseFiles\nfi\n\n# Add soft link for qbank_data.tar.gz file\nif [[ ! -L /softwares/qbank_data.tar.gz ]]; then\n if [[ -f /data/qbank/qbank_data.tar.gz ]]; then\n ln -s /data/qbank/qbank_data.tar.gz /softwares/qbank_data.tar.gz\n else\n echo -e \"Source file not found. 
{Source filename: /data/qbank/qbank_data.tar.gz}\"\n fi\nfi\n\n# ---------------------------------- x ---------------------------------- \n\n# log commit details - in /data/git-commit-details.log - started\necho -e \"\\nDate : $(date) \\n\" > /data/git-commit-details.log\n\necho -e \"\\n\\nDetails of gstudio-docker \\n\" 2>&1 | tee -a /data/git-commit-details.log\ncd /home/docker/code/\n\necho -e \"\\ngstudio-docker : $(pwd) \\n\" 2>&1 | tee -a /data/git-commit-details.log\n$(pwd)\n\necho -e \"\\ngstudio-docker : git branch \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit branch 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio-docker : git log - latest 5 commits \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit log -n 5 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio-docker : git status \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit status 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio-docker : git diff \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit diff 2>&1 | tee -a /data/git-commit-details.log\n\n\necho -e \"\\n\\nDetails of gstudio \\n\" 2>&1 | tee -a /data/git-commit-details.log\ncd /home/docker/code/gstudio/\n\necho -e \"\\ngstudio : $(pwd) \\n\" 2>&1 | tee -a /data/git-commit-details.log\n$(pwd)\n\necho -e \"\\ngstudio : git branch \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit branch 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio : git log - latest 5 commits \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit log -n 5 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio : git status \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit status 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\ngstudio : git diff \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit diff 2>&1 | tee -a /data/git-commit-details.log\n\n\necho -e \"\\n\\nDetails of OpenAssessmentsClient \\n\" 2>&1 | tee -a /data/git-commit-details.log\ncd /home/docker/code/OpenAssessmentsClient/\n\necho -e \"\\n'OpenAssessmentsClient' - strategy adopted for updating oac and oat is as follows: \\n\n- Building 'oac' and 'oat' locally from 'gnowledge/OpenAssessmentsClient' with 'clixserver' branch. \\n\n- Testing it locally and packaging oac, oat as a replacement. \\n\n- This decision is taken because building oac and oat is network dependent operation and sometimes build doesn't happen smoothly. 
\\n\\n\" 2>&1 | tee -a /data/git-commit-details.log\n\n# echo -e \"\\nOpenAssessmentsClient : $(pwd) \\n\" 2>&1 | tee -a /data/git-commit-details.log\n# $(pwd)\n\n# echo -e \"\\nOpenAssessmentsClient : git branch \\n\" 2>&1 | tee -a /data/git-commit-details.log\n# git branch 2>&1 | tee -a /data/git-commit-details.log\n\n# echo -e \"\\nOpenAssessmentsClient : git log - latest 5 commits \\n\" 2>&1 | tee -a /data/git-commit-details.log\n# git log -n 5 2>&1 | tee -a /data/git-commit-details.log\n\n# echo -e \"\\nOpenAssessmentsClient : git status \\n\" 2>&1 | tee -a /data/git-commit-details.log\n# git status 2>&1 | tee -a /data/git-commit-details.log\n\n# echo -e \"\\nOpenAssessmentsClient : git diff \\n\" 2>&1 | tee -a /data/git-commit-details.log\n# git diff 2>&1 | tee -a /data/git-commit-details.log\n\n\necho -e \"\\n\\nDetails of qbank-lite \\n\" 2>&1 | tee -a /data/git-commit-details.log\ncd /home/docker/code/gstudio/gnowsys-ndf/qbank-lite/\n\necho -e \"\\nqbank-lite : $(pwd) \\n\" 2>&1 | tee -a /data/git-commit-details.log\n$(pwd)\n\necho -e \"\\nqbank-lite : git branch \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit branch 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\nqbank-lite : git log - latest 5 commits \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit log -n 5 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\nqbank-lite : git status \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit status 2>&1 | tee -a /data/git-commit-details.log\n\necho -e \"\\nqbank-lite : git diff \\n\" 2>&1 | tee -a /data/git-commit-details.log\ngit diff 2>&1 | tee -a /data/git-commit-details.log\n\n\necho -e \"\\nDate : $(date) \\n\" >> /data/git-commit-details.log\n# log commit details - in /data/git-commit-details.log - ended\n\nif [[ ! -d ${syncthing_sync_content_destination} ]]; then\n mkdir -p ${syncthing_sync_content_destination};\nfi\n\n\nif [[ ! -f /${syncthing_base_directory}/.stfolder ]]; then\n touch /${syncthing_base_directory}/.stfolder;\nfi\n\nif [[ ! -f /${syncthing_base_directory}/.stignore ]]; then\n touch /${syncthing_base_directory}/.stignore;\nfi\n\n\nif [[ ! -f /${syncthing_year_directory}/.stfolder ]]; then\n touch /${syncthing_year_directory}/.stfolder;\nfi\n\nif [[ ! -f /${syncthing_year_directory}/.stignore ]]; then\n touch /${syncthing_year_directory}/.stignore;\nfi\n\nif [[ ! -f /${syncthing_year_directory}/.gnupg ]]; then\n rsync -avPh /root/.gnupg /${syncthing_year_directory}/.gnupg;\nfi\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nCopy content for syncing via syncthing\"\nrsync -avzPh ${syncthing_sync_content_source} ${syncthing_sync_content_destination}/;\n\necho -e \"\\nCopy content for syncing via syncthing also in /softwares\"\nrsync -avzPh ${syncthing_base_directory} /softwares/;\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nCreate tar file of the syncthing content\"\ncd /backups/\ntar -cvzf ${ss_code}-${ss_id}-syncthing.tar.gz syncthing \n\n# Add soft link for analytics tar.gz file\nif [[ ! 
-L /softwares/${ss_code}-${ss_id}-syncthing.tar.gz ]]; then\n ln -s /backups/${ss_code}-${ss_id}-syncthing.tar.gz /softwares/${ss_code}-${ss_id}-syncthing.tar.gz\nfi\n\n# Ref: https://stackoverflow.com/questions/9981570/copying-tarring-files-that-have-been-modified-in-the-last-14-days\necho -e \"\\nCreate tar file of the analytics csvs for last 24hrs, last 7days and last 30days\"\ncd /softwares/syncthing/${syncthing_variable_directory}/\necho \"tar cvzf ${ss_code}-${ss_id}-progressCSV.tar gstudio-exported-users-analytics-csvs ; /softwares/syncthing/${syncthing_variable_directory} ; $(pwd)\"\ntar cvzf ${ss_code}-${ss_id}-progressCSV.tar.gz gstudio-exported-users-analytics-csvs\nif [[ ! -L /softwares/${ss_code}-${ss_id}-progressCSV.tar.gz ]]; then\n ln -s /softwares/syncthing/${syncthing_variable_directory}/${ss_code}-${ss_id}-progressCSV.tar.gz /softwares/${ss_code}-${ss_id}-progressCSV.tar.gz\nfi\ncd /softwares/syncthing/${syncthing_variable_directory}/\nfind gstudio-exported-users-analytics-csvs -name \"*.csv\" -mtime 0 -print | xargs tar cvzf ${ss_code}-${ss_id}-progressCSV-last-24hrs.tar.gz\nif [[ ! -L /softwares/${ss_code}-${ss_id}-progressCSV-last-24hrs.tar.gz ]]; then\n ln -s /softwares/syncthing/${syncthing_variable_directory}/${ss_code}-${ss_id}-progressCSV-last-24hrs.tar.gz /softwares/${ss_code}-${ss_id}-progressCSV-last-24hrs.tar.gz\nfi\nfind gstudio-exported-users-analytics-csvs -name \"*.csv\" -mtime -7 -print | xargs tar cvzf ${ss_code}-${ss_id}-progressCSV-last-7days.tar.gz\nif [[ ! -L /softwares/${ss_code}-${ss_id}-progressCSV-last-7days.tar.gz ]]; then\n ln -s /softwares/syncthing/${syncthing_variable_directory}/${ss_code}-${ss_id}-progressCSV-last-7days.tar.gz /softwares/${ss_code}-${ss_id}-progressCSV-last-7days.tar.gz\nfi\nfind gstudio-exported-users-analytics-csvs -name \"*.csv\" -mtime -30 -print | xargs tar cvzf ${ss_code}-${ss_id}-progressCSV-last-30days.tar.gz\nif [[ ! -L /softwares/${ss_code}-${ss_id}-progressCSV-last-30days.tar.gz ]]; then\n ln -s /softwares/syncthing/${syncthing_variable_directory}/${ss_code}-${ss_id}-progressCSV-last-30days.tar.gz /softwares/${ss_code}-${ss_id}-progressCSV-last-30days.tar.gz\nfi\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nCreate tar file of the gstudio_tools_logs content\"\ncd /data/\ntar -cvzf ${ss_code}-${ss_id}-gstudio_tools_logs.tar.gz gstudio_tools_logs \n\n# Add soft link for analytics tar.gz file\nif [[ ! -L /softwares/${ss_code}-${ss_id}-gstudio_tools_logs.tar.gz ]]; then\n ln -s /data/${ss_code}-${ss_id}-gstudio_tools_logs.tar.gz /softwares/${ss_code}-${ss_id}-gstudio_tools_logs.tar.gz\nfi\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nCreate tar file of the activity-timestamp-csvs content\"\ncd /data/\ntar -cvzf ${ss_code}-${ss_id}-activity-timestamp-csvs.tar.gz activity-timestamp-csvs \n\n# Add soft link for activity-timestamp tar.gz file\nif [[ ! -L /softwares/${ss_code}-${ss_id}-activity-timestamp-csvs.tar.gz ]]; then\n ln -s /data/${ss_code}-${ss_id}-activity-timestamp-csvs.tar.gz /softwares/${ss_code}-${ss_id}-activity-timestamp-csvs.tar.gz\nfi\n\n# ---------------------------------- x ---------------------------------- \n\necho -e \"\\nCreate tar file of the assessment-media content\"\ncd /data/\ntar -cvzf ${ss_code}-${ss_id}-assessment-media.tar.gz assessment-media \n\n# Add soft link for assessment-media tar.gz file\nif [[ ! 
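-L /softwares/${ss_code}-${ss_id}-assessment-media.tar.gz ]]; then\n    ln -s /data/${ss_code}-${ss_id}-assessment-media.tar.gz /softwares/${ss_code}-${ss_id}-assessment-media.tar.gz\nfi\n\n# --- Editor's sketch (assumption, not part of the original script): record a checksum\n# beside the archive so later transfers can be verified; assumes sha256sum is available.\n# sha256sum ${ss_code}-${ss_id}-assessment-media.tar.gz > ${ss_code}-${ss_id}-assessment-media.tar.gz.sha256\n\n# Re-check the soft link (a no-op if it was just created above)\nif [[ ! 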
-L /softwares/${ss_code}-${ss_id}-assessment-media.tar.gz ]]; then\n ln -s /data/${ss_code}-${ss_id}-assessment-media.tar.gz /softwares/${ss_code}-${ss_id}-assessment-media.tar.gz\nfi\n\n# ---------------------------------- x ---------------------------------- \n"
},
{
"alpha_fraction": 0.6822429895401001,
"alphanum_fraction": 0.7289719581604004,
"avg_line_length": 25.75,
"blob_id": "dae800144a0cdfae8daabfa1a0e982dd025ad7a5",
"content_id": "c1ac95c7527cd3887ed43628abb7b568d1f53040",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 8,
"path": "/scripts/start-ka-lite-server.sh",
"repo_name": "gnowledge/gstudio-docker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\necho \"Starting ka-lite (kal3) server\";\ndocker run -itd -h ka-lite.net 8180:80\ndocker start kal3\ndocker exec -it kal3 bash /etc/init.d/ka-lite start\necho \"Started successfully ka-lite (kal3) server\";\n"
}
] | 67 |
Jyzaark/3D-plot-Python
|
https://github.com/Jyzaark/3D-plot-Python
|
27697ef89acdc7061cc4102acd9cb53e504fbb05
|
7514bc38cdd8b95305e8d563c55a2a36097f8531
|
1f1873987e280d3949337bd866d7478e320d50d4
|
refs/heads/master
| 2021-05-25T21:16:49.843624 | 2020-04-06T13:56:16 | 2020-04-06T13:56:16 | 253,922,833 | 1 | 0 | null | 2020-04-07T22:07:09 | 2020-04-07T22:03:00 | 2020-04-06T13:56:17 | null |
[
{
"alpha_fraction": 0.39254170656204224,
"alphanum_fraction": 0.48315343260765076,
"avg_line_length": 26.47747802734375,
"blob_id": "2efbc4077ee21eca3a057c204d552b84fbe731b3",
"content_id": "271de91f2a7eba3a08f085fcaef70b3703f418b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3057,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 111,
"path": "/code_3D_plot.py",
"repo_name": "Jyzaark/3D-plot-Python",
"src_encoding": "UTF-8",
"text": "\"\"\" 3D plot with matplotlib \"\"\"\n\n#import\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nimport matplotlib.pyplot as plt\naxes = plt.gca()\n\n#size of your matrix data Z : n*n with the heights\n#you have to extract the x,y,z data in 3 lists\n\n#for z\n\ndef list_z_n(n):\n c=[]\n for i in range (n):\n for j in range (n):\n c.append(Z[i][j])\n M = np.asarray(c)\n return M.reshape((n,n))\n \ndef zprime_n(n):\n l=[]\n for j in reversed(range (n)):\n for i in reversed(range (n)):\n l.append((list_z_n(n)[i])[j])\n return l\n\n#for x\n\ndef list_x_n(n):\n c = [i for i in reversed(range (0,n))]\n return c\n\n#for y\n\ndef list_y_n(n):\n c = [i for i in reversed(range (0,n))]\n return c\n\n#for the three\n\ndef data_array_n(n): \n c = []\n for i in range (n):\n for j in range (n):\n c.append(i)\n c.append(j)\n c.append(list_z_n(n)[i][j])\n M = np.asarray(c)\n M = M.reshape((n*n,3))\n return(M)\n\n#by linear interpolation\n\ndef graph_3D_n(n):\n #create the 3D plot\n plt.rcParams['legend.fontsize'] = 10\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n #data\n xp = list_x_n(n)\n yl = list_y_n(n)\n #make the plot\n for i in range (0,n): \n yp = [i for k in (range (0,n))]\n zi = list_z_n(n)[i] \n for i in reversed(range(0,n)): \n xl = [i for k in (range (0,n))]\n ziprime = zprime_n(n)[(i*n):(i*n+n)]\n plt.plot(xp, yp, zi,'blue')\n plt.plot(xl,yl, ziprime,'red')\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.xlim(0,n)\n plt.ylim(0,n)\n plt.title(\"linear interpolation\")\n plt.show()\n \n \n#with colours\n\ndef gr_3D_color_n(n):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n Xs = data_array_n(n)[:,0]\n Ys = data_array_n(n)[:,1]\n Zs = data_array_n(n)[:,2]\n surf = ax.plot_trisurf(Xs, Ys, Zs, cmap=cm.jet, linewidths=1)\n #cmap='viridis', edgecolor='none')\n fig.colorbar(surf)\n ax.xaxis.set_major_locator(MaxNLocator(5)) \n ax.yaxis.set_major_locator(MaxNLocator(6)) \n ax.zaxis.set_major_locator(MaxNLocator(5))\n fig.tight_layout() \n ax.set_title('linear interpolation') \n plt.show()\n \n \n#Z example:\n\nZ=[[0, 0, 0, 1, 2, 2, 2, 3, 4, 5, 5, 5, 5, 5, 4, 4, 5, 4, 4, 4, 4, 3, 3, 2, 2],\n [0, 1, 1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 5, 5, 5, 6, 5, 5, 5, 4, 4, 3, 3, 3],\n [1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 4, 4, 3, 3],\n [1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 6, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3],\n [2, 2, 3, 3, 4, 4, 4, 4, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4],\n [2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4],\n [2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 5, 5, 4],\n [3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5],\n [3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5],\n [3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5]] \n \n \n"
},
{
"alpha_fraction": 0.7599999904632568,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 36.5,
"blob_id": "fc2386e519590c7fbedfb7d568e253914abaff4f",
"content_id": "25793337797a955ac4251d1f7f103b8b8e2c12a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 4,
"path": "/README.md",
"repo_name": "Jyzaark/3D-plot-Python",
"src_encoding": "UTF-8",
"text": "# 3D-surface-plot-Python\n\nThe n*n Z matrix can contain heights of water for example, to plot the submarine surface.\nIf you have any question, tel me!\n"
}
] | 2 |
jensen-lawrence/CMASS-WISE-HOD
|
https://github.com/jensen-lawrence/CMASS-WISE-HOD
|
b36edc8f74643120e47b014123b3c09991c3b6ff
|
c3014610a857880793dc0661e32408acc13e0a97
|
969626a9a12eabe505ca9bde3a0a04b793beb6dc
|
refs/heads/main
| 2023-06-28T19:25:09.312216 | 2021-08-04T00:37:36 | 2021-08-04T00:37:36 | 364,038,384 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5063763856887817,
"alphanum_fraction": 0.5107309222221375,
"avg_line_length": 38.219512939453125,
"blob_id": "77f91b7af37bddc179f3d61c11dcd060cd304e49",
"content_id": "e785abd74b2ad589741ddd41fac74d4ab8ee1ff6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3215,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 82,
"path": "/src/get_model_info.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\nimport json\nfrom astropy.cosmology import Planck15\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Parameter Extraction and Dictionary Creation\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Getting model parameters\ndef get_model_params(params_file):\n \"\"\"\n Using the parameters provided in params_file, creates a combined dictionary of the BOSS-CMASS and WISE HOD model\n parameters.\n\n Parameters\n ----------\n params_file : str\n String representation of the path to the .json file containing the BOSS-CMASS and WISE HOD model parameters.\n\n Returns\n -------\n model_params : dict\n Dictionary containing the BOSS-CMASS and WISE HOD parameters.\n \"\"\"\n with open(params_file) as f:\n model_params = json.load(f)\n f.close()\n\n if model_params['halo_params']['cosmo_model'] == 'Planck15':\n model_params['halo_params']['cosmo_model'] = Planck15\n\n return model_params\n\n# Generating model dictionaries\ndef get_model_dicts(params_file):\n \"\"\"\n Using the parameters provided in params_file, creates a dictionary of the BOSS-CMASS HOD model parameters and a\n dictionary of the WISE HOD model parameters that can be read by halomod.\n\n Parameters\n ----------\n params_file : str\n String representation of the path to the .json file containing the BOSS-CMASS and WISE HOD model parameters.\n\n Returns\n -------\n cmass_model : dict\n Dictionary containing the BOSS-CMASS HOD parameters in a format that halomod can read.\n wise_model : dict\n Dictionary containing the WISE HOD parameters in a format that halomod can read.\n \"\"\"\n model_params = get_model_params(params_file)\n halo_params = model_params['halo_params']\n del halo_params['zmin']\n del halo_params['zmax']\n\n cmass_model = halo_params.copy()\n cmass_model['hod_params'] = {\n 'M_min': model_params['CMASS HOD']['M_min']['val'],\n 'M_1': model_params['CMASS HOD']['M_1']['val'],\n 'alpha': model_params['CMASS HOD']['alpha']['val'],\n 'M_0': model_params['CMASS HOD']['M_0']['val'],\n 'sig_logm': model_params['CMASS HOD']['sig_logm']['val'],\n 'central': model_params['CMASS HOD']['central']['val']\n }\n\n wise_model = halo_params.copy()\n wise_model['hod_params'] = {\n 'M_min': model_params['WISE HOD']['M_min']['val'],\n 'M_1': model_params['WISE HOD']['M_1']['val'],\n 'alpha': model_params['WISE HOD']['alpha']['val'],\n 'M_0': model_params['WISE HOD']['M_0']['val'],\n 'sig_logm': model_params['WISE HOD']['sig_logm']['val'],\n 'central': model_params['WISE HOD']['central']['val']\n }\n\n return cmass_model, wise_model\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
{
"alpha_fraction": 0.5582982301712036,
"alphanum_fraction": 0.5682392716407776,
"avg_line_length": 39.24067687988281,
"blob_id": "9529897f7f3fa2c32abf192199a96fc2fbff96a5",
"content_id": "dc4ef00106bcd87162235ab264bdcf48a6537532",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11870,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 295,
"path": "/src/plot_results.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colour import Color\nfrom getdist.mcsamples import MCSamples\nfrom getdist import plots\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Angular Cross-Correlation Plots\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Plot CMASS autocorrelation function w(theta)\ndef cmass_autocorr_plot(cmass_wise_hod, sampled=[], plot_title='', output='', dpi=200):\n \"\"\"\n Generates a plot of the observed and calculated CMASS angular autocorrelation functions.\n\n Parameters\n ----------\n cmass_wise_hod : CMASS_WISE_HOD\n The instance of the CMASS_WISE_HOD class whose observed and calculated CMASS autocorrelations will\n be plotted.\n sampled : array-like, optional\n Array-like object of the form [sampled_param, sampled_range].\n sampled_param : str\n String representation of one of the model parameters to be sampled over during graphing.\n sampled_range : array-like\n Array containing the range of values over which sampled_param will be sampled.\n plot_title : str, optional\n String representation of the plot title.\n output : str, optional\n String representation of the path and file name the plot will be saved as.\n dpi : int, optional\n The dots-per-inch (resolution) of the graph.\n\n Returns\n -------\n None\n \"\"\"\n # Plot while varying one parameter\n if bool(sampled): \n sampled_param = sampled[0]\n sampled_range = sampled[1]\n\n min_colour = Color('#00FFFA')\n max_colour = Color('#4B0083')\n colour_range = list(min_colour.range_to(max_colour, len(sampled_range)))\n\n counter = 0\n for sampled_value in sampled_range:\n cmass_auto = cmass_wise_hod.cmass_auto\n cmass_auto.update(hod_params={sampled_param: sampled_value})\n plt.plot(cmass_wise_hod.thetas[3:], cmass_wise_hod.corr_cmass_auto(), str(colour_range[counter]),\n label=f'{sampled_param} = {sampled_value}')\n counter += 1\n\n # Plot holding all parameters fixed\n else:\n plt.plot(cmass_wise_hod.thetas[3:], cmass_wise_hod.corr_cmass_auto(), color='dodgerblue', label='Model')\n \n plt.errorbar(cmass_wise_hod.thetas[3:], cmass_wise_hod.data[:7,1], np.sqrt(np.diag(cmass_wise_hod.covariance[:7,:7])),\n color='springgreen', label='CMASS Autocorrelation Data')\n\n # Plot formatting\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(r'$\\theta$', fontsize=12)\n plt.xticks(fontsize=10)\n plt.ylabel(r'$w(\\theta)$', fontsize=12)\n plt.yticks(fontsize=10)\n plt.legend(loc='best', fontsize=10)\n\n if bool(plot_title):\n plt.title(plot_title, fontsize=9)\n\n if bool(output):\n plt.savefig(output, dpi=dpi)\n\n else:\n plt.savefig('cmass_autocorr_plot.png', dpi=dpi)\n\n plt.close()\n\n# Plot CMASS-WISE cross-correlation function w(theta)\ndef crosscorr_plot(cmass_wise_hod, sampled=[], plot_title='', output='', dpi=200):\n \"\"\"\n Generates a plot of the observed and calculated CMASS-WISE angular cross-correlation functions.\n\n Parameters\n ----------\n cmass_wise_hod : CMASS_WISE_HOD\n The instance of the CMASS_WISE_HOD class whose observed and calculated CMASS-WISE cross-correlations will\n be plotted.\n sampled : 
array-like, optional\n Array-like object of the form [sampled_param, sampled_range].\n sampled_param : str\n String representation of one of the model parameters to be sampled over during graphing.\n sampled_range : array-like\n Array containing the range of values over which sampled_param will be sampled.\n plot_title : str, optional\n String representation of the plot title.\n output : str, optional\n String representation of the path and file name the plot will be saved as.\n dpi : int, optional\n The dots-per-inch (resolution) of the graph.\n\n Returns\n -------\n None\n \"\"\"\n # Plot while varying one parameter\n if bool(sampled): \n sampled_param = sampled[0]\n sampled_range = sampled[1]\n\n min_colour = Color('#00FFFA')\n max_colour = Color('#4B0083')\n colour_range = list(min_colour.range_to(max_colour, len(sampled_range)))\n\n counter = 0\n for sampled_value in sampled_range:\n cross = cmass_wise_hod.cross\n cross.halo_model_2.update(hod_params={sampled_param: sampled_value})\n plt.plot(cmass_wise_hod.thetas, cmass_wise_hod.corr_cross(), str(colour_range[counter]),\n label=f'{sampled_param} = {sampled_value}')\n counter += 1\n\n # Plot holding all parameters fixed\n else:\n plt.plot(cmass_wise_hod.thetas, cmass_wise_hod.corr_cross(), color='dodgerblue', label='Model')\n \n plt.errorbar(cmass_wise_hod.thetas, cmass_wise_hod.data[7:,1], np.sqrt(np.diag(cmass_wise_hod.covariance[7:,7:])),\n color='springgreen', label='Cross-correlation Data')\n\n # Plot formatting\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(r'$\\theta$', fontsize=12)\n plt.xticks(fontsize=10)\n plt.ylabel(r'$w(\\theta)$', fontsize=12)\n plt.yticks(fontsize=10)\n plt.legend(loc='best', fontsize=10)\n\n if bool(plot_title):\n plt.title(plot_title, fontsize=9)\n\n if bool(output):\n plt.savefig(output, dpi=dpi)\n\n else:\n plt.savefig('crosscorr_plot.png', dpi=dpi)\n\n plt.close()\n\n# Generate titles for w(theta) plots\ndef get_corr_title(params, loglike_func):\n \"\"\"\n Generates titles for the auto-correlation and cross-correlation plots.\n\n Parameters\n ----------\n params : dict\n Dictionary of the CMASS-WISE HOD model parameters.\n loglike_func : function\n Function that calculates the log-likelihood that the observed BOSS-CMASS and WISE data were produced by an\n HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n\n Returns\n -------\n title : str\n Correlation plot title.\n \"\"\"\n # Get CMASS title components\n cmass_s1 = r'$M_{\\min} = $' + f'{params[\"CMASS HOD\"][\"M_min\"][\"val\"]}'\n cmass_s2 = r'$M_{1} = $' + f'{params[\"CMASS HOD\"][\"M_1\"][\"val\"]}'\n cmass_s3 = r'$\\alpha = $' + f'{params[\"CMASS HOD\"][\"alpha\"][\"val\"]}'\n cmass_s4 = r'$M_{0} = $' + f'{params[\"CMASS HOD\"][\"M_0\"][\"val\"]}'\n cmass_s5 = r'$\\sigma_{\\log{M}} = $' + f'{params[\"CMASS HOD\"][\"sig_logm\"][\"val\"]}'\n cmass_s6 = f'central = {params[\"CMASS HOD\"][\"central\"][\"val\"]}'\n cmass_title = f'CMASS : {cmass_s1}, {cmass_s2}, {cmass_s3}, {cmass_s4}, {cmass_s5}, {cmass_s6}\\n'\n\n # Get WISE title components\n wise_s1 = r'$M_{\\min} = $' + f'{params[\"WISE HOD\"][\"M_min\"][\"val\"]}'\n wise_s2 = r'$M_{1} = $' + f'{params[\"WISE HOD\"][\"M_1\"][\"val\"]}'\n wise_s3 = r'$\\alpha = $' + f'{params[\"WISE HOD\"][\"alpha\"][\"val\"]}'\n wise_s4 = r'$M_{0} = $' + f'{params[\"WISE HOD\"][\"M_0\"][\"val\"]}'\n wise_s5 = r'$\\sigma_{\\log{M}} = $' + f'{params[\"WISE HOD\"][\"sig_logm\"][\"val\"]}'\n wise_s6 = f'central = {params[\"WISE HOD\"][\"central\"][\"val\"]}'\n wise_title = f'WISE : 
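{wise_s1}, {wise_s2}, {wise_s3}, {wise_s4}, {wise_s5}, {wise_s6}\\n'\n\n    # Editor's note (suggestion, not in the original): the log-likelihood below is evaluated\n    # at the fixed parameter values from the params file; if the raw float is too long for a\n    # plot title, a rounded form such as f'{likelihood:.2f}' reads better.\n    wise_title = f'WISE : 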
{wise_s1}, {wise_s2}, {wise_s3}, {wise_s4}, {wise_s5}, {wise_s6}\\n'\n\n # Get R title components\n R_s1 = r'$R_{ss} = $' + f'{params[\"galaxy_corr\"][\"R_ss\"][\"val\"]}'\n R_s2 = r'$R_{cs} = $' + f'{params[\"galaxy_corr\"][\"R_cs\"][\"val\"]}'\n R_s3 = r'$R_{sc} = $' + f'{params[\"galaxy_corr\"][\"R_sc\"][\"val\"]}'\n\n # Calculate log-likliehood\n likelihood = loglike_func(\n cmass_M_min = params[\"CMASS HOD\"][\"M_min\"][\"val\"],\n cmass_M_1 = params[\"CMASS HOD\"][\"M_1\"][\"val\"],\n cmass_alpha = params[\"CMASS HOD\"][\"alpha\"][\"val\"],\n cmass_M_0 = params[\"CMASS HOD\"][\"M_0\"][\"val\"],\n cmass_sig_logm = params[\"CMASS HOD\"][\"sig_logm\"][\"val\"],\n wise_M_min = params[\"WISE HOD\"][\"M_min\"][\"val\"],\n wise_M_1 = params[\"WISE HOD\"][\"M_1\"][\"val\"],\n wise_alpha = params[\"WISE HOD\"][\"alpha\"][\"val\"],\n wise_M_0 = params[\"WISE HOD\"][\"M_0\"][\"val\"],\n wise_sig_logm = params[\"WISE HOD\"][\"sig_logm\"][\"val\"],\n R_ss = params[\"galaxy_corr\"][\"R_ss\"][\"val\"],\n R_cs = params[\"galaxy_corr\"][\"R_cs\"][\"val\"],\n R_sc = params[\"galaxy_corr\"][\"R_sc\"][\"val\"]\n )\n R_title = f'{R_s1}, {R_s2}, {R_s3}'\n likelihood_title = f'Log-likelihood = {likelihood}'\n R_title += f' | {likelihood_title}'\n\n # Get complete title\n title = cmass_title + wise_title + R_title\n\n return title\n\n# ----------------------------------------------------------------------------------------------------------------------\n# MCMC Posterior Plot\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Function for plotting posterior distributions from MCMC chains\ndef posterior_plot(samples_path, names, labels, output=''):\n \"\"\"\n Generates a plot of the posterior distribution of the CMASS-WISE HOD model parameters determined by an MCMC chain.\n\n Parameters\n ----------\n samples_path : str\n String representation of the path to the MCMC chain results.\n names : array-like\n Array of strings containing the names of each variable in the posterior distribution. Order should reflect the\n order of the columns in the posterior distribution file.\n labels : array-like\n Array of strings containing the LaTex representations of each variable in the posterior distribution. 
Order\n should reflect the order of the columns in the posterior distribution file.\n output : Str\n String representation of the path and file name the plot will be saved as.\n\n Returns\n -------\n None\n \"\"\"\n # Get all files in target directory\n samples_dir = '/'.join(samples_path.split('/')[:-1]) + '/'\n files_only = [f for f in listdir(samples_dir) if isfile(join(samples_dir, f))]\n\n # Determine which files in target directory are desired MCMC chain results files\n samples_name = samples_path.split('/')[-1]\n sample_files = []\n for file in files_only:\n if (samples_name in file) and ('.txt' in file):\n sample_files.append(file)\n\n # Load data from MCMC chains files\n sample_data = []\n for i in range(len(sample_files)):\n load_sample = np.loadtxt(samples_dir + sample_files[i])\n sample_vals = []\n for j in range(len(load_sample)):\n step_vals = []\n for k in range(len(names)):\n step_vals.append(load_sample[j][k + 2])\n sample_vals.append(step_vals)\n \n # Remove first 30% of data to remove bad values due to burn-in\n sample_vals = sample_vals[int(np.ceil(0.3*len(sample_vals))):]\n sample_data.append(np.array(sample_vals))\n\n # Load MC samples from data\n samples = []\n for i in range(len(sample_files)):\n sample = MCSamples(samples = sample_data[i], names = names, labels = labels, label = f'Chain {i + 1}')\n samples.append(sample)\n\n # Plot posterior distribution\n g = plots.get_subplot_plotter()\n g.triangle_plot(samples, filled=True)\n\n if bool(output):\n save_path = '/'.join(output.split('/')[:-1])\n save_name = output.split('/')[-1]\n g.export(save_name, save_path)\n\n else:\n g.export(samples_name + '.png', samples_dir)\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
{
"alpha_fraction": 0.5645205974578857,
"alphanum_fraction": 0.5753553509712219,
"avg_line_length": 38.53856658935547,
"blob_id": "4f03b0d0ccde3c90cb45ba68b9ddfd7039355383",
"content_id": "e938006b805a8fdac6f7efc0f72f1eae5087984d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28704,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 726,
"path": "/src/cmass_wise_hod.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Basic imports\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline as _spline\n\n# Cosmology imports\nfrom hmf import cached_quantity, parameter\nfrom halomod.integrate_corr import AngularCF, angular_corr_gal\nfrom halomod.cross_correlations import CrossCorrelations\nfrom astropy.cosmology import Planck15\n\n# Custom imports\nfrom get_model_info import get_model_params, get_model_dicts\n\n# ----------------------------------------------------------------------------------------------------------------------\n# AngularCrossCF Class\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass AngularCrossCF(CrossCorrelations):\n \"\"\"\n Framework extension to angular correlation functions.\n \"\"\"\n # Initialize class\n def __init__(self, p1=None, p2=None, theta_min=1e-3 * np.pi / 180.0, theta_max=np.pi / 180.0, theta_num=30,\n theta_log=True, zmin=0.2, zmax=0.4, znum=100, logu_min=-4, logu_max=2.3, unum=100, check_p_norm=True,\n p_of_z=True, exclusion_model=None, exclusion_params=None, **kwargs):\n \"\"\"\n Initializes the AngularCrossCF class.\n\n Parameters\n ----------\n p1 : callable, optional\n The redshift distribution of the sample. This needs not be normalised to 1, as this will occur internally. May\n be either a function of radial distance [Mpc/h] or redshift. If a function of radial distance, `p_of_z` must be\n set to False. Default is a flat distribution in redshift.\n p2 : callable, optional\n See `p1`. This can optionally be a different function against which to cross-correlate. By default is\n equivalent to `p1`.\n theta_min, theta_max : float, optional\n min,max angular separations [Rad].\n theta_num : int, optional\n Number of steps in angular separation.\n theta_log : bool, optional\n Whether to use logspace for theta values.\n zmin, zmax : float, optional\n The redshift limits of the sample distribution. Note that this is in redshit, regardless of the value of\n `p_of_z`.\n znum : int, optional\n Number of steps in redshift grid.\n logu_min, logu_max : float, optional\n min, max of the log10 of radial separation grid [Mpc/h]. 
Must be large enough to let the integral over the 3D\n            correlation function converge.\n        unum : int, optional\n            Number of steps in the u grid.\n        check_p_norm : bool, optional\n            If False, cancels checking the normalisation of `p1` and `p2`.\n        p_of_z : bool, optional\n            Whether `p1` and `p2` are functions of redshift.\n        kwargs : unpacked-dict\n            Any keyword arguments passed down to :class:`halomod.HaloModel`.\n\n        Returns\n        -------\n        None\n        \"\"\"\n        super(AngularCrossCF, self).__init__(**kwargs)\n\n        if self.halo_model_1.z < zmin or self.halo_model_1.z > zmax:\n            warnings.warn(\n                f'Your specified redshift (z = {self.halo_model_1.z}) is not within your selection function, z = ({zmin}, {zmax})'\n            )\n\n        if p1 is None:\n            p1 = flat_z_dist(zmin, zmax)\n\n        self.p1 = p1\n        self.p2 = p2\n        self.zmin = zmin\n        self.zmax = zmax\n        self.znum = znum\n        self.logu_min = logu_min\n        self.logu_max = logu_max\n        self.unum = unum\n        self.check_p_norm = check_p_norm\n        self.p_of_z = p_of_z\n\n        self.theta_min = theta_min\n        self.theta_max = theta_max\n        self.theta_num = theta_num\n        self.theta_log = theta_log\n        \n        self.cosmo = self.halo_model_1.cosmo\n        self.rnum = self.halo_model_1.rnum\n        \n        self.exclusion_model = exclusion_model\n        self.exclusion_params = exclusion_params\n\n    # p1 parameter\n    @parameter(\"param\")\n    def p1(self, val):\n        return val\n\n    # p2 parameter\n    @parameter(\"param\")\n    def p2(self, val):\n        return val\n\n    # p_of_z parameter\n    @parameter(\"model\")\n    def p_of_z(self, val):\n        return val\n\n    # theta_min parameter\n    @parameter(\"res\")\n    def theta_min(self, val):\n        if val < 0:\n            raise ValueError(\"theta_min must be > 0\")\n        return val\n\n    # theta_max parameter\n    @parameter(\"res\")\n    def theta_max(self, val):\n        if val > 180.0:\n            raise ValueError(\"theta_max must be < 180.0\")\n        return val\n\n    # theta_num parameter\n    @parameter(\"res\")\n    def theta_num(self, val):\n        return val\n\n    # theta_log parameter\n    @parameter(\"res\")\n    def theta_log(self, val):\n        return val\n\n    # zmin parameter\n    @parameter(\"param\")\n    def zmin(self, val):\n        return val\n\n    # zmax parameter\n    @parameter(\"param\")\n    def zmax(self, val):\n        return val\n\n    # znum parameter\n    @parameter(\"res\")\n    def znum(self, val):\n        return val\n\n    # logu_min parameter\n    @parameter(\"res\")\n    def logu_min(self, val):\n        return val\n\n    # logu_max parameter\n    @parameter(\"res\")\n    def logu_max(self, val):\n        return val\n\n    # unum parameter\n    @parameter(\"res\")\n    def unum(self, val):\n        return val\n\n    # check_p_norm parameter\n    @parameter(\"option\")\n    def check_p_norm(self, val):\n        return val\n\n    # Redshift distribution grid\n    @cached_quantity\n    def zvec(self):\n        return np.linspace(self.zmin, self.zmax, self.znum)\n\n    # Radial separation grid [Mpc/h]\n    @cached_quantity\n    def uvec(self):\n        return np.logspace(self.logu_min, self.logu_max, self.unum)\n\n    # Radial distance grid (corresponds to zvec) [Mpc/h]\n    @cached_quantity\n    def xvec(self):\n        return self.cosmo.comoving_distance(self.zvec).value\n\n    # Angular separations [Rad]\n    @cached_quantity\n    def theta(self):\n        if self.theta_min > self.theta_max:\n            raise ValueError(\"theta_min must be less than theta_max\")\n\n        if self.theta_log:\n            return np.logspace(\n                np.log10(self.theta_min), np.log10(self.theta_max), self.theta_num\n            )\n        else:\n            return np.linspace(self.theta_min, self.theta_max, self.theta_num)\n\n    # Physical separation grid [Mpc/h]\n    @cached_quantity\n    def r(self):\n        rmin = np.sqrt(\n            (10 ** self.logu_min) ** 2 + self.theta.min() ** 2 * self.xvec.min() ** 2\n        )\n        rmax = np.sqrt(\n            (10 ** self.logu_max) ** 2 + 
self.theta.max() ** 2 * self.xvec.max() ** 2\n        )\n        return np.logspace(np.log10(rmin), np.log10(rmax), self.rnum)\n\n    # Angular correlation function w(theta) from Blake+08, Eq. 33\n    @cached_quantity\n    def angular_corr_gal(self):\n        def xi(r):\n            s = _spline(self.halo_model_1.r, self.corr_cross - 1.0)\n            return s(r)\n        \n        return angular_corr_gal(\n            self.theta,\n            xi,\n            self.p1,\n            self.zmin,\n            self.zmax,\n            self.logu_min,\n            self.logu_max,\n            znum=self.znum,\n            unum=self.unum,\n            p2=self.p2,\n            check_p_norm=self.check_p_norm,\n            cosmo=self.cosmo,\n            p_of_z=self.p_of_z\n        )\n\n# ----------------------------------------------------------------------------------------------------------------------\n# CMASS_WISE_HOD Class\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass CMASS_WISE_HOD(AngularCrossCF):\n    \"\"\"\n    HOD model for the cross-correlation of the galaxies observed at a redshift of z ~ 0.5 in the BOSS-CMASS and\n    WISE galaxy surveys.\n    \"\"\"\n    # Initialize class\n    def __init__(self, cmass_redshift_file, wise_redshift_file, data_file, covariance_file, params_file,\n                 cross_hod_model, diag_covariance=False, exclusion_model=None, exclusion_params=None):\n        \"\"\"\n        Initializes the CMASS_WISE_HOD class.\n\n        Parameters\n        ----------\n        cmass_redshift_file : str\n            String representation of the path to the .txt file containing the BOSS-CMASS redshift distribution.\n        wise_redshift_file : str\n            String representation of the path to the .txt file containing the WISE redshift distribution.\n        data_file : str\n            String representation of the path to the .txt file containing the CMASS autocorrelation and CMASS-WISE\n            cross-correlation data as functions of R [Mpc/h].\n        covariance_file : str\n            String representation of the path to the .txt file containing the joint covariance matrix for the CMASS\n            autocorrelation and CMASS-WISE cross-correlation data.\n        params_file : str\n            String representation of the path to the .json file containing the parameters for the BOSS-CMASS and WISE\n            HOD models.\n        cross_hod_model : AngularCrossCF\n            HOD model for cross-correlations provided by an instance of the AngularCrossCF class or any of its child\n            classes.\n        diag_covariance : bool, optional\n            If True, only the diagonal values of the covariance matrix are used in calculations. 
If False, the full\n covariance matrix is used.\n\n Returns\n -------\n None\n \"\"\"\n # Initializing redshift distribution attributes\n self.cmass_redshift_file = cmass_redshift_file\n self.cmass_redshift = np.loadtxt(cmass_redshift_file, dtype=float)\n self.wise_redshift_file = wise_redshift_file\n self.wise_redshift = np.loadtxt(wise_redshift_file, dtype=float)\n\n # Initializing data attribute\n self.data_file = data_file\n self.data = np.loadtxt(data_file, dtype=float)\n\n # Initializing covariance matrix attribute\n self.covariance_file = covariance_file \n if diag_covariance:\n self.covariance = np.diag(np.diag(np.loadtxt(covariance_file, dtype=float)))\n else:\n self.covariance = np.loadtxt(covariance_file, dtype=float)\n\n # Initializing model attributes\n self.params_file = params_file \n model_params = get_model_params(params_file)\n self.model_params = model_params\n\n cmass_model, wise_model = get_model_dicts(params_file)\n self.cmass_model = cmass_model \n self.wise_model = wise_model \n\n # Initializing redshift limit attributes\n z = model_params['halo_params']['z']\n zmin = model_params['halo_params']['zmin']\n zmax = model_params['halo_params']['zmax']\n self.z = z\n self.zmin = zmin\n self.zmax = zmax\n\n # Initializing attributes for remaining parameters\n self.cross_hod_model = cross_hod_model \n self.diag_covariance = diag_covariance \n self.exclusion_model = exclusion_model\n self.exclusion_params = exclusion_params \n\n # Calculating radial and angular values\n distance = Planck15.comoving_distance(z).value * Planck15.H0.value/100.0\n thetas = self.data[7:,0]/distance\n self.thetas = thetas\n\n # CMASS redshift calculations\n cmass_nz = np.loadtxt(cmass_redshift_file)\n cmass_z = np.linspace(0, 4.00, 401)\n cmass_zbin = 0.5 * (cmass_z[1:] + cmass_z[:-1])\n cmass_zfunc = _spline(cmass_zbin, cmass_nz)\n\n zrange = np.linspace(zmin, zmax, 100)\n chirange = Planck15.comoving_distance(zrange).value * Planck15.H0.value/100.0\n d_chirange = np.gradient(chirange)\n norm = np.sum(cmass_zfunc(zrange) * d_chirange * Planck15.H(zrange).value)\n cmass_zfunc_orig = _spline(Planck15.comoving_distance(cmass_zbin).value * Planck15.H0.value/100.0,\n cmass_nz * Planck15.H(cmass_zbin).value/norm)\n\n def cmass_zfunc(chi):\n out = np.zeros_like(chi) \n out = cmass_zfunc_orig(chi) \n out[chi < Planck15.comoving_distance(zmin).value * Planck15.H0.value/100.0] = 0 \n out[chi > Planck15.comoving_distance(zmax).value * Planck15.H0.value/100.0] = 0 \n return out\n\n norm = np.sum(cmass_zfunc(chirange) * d_chirange)\n cmass_zfunc = _spline(chirange, cmass_zfunc(chirange)/norm)\n\n # WISE redshift calculations\n wise_zdist = np.loadtxt(wise_redshift_file)\n wise_zbin = wise_zdist[:,0]\n wise_nz = wise_zdist[:,1]\n wise_zfunc = _spline(wise_zbin, wise_nz)\n\n zrange = np.linspace(0, 4, 1000)\n chirange = Planck15.comoving_distance(zrange).value * Planck15.H0.value/100.0\n d_chirange = np.gradient(chirange)\n norm = np.sum(wise_zfunc(zrange) * d_chirange * Planck15.H(zrange).value)\n wise_zfunc = _spline(Planck15.comoving_distance(wise_zbin).value * Planck15.H0.value/100.0,\n wise_nz * Planck15.H(wise_zbin).value/norm)\n\n # CMASS angular autocorrelation computation\n self.cmass_auto = AngularCF(\n p1 = cmass_zfunc,\n theta_min = np.min(thetas[3:]),\n theta_max = np.max(thetas[3:]),\n theta_num = len(thetas[3:]),\n theta_log = True,\n p_of_z = False,\n z = z,\n zmin = zmin,\n zmax = zmax,\n check_p_norm = False,\n hod_model = 'Zheng05',\n hod_params = cmass_model['hod_params'],\n logu_min = 
-5,\n logu_max = 2.2,\n unum = 500,\n exclusion_model = exclusion_model,\n exclusion_params = exclusion_params\n )\n\n # CMASS-WISE angular cross-correlation computation\n self.cross = AngularCrossCF(\n p1 = cmass_zfunc,\n p2 = wise_zfunc,\n theta_min = np.min(thetas),\n theta_max = np.max(thetas),\n theta_num = len(thetas),\n theta_log = True,\n p_of_z = False,\n zmin = zmin,\n zmax = zmax,\n cross_hod_model = cross_hod_model,\n check_p_norm = False,\n halo_model_1_params = cmass_model,\n halo_model_2_params = wise_model,\n logu_min = -5,\n logu_max = 2.2,\n unum = 500,\n exclusion_model = exclusion_model,\n exclusion_params = exclusion_params\n )\n\n # Summary of model attributes for comparison\n self.summary = (cmass_redshift_file, wise_redshift_file, data_file, covariance_file, params_file,\n cross_hod_model, diag_covariance, exclusion_model, exclusion_params)\n\n # Printable representation of class instance\n def __str__(self):\n \"\"\"\n Provides a printable representation of an instance of the CMASS_WISE_HOD class.\n \"\"\"\n rep_str = '-'*80\n rep_str += '\\nInstance of the CrossHOD class.'\n rep_str += '\\n' + '-'*80\n rep_str += '\\nSources data from the files'\n rep_str += f'\\n- Redshift 1: {self.cmass_redshift_file}'\n rep_str += f'\\n- Redshift 2: {self.wise_redshift_file}'\n rep_str += f'\\n- Data: {self.data_file}'\n rep_str += f'\\n- Covariance: {self.covariance_file}'\n rep_str += f'\\n- Model Parameters: {self.params_file}'\n rep_str += '\\n' + '-'*80\n rep_str += f'\\nUses the HOD models'\n rep_str += f'\\n- Model 1: {self.cmass_model}'\n rep_str += '\\n'\n rep_str += f'\\n- Model 2: {self.wise_model}'\n rep_str += '\\n' + '-'*80\n\n return rep_str\n\n # Equivalence of class instances\n def __eq__(self, other):\n \"\"\"\n Compares an instance of the CMASS_WISE_HOD class to any other object.\n\n Parameters\n ----------\n other : any\n Any other object being compared against.\n\n Returns\n -------\n are_equal : bool\n True if other is an instance of the CMASS_WISE_HOD class with identical parameters, and False otherwise.\n \"\"\"\n are_equal = isinstance(other, CMASS_WISE_HOD) and (self.summary == other.summary)\n return are_equal\n\n # Calculate CMASS angular autocorrelation\n def corr_cmass_auto(self, update_cmass_params={}):\n \"\"\"\n Executes Halomod's angular_corr_gal method on self.cmass_auto to compute the angular autocorrelation of the\n BOSS-CMASS HOD model.\n\n Parameters\n ----------\n update_cmass_params : dict, optional\n Dictionary containing parameters to udpate the BOSS-CMASS HOD model parameters.\n\n Returns\n -------\n cmass_auto_corr : array_like\n Array of calculated BOSS-CMASS autocorrelation values.\n \"\"\"\n # Get CMASS autocorrelation\n cmass_auto = self.cmass_auto \n\n # Update CMASS autocorrelation if updated parameters are provided\n if update_cmass_params != {}:\n print('-'*80)\n print('CMASS parameters before update', cmass_auto.hod_params)\n cmass_auto.hod_params.update(update_cmass_params)\n print('CMASS parameters after update', cmass_auto.hod_params)\n\n print('\\n')\n print('cmass_auto attributes')\n print('- theta', cmass_auto.theta)\n print('- corr_auto_tracer_fnc', cmass_auto.corr_auto_tracer_fnc)\n print('- corr_1h_auto_tracer_fnc', cmass_auto.corr_1h_auto_tracer_fnc)\n print('- corr_2h_auto_tracer_fnc', cmass_auto.corr_2h_auto_tracer_fnc)\n print('- p1', cmass_auto.p1)\n print('- p2', cmass_auto.p2)\n print('- zmin', cmass_auto.zmin)\n print('- zmax', cmass_auto.zmax)\n print('- logu_min', cmass_auto.logu_min)\n print('- logu_max', 
cmass_auto.logu_max)\n        print('- znum', cmass_auto.znum)\n        print('- unum', cmass_auto.unum)\n        print('- check_p_norm', cmass_auto.check_p_norm)\n        print('- cosmo', cmass_auto.cosmo)\n        print('- p_of_z', cmass_auto.p_of_z)\n\n        # Calculate CMASS angular autocorrelation\n        cmass_auto_corr = cmass_auto.angular_corr_gal\n\n        return cmass_auto_corr\n\n    # Calculate CMASS-WISE angular cross-correlation\n    def corr_cross(self, update_cmass_params={}, update_wise_params={}):\n        \"\"\"\n        Executes Halomod's angular_corr_gal method on self.cross to compute the angular cross-correlation of the\n        BOSS-CMASS HOD model and the WISE HOD model.\n\n        Parameters\n        ----------\n        update_cmass_params : dict, optional\n            Dictionary containing parameters to update the BOSS-CMASS HOD model parameters.\n        update_wise_params : dict, optional\n            Dictionary containing parameters to update the WISE HOD model parameters.\n\n        Returns\n        -------\n        cross_corr : array_like\n            Array of calculated BOSS-CMASS and WISE cross-correlation values.\n        \"\"\"\n        # Get cross-correlation\n        cross = self.cross\n\n        # Update cross-correlation if updated CMASS parameters are provided\n        if update_cmass_params != {}:\n            print('\\n')\n            print('CMASS parameters before update', cross.halo_model_1.hod_params)\n            cross.halo_model_1.update(hod_params = update_cmass_params)\n            print('CMASS parameters after update', cross.halo_model_1.hod_params)\n\n        # Update cross-correlation if updated WISE parameters are provided\n        if update_wise_params != {}:\n            print('WISE parameters before update', cross.halo_model_2.hod_params)\n            cross.halo_model_2.update(hod_params = update_wise_params)\n            print('WISE parameters after update', cross.halo_model_2.hod_params)\n\n        print('\\n')\n        print('cross attributes')\n        print('- theta', cross.theta)\n        print('- p1', cross.p1)\n        print('- p2', cross.p2)\n        print('- zmin', cross.zmin)\n        print('- zmax', cross.zmax)\n        print('- logu_min', cross.logu_min)\n        print('- logu_max', cross.logu_max)\n        print('- znum', cross.znum)\n        print('- unum', cross.unum)\n        print('- check_p_norm', cross.check_p_norm)\n        print('- cosmo', cross.cosmo)\n        print('- p_of_z', cross.p_of_z)\n\n        # Calculate angular cross-correlation\n        cross_corr = cross.angular_corr_gal\n        return cross_corr\n\n    # Components of the log-likelihood\n    def loglike_components(self, cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm, wise_M_min, wise_M_1,\n                           wise_alpha, wise_M_0, wise_sig_logm, R_ss, R_cs, R_sc):\n        \"\"\"\n        Calculates the individual components of the log-likelihood that the observed BOSS-CMASS and WISE data were\n        produced by an HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n\n        Parameters\n        ----------\n        cmass_M_min : float\n            The minimum halo mass necessary for a CMASS dark matter halo to host a central galaxy.\n        cmass_M_1 : float\n            Mass parameter for CMASS satellite galaxies.\n        cmass_alpha : float\n            The exponent of the galaxy mass power law for CMASS galaxies.\n        cmass_M_0 : float\n            Mass parameter for CMASS satellite galaxies.\n        cmass_sig_logm : float\n            The step function smoothing parameter for CMASS dark matter halos.\n        wise_M_min : float\n            The minimum halo mass necessary for a WISE dark matter halo to host a central galaxy.\n        wise_M_1 : float\n            Mass parameter for WISE satellite galaxies.\n        wise_alpha : float\n            The exponent of the galaxy mass power law for WISE galaxies.\n        wise_M_0 : float\n            Mass parameter for WISE satellite galaxies.\n        wise_sig_logm : float\n            The step function smoothing parameter for WISE dark matter halos.\n        R_ss : float\n            The 
satellite-satellite correlation parameter for CMASS and WISE galaxies.\n R_cs : float\n The central-satellite correlation parameter for CMASS and WISE galaxies.\n R_sc : float\n The satellite-central correlation parameter for CMASS and WISE galaxies.\n\n Returns\n -------\n loglike_dict : dict\n Dictionary containing the autocorrelation-only, cross-correlation-only, and total log-likelihood values.\n \"\"\"\n # Get data and covariance\n data = self.data[:,1]\n cov = self.covariance\n\n # Initialize parameter update dictionaries\n cmass_params = {\n 'M_min': cmass_M_min,\n 'M_1': cmass_M_1,\n 'alpha': cmass_alpha,\n 'M_0': cmass_M_0,\n 'sig_logm': cmass_sig_logm\n }\n wise_params = {\n 'M_min': wise_M_min,\n 'M_1': wise_M_1,\n 'alpha': wise_alpha,\n 'M_0': wise_M_0,\n 'sig_logm': wise_sig_logm\n }\n\n # Calculate CMASS autocorrelation and CMASS-WISE cross-correlation\n cmass_auto_corr = self.corr_cmass_auto(update_cmass_params=cmass_params)\n cross_corr = self.corr_cross(update_cmass_params=cmass_params, update_wise_params=wise_params)\n\n # Calculate autocorrelation-only log-likelihood\n auto_chisq = np.linalg.multi_dot([data[:7] - cmass_auto_corr, np.linalg.inv(cov[:7,:7]), data[:7] - cmass_auto_corr])\n auto_loglike = -0.5 * auto_chisq\n\n # Calculate cross-correlation-only log-likelihood\n cross_chisq = np.linalg.multi_dot([data[7:] - cross_corr, np.linalg.inv(cov[7:,7:]), data[7:] - cross_corr])\n cross_loglike = -0.5 * cross_chisq\n\n # Calculate total log-likelihood\n total_corr = np.concatenate((cmass_auto_corr, cross_corr))\n total_chisq = np.linalg.multi_dot([data - total_corr, np.linalg.inv(cov), data - total_corr])\n total_loglike = -0.5 * total_chisq\n\n # Put results in dictionary\n loglike_dict = {\n 'auto_loglike': auto_loglike,\n 'cross_loglike': cross_loglike,\n 'total_loglike': total_loglike\n }\n\n return loglike_dict\n\n # Total log-likelihood\n def loglike(self, cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm, wise_M_min, wise_M_1,\n wise_alpha, wise_M_0, wise_sig_logm, R_ss, R_cs, R_sc):\n \"\"\"\n Calculates the total log-likelihood that the observed BOSS-CMASS and WISE data were produced by an\n HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n\n Parameters\n ----------\n See `loglike_components`.\n\n Returns\n -------\n total_loglike : float\n The total log-likelihood value.\n \"\"\"\n loglike_dict = self.loglike_components(cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm,\n wise_M_min, wise_M_1, wise_alpha, wise_M_0, wise_sig_logm, R_ss,\n R_cs, R_sc)\n total_loglike = loglike_dict['total_loglike']\n return total_loglike\n\n # Components of the nbar-weighted log-likelihood\n def nbar_components(self, cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm, wise_M_min, wise_M_1,\n wise_alpha, wise_M_0, wise_sig_logm, R_ss, R_cs, R_sc):\n \"\"\"\n Calculates the individual components of the nbar-weighted log-likelihood that the observed BOSS-CMASS and\n WISE data were produced by an HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n\n Parameters\n ----------\n See `loglike_components`.\n\n Returns\n -------\n loglike_dict : dict\n Dictionary containing the autocorrelation-only, cross-correlation-only, and total log-likelihood values,\n as well as the data-based and model-based galaxy number densities, and their associated number density\n corrections.\n \"\"\"\n sig_nbar = 0.1\n\n # Get CMASS number densities and calculate CMASS number density correction\n cmass_nbar_data = 
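self.model_params['nbar']['CMASS']\n        # Editor's note (added comments): sig_nbar = 0.1 above encodes a 10% fractional\n        # uncertainty on the observed number densities, so each correction term below is a\n        # Gaussian prior of the form -0.5 * ((n_data - n_model) / (0.1 * n_data))**2.\n        # Sanity re-read (editor's addition): the data-side density comes straight from the\n        # params file loaded at construction time.\n        assert cmass_nbar_data == 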
self.model_params['nbar']['CMASS']\n cmass_nbar_model = self.cross.halo_model_1.mean_tracer_den\n cmass_nbar_correction = -0.5 * ((cmass_nbar_data - cmass_nbar_model)/(sig_nbar * cmass_nbar_data))**2\n\n # Get WISE number densities and calculate WISE number density correction\n wise_nbar_data = self.model_params['nbar']['WISE']\n wise_nbar_model = self.cross.halo_model_2.mean_tracer_den \n wise_nbar_correction = -0.5 * ((wise_nbar_data - wise_nbar_model)/(sig_nbar * wise_nbar_data))**2\n\n # Get unweighted log-likelihood components\n loglike_dict = self.loglike_components(cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm,\n wise_M_min, wise_M_1, wise_alpha, wise_M_0, wise_sig_logm, R_ss,\n R_cs, R_sc)\n\n # Update components dictionary\n loglike_dict['total_loglike'] += cmass_nbar_correction + wise_nbar_correction\n loglike_dict['cmass_nbar_data'] = cmass_nbar_data \n loglike_dict['cmass_nbar_model'] = cmass_nbar_model\n loglike_dict['cmass_nbar_correction'] = cmass_nbar_correction\n loglike_dict['wise_nbar_data'] = wise_nbar_data \n loglike_dict['wise_nbar_model'] = wise_nbar_model\n loglike_dict['wise_nbar_correction'] = wise_nbar_correction\n\n return loglike_dict\n\n # Total nbar-weighted log-likelihood\n def nbar_loglike(self, cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm, wise_M_min, wise_M_1,\n wise_alpha, wise_M_0, wise_sig_logm, R_ss, R_cs, R_sc):\n \"\"\"\n Calculates the total nbar-weighted log-likelihood that the observed BOSS-CMASS and WISE data were produced\n by an HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n\n Parameters\n ----------\n See `loglike_components`.\n\n Returns\n -------\n total_loglike : float\n The total nbar-weighted log-likelihood value.\n \"\"\"\n loglike_dict = self.nbar_components(cmass_M_min, cmass_M_1, cmass_alpha, cmass_M_0, cmass_sig_logm,\n wise_M_min, wise_M_1, wise_alpha, wise_M_0, wise_sig_logm, R_ss,\n R_cs, R_sc)\n total_loglike = loglike_dict['total_loglike']\n return total_loglike\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
{
"alpha_fraction": 0.6357616186141968,
"alphanum_fraction": 0.7086092829704285,
"avg_line_length": 24.33333396911621,
"blob_id": "20ff7a0eb32bd5f71390334c156e02feb1bbcbd2",
"content_id": "2118163e6e15f1196e83dbccc7910e51b310a99a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/chains.sh",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#SBATCH --account=def-wperciva\n#SBATCH --ntasks=4\n#SBATCH --mem-per-cpu=128G\n#SBATCH --time=0-22:00\nsrun -n 4 python3 main.py --action mcmc"
},
{
"alpha_fraction": 0.44824886322021484,
"alphanum_fraction": 0.48855409026145935,
"avg_line_length": 36.4468879699707,
"blob_id": "596a76efea60dcd2f58c3788c7449890306298f1",
"content_id": "45db2f8a2c79cfbabc571c56ba5e3c99014745a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10222,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 273,
"path": "/main.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Basic imports\nimport sys\nimport argparse\nimport numpy as np\n\n# Cosmology imports\nfrom halomod.cross_correlations import HODCross\n\n# Custom imports\nsys.path.append('src')\nfrom get_model_info import get_model_params\nfrom cmass_wise_hod import CMASS_WISE_HOD\nfrom model_variations import ModelVariations\nfrom eval_model import optimize_model, mcmc, gridsearch\nfrom plot_results import cmass_autocorr_plot, crosscorr_plot, get_corr_title, posterior_plot\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Packages, Data, and Parameter Paths\n# ----------------------------------------------------------------------------------------------------------------------\n\npackages_path = '/home/jptlawre/packages'\ncmass_redshift_file = 'data/dr12cmassN.txt'\nwise_redshift_file = 'data/blue.txt'\ndata_file = 'data/combined_data.txt'\ncovariance_file = 'data/combined_cov.txt'\nparams_file = 'param/cmass_wise_params.json'\nparams = get_model_params(params_file)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# VariableCorr Class\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass VariableCorr(HODCross):\n \"\"\"\n Correlation relation for constant cross-correlation pairs\n \"\"\"\n R_ss = params['galaxy_corr']['R_ss']['val']\n R_cs = params['galaxy_corr']['R_cs']['val']\n R_sc = params['galaxy_corr']['R_sc']['val']\n\n _defaults = {\"R_ss\": R_ss, \"R_cs\": R_cs, \"R_sc\": R_sc}\n\n def R_ss(self, m):\n return self.params[\"R_ss\"]\n\n def R_cs(self, m):\n return self.params[\"R_cs\"]\n\n def R_sc(self, m):\n return self.params[\"R_sc\"]\n\n def self_pairs(self, m):\n \"\"\"\n The expected number of cross-pairs at a separation of zero\n \"\"\"\n return 0 \n\n# ----------------------------------------------------------------------------------------------------------------------\n# Class Instances\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Instance of CMASS_WISE_HOD\nnoexclusion_hod = CMASS_WISE_HOD(\n cmass_redshift_file = cmass_redshift_file,\n wise_redshift_file = wise_redshift_file,\n data_file = data_file,\n covariance_file = covariance_file,\n params_file = params_file,\n cross_hod_model = VariableCorr,\n diag_covariance = False, \n exclusion_model = None,\n exclusion_params = None\n)\n\nngmatched_hod = CMASS_WISE_HOD(\n cmass_redshift_file = cmass_redshift_file,\n wise_redshift_file = wise_redshift_file,\n data_file = data_file,\n covariance_file = covariance_file,\n params_file = params_file,\n cross_hod_model = VariableCorr,\n diag_covariance = False, \n exclusion_model = 'NgMatched',\n exclusion_params = None\n)\n\n# Instance of ModelVariations\ncmass_wise_variations = ModelVariations(params_file)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Program Execution\n# ----------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n # Initialize argument 
parser\n parser = argparse.ArgumentParser(description=\"\"\"HOD model for the cross-correlation of BOSS-CMASS and WISE\n galaxies at a redshift of z ~ 0.5.\"\"\")\n parser.add_argument('-a', '--action', type=str, metavar='action',\n help=\"\"\"Function executed by the program. Options are: optimize, mcmc, gridsearch, corr_plots,\n posterior_plot.\"\"\")\n args = parser.parse_args()\n\n # Verify argument is valid\n assert args.action in ('optimize', 'mcmc', 'gridsearch', 'corr_plots',\n 'posterior_plot', 'test'), 'Invalid action chosen.'\n\n # Optimizer action\n if args.action == 'optimize':\n \n # Set optimizer output\n output = 'results/optim1'\n if output == '':\n output = input('Enter optimizer output path: ')\n \n # Run optimizer\n optimize_model(\n model_variations = cmass_wise_variations,\n loglike_func = ngmatched_hod.nbar_loglike,\n method = 'scipy',\n packages_path = packages_path,\n output = output,\n debug = True\n )\n\n # MCMC action\n elif args.action == 'mcmc':\n\n # Set MCMC output\n output = 'results/mcmc1'\n if output == '':\n output = input('Enter MCMC output path: ')\n\n # Run MCMC chains\n mcmc(\n model_variations = cmass_wise_variations,\n loglike_func = ngmatched_hod.nbar_loglike,\n packages_path = packages_path,\n output = output,\n debug = True\n )\n\n # Grid search action\n elif args.action == 'gridsearch':\n\n # Set grid search output\n output = 'results/grid1'\n if output == '':\n output = input('Enter grid search output path: ')\n\n # Run grid search\n gridsearch(\n params = params,\n loglike_func = ngmatched_hod.nbar_components,\n output = output\n )\n\n # Correlation plots action\n elif args.action == 'corr_plots':\n\n # Set autocorr plot output\n auto_output = ''\n if auto_output == '':\n auto_output = input('Enter autocorrelation plot output path: ')\n\n # Set cross-corr plot output\n cross_output = ''\n if cross_output == '':\n cross_output = input('Enter cross-correlation plot output path: ')\n\n # Generate correlation plots\n title = get_corr_title(params, ngmatched_hod.nbar_components)\n\n cmass_autocorr_plot(\n cmass_wise_hod = ngmatched_hod,\n sampled = [],\n plot_title = title,\n output = auto_output,\n dpi = 200\n ) \n\n crosscorr_plot(\n cmass_wise_hod = ngmatched_hod,\n sampled = [],\n plot_title = title,\n output = cross_output,\n dpi = 200\n )\n\n # Posterior plot action\n elif args.action == 'posterior_plot':\n\n # Set samples, names, and labels\n samples_path = ''\n if samples_path == '':\n samples_path = input('Enter path to MCMC chain results: ')\n\n names = []\n if names == []:\n names = input('Enter parameter names: ')\n names = list(map(lambda x: x.strip(), names.split(',')))\n\n labels = []\n if labels == []:\n labels = input('Enter LaTeX labels for graph axes: ')\n labels = list(map(lambda x: x.strip(), labels.split(',')))\n\n # Set posterior plot output\n output = ''\n if output == '':\n output = input('Enter posterior plot output path: ')\n\n # Generate posterior plot\n posterior_plot(\n samples_path = samples_path,\n names = names,\n labels = labels,\n output = output\n )\n\n # Test action\n elif args.action == 'test':\n print('TESTING BRANCH')\n\n hod_params_list = [\n (13.04, 14.0, 0.950, 13.16, 0.43, 13.09, 13.775, 0.970, 13.44, 0.60),\n (13.04, 14.0, 0.975, 13.16, 0.47, 13.09, 13.775, 0.990, 13.44, 0.58),\n (12.94, 14.1, 0.950, 13.16, 0.43, 13.19, 13.775, 1.000, 13.64, 0.60),\n (13.04, 14.0, 0.975, 13.26, 0.48, 13.09, 13.675, 1.025, 13.54, 0.55),\n (13.14, 13.9, 1.000, 13.06, 0.53, 12.99, 13.875, 1.050, 13.44, 0.65),\n (12.94, 
14.1, 1.025, 13.16, 0.43, 13.19, 13.775, 0.950, 13.64, 0.60),\n (13.04, 14.0, 1.050, 13.26, 0.48, 13.09, 13.675, 0.975, 13.54, 0.55),\n (13.14, 13.9, 0.950, 13.06, 0.53, 12.99, 13.875, 1.000, 13.44, 0.65),\n (12.94, 14.1, 0.975, 13.16, 0.43, 13.19, 13.775, 1.025, 13.64, 0.60),\n (13.04, 14.0, 1.000, 13.26, 0.48, 13.09, 13.675, 1.050, 13.54, 0.55)\n ]\n\n # idx = 6\n\n for idx in range(len(hod_params_list)):\n\n exclusion_ngmatched = ngmatched_hod.nbar_components(\n cmass_M_min = hod_params_list[idx][0],\n cmass_M_1 = hod_params_list[idx][1],\n cmass_alpha = hod_params_list[idx][2],\n cmass_M_0 = hod_params_list[idx][3],\n cmass_sig_logm = hod_params_list[idx][4],\n wise_M_min = hod_params_list[idx][5],\n wise_M_1 = hod_params_list[idx][6],\n wise_alpha = hod_params_list[idx][7],\n wise_M_0 = hod_params_list[idx][8],\n wise_sig_logm = hod_params_list[idx][9],\n R_ss = params[\"galaxy_corr\"][\"R_ss\"][\"val\"],\n R_cs = params[\"galaxy_corr\"][\"R_cs\"][\"val\"],\n R_sc = params[\"galaxy_corr\"][\"R_sc\"][\"val\"]\n )\n\n print(f'Set {idx + 1}: NgMatched Exclusion ' + '-'*80)\n print(f'- Autocorrelation log-likelihood: {exclusion_ngmatched[\"auto_loglike\"]}')\n print(f'- Cross-correlation log-likelihood: {exclusion_ngmatched[\"cross_loglike\"]}')\n print(f'- Total log-likelihood: {exclusion_ngmatched[\"total_loglike\"]}')\n print(f'- CMASS data nbar: {exclusion_ngmatched[\"cmass_nbar_data\"]}')\n print(f'- CMASS model nbar: {exclusion_ngmatched[\"cmass_nbar_model\"]}')\n print(f'- CMASS nbar correction: {exclusion_ngmatched[\"cmass_nbar_correction\"]}')\n print(f'- WISE data nbar: {exclusion_ngmatched[\"wise_nbar_data\"]}')\n print(f'- WISE model nbar: {exclusion_ngmatched[\"wise_nbar_model\"]}')\n print(f'- WISE nbar correction: {exclusion_ngmatched[\"wise_nbar_correction\"]}')\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
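The `VariableCorr` class above supplies constant `R_ss`, `R_cs` and `R_sc` coefficients to the cross-HOD machinery. What those coefficients control is the within-halo cross-pair count defined in halomod's `cross_correlations` module later in this dump, e.g. `<Ns1*Ns2> = <Ns1><Ns2> + sigma1*sigma2*R_ss`. Below is a self-contained numerical sketch of that relation; the occupations and R values are placeholders, not fitted values.

```python
import numpy as np

# Toy illustration of what R_ss controls, following the pair-count definition
# quoted in halomod's cross_correlations module later in this dump:
#   <Ns1 * Ns2> = <Ns1><Ns2> + sigma1 * sigma2 * R_ss
# The occupations and R_ss values below are placeholders, not fitted values.
m = np.logspace(12, 15, 4)          # toy halo masses
ns_cmass = (m / 10**13.5) ** 1.0    # toy CMASS satellite occupation
ns_wise = (m / 10**13.0) ** 0.8     # toy WISE satellite occupation
sig_cmass = np.sqrt(ns_cmass)       # Poisson scatter, as in halomod's HODPoisson
sig_wise = np.sqrt(ns_wise)

for R_ss in (0.0, 0.5):
    ss_pairs = ns_cmass * ns_wise + sig_cmass * sig_wise * R_ss
    print(f"R_ss = {R_ss}: {ss_pairs}")
```

With `R_ss = 0` the pair count reduces to the product of the mean occupations, which is exactly the uncorrelated case.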
{
"alpha_fraction": 0.502947986125946,
"alphanum_fraction": 0.5043773651123047,
"avg_line_length": 41.732826232910156,
"blob_id": "7090955f7d81964f5a50e5bb149eb06cd6d68b96",
"content_id": "894305b2b8af2d00187e80e5ace0d396c90bd70e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5597,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 131,
"path": "/src/model_variations.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\nimport numpy as np\nfrom get_model_info import get_model_params\n\n# ----------------------------------------------------------------------------------------------------------------------\n# ModelVariations Class\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass ModelVariations:\n \"\"\"\n Generates the parameter space necessary for optimization and statistical analysis to be performed on the \n CMASS-WISE HOD model.\n \"\"\"\n # Initialize class\n def __init__(self, params_file):\n \"\"\"\n Initializes the ModelVariations class.\n\n Parameters\n ----------\n params_file : str\n String representation of the path to the .json file containing the parameters for the BOSS-CMASS and WISE\n HOD models.\n\n Returns\n -------\n None\n \"\"\"\n # Initialize parameter file attribute and get model parameters\n self.params_file = params_file \n model_params = get_model_params(params_file)\n\n # Rename CMASS and WISE parameter keys\n cmass_keys = list(model_params['CMASS HOD'].keys())\n new_cmass_keys = ['cmass_' + key for key in cmass_keys]\n wise_keys = list(model_params['WISE HOD'].keys())\n new_wise_keys = ['wise_' + key for key in wise_keys]\n for i in range(len(cmass_keys)):\n model_params['CMASS HOD'][new_cmass_keys[i]] = model_params['CMASS HOD'][cmass_keys[i]]\n del model_params['CMASS HOD'][cmass_keys[i]]\n model_params['WISE HOD'][new_wise_keys[i]] = model_params['WISE HOD'][wise_keys[i]]\n del model_params['WISE HOD'][wise_keys[i]]\n\n # Initializelists for keys and values\n all_params_dict = {**model_params['CMASS HOD'], **model_params['WISE HOD'], **model_params['galaxy_corr']}\n del all_params_dict['cmass_central']\n del all_params_dict['wise_central']\n all_params_keys = list(all_params_dict.keys())\n all_params_vals = list(all_params_dict.values())\n \n # Determine fixed and sampled keys and values\n sample_params = []\n sample_range_mins = []\n sample_range_maxs = []\n sample_ranges = []\n sample_values = []\n\n fixed_params = []\n fixed_values = []\n\n for i in range(len(all_params_keys)):\n val = all_params_vals[i]\n if val['sample']:\n sample_params.append(all_params_keys[i])\n sample_range_mins.append(val['sample_min'])\n sample_range_maxs.append(val['sample_max'])\n sample_ranges.append(np.linspace(val['sample_min'], val['sample_max'], val['sample_div']))\n sample_values.append(val['val'])\n else:\n fixed_params.append(all_params_keys[i])\n fixed_values.append(val['val'])\n\n self.sample_params = sample_params\n self.sample_range_mins = sample_range_mins\n self.sample_range_maxs = sample_range_maxs \n self.sample_ranges = sample_ranges \n self.sample_values = sample_values\n\n self.fixed_params = fixed_params\n self.fixed_values = fixed_values\n\n # Create parameters dictionary\n params_dict = {}\n for i in range(len(self.fixed_params)):\n params_dict[self.fixed_params[i]] = self.fixed_values[i]\n for i in range(len(self.sample_params)):\n params_dict[self.sample_params[i]] = {'prior': {'min': self.sample_range_mins[i],\n 'max': self.sample_range_maxs[i]},\n 'ref': self.sample_values[i],\n 'latex': self.sample_params[i]}\n self.sampling_params_dict = params_dict\n\n # Printable representation of class instance\n def __str__(self):\n 
\"\"\"\n Provides a printable representation of an instance of the ModelVariations class.\n \"\"\"\n rep_str = '-'*80\n rep_str += '\\nInstance of the ModelVariations class.'\n rep_str += '\\n' + '-'*80\n for i in range(len(self.sample_params)):\n rep_str += f'\\n- Samples {self.sample_params[i]}'\n rep_str += f' over [{self.sample_range_mins[i]}, {self.sample_range_maxs[i]}]'\n rep_str += f' with reference value {self.sample_values[i]}'\n rep_str += '\\n' + '-'*80\n for i in range(len(self.fixed_params)):\n rep_str += f'\\n- Holds {self.fixed_params[i]} fixed at {self.fixed_values[i]}'\n rep_str += '\\n' + '-'*80\n\n # Equivalence of class instances\n def __eq__(self, other):\n \"\"\"\n Compares an instance of the ModelVariations class to any other object.\n\n Parameters\n ----------\n other : any\n Any other object being compared against.\n\n Returns\n -------\n are_equal : bool\n True if other is an instance of the ModelVariations class with identical parameters, and False otherwise.\n \"\"\"\n are_equal = isinstance(other, ModelVariations) and (self.sampled == other.sampled) and (self.fixed == other.fixed)\n return are_equal\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
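`ModelVariations` above reconstructs its sampling space purely from the structure of the parameter file: each entry must carry `val` and `sample` keys, sampled entries additionally need `sample_min`, `sample_max` and `sample_div`, and a `central` key in each HOD section is dropped before sampling. A hypothetical minimal fragment of `cmass_wise_params.json` in that shape is sketched below; the key names are taken from the accesses in the class, but every value is a placeholder.

```python
import json

# Hypothetical minimal parameter file in the shape ModelVariations reads.
# Field names come from the attribute accesses in the class; values are guesses.
params = {
    "CMASS HOD": {
        "M_min": {"val": 13.0, "sample": True,
                  "sample_min": 12.5, "sample_max": 13.5, "sample_div": 5},
        "alpha": {"val": 1.0, "sample": False},
        "central": True,  # present in the file but deleted before sampling
    },
    "WISE HOD": {
        "M_min": {"val": 13.1, "sample": False},
        "alpha": {"val": 1.0, "sample": False},
        "central": True,
    },
    "galaxy_corr": {
        "R_ss": {"val": 0.0, "sample": False},
        "R_cs": {"val": 0.0, "sample": False},
        "R_sc": {"val": 0.0, "sample": False},
    },
}
print(json.dumps(params, indent=2))
```

Entries with `"sample": true` become Cobaya priors via `sampling_params_dict`; everything else is passed through as a fixed value.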
{
"alpha_fraction": 0.5927055478096008,
"alphanum_fraction": 0.6166816353797913,
"avg_line_length": 31.28764533996582,
"blob_id": "d988d6755c9aec6292e3ada22f391e23fd9f9253",
"content_id": "d42ebf5819f3ee8cd2882894ed0a2a14e1d81f91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16725,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 518,
"path": "/halomod/hod.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModule for defining HOD classes.\n\nThe HOD class exposes methods that deal directly with occupation statistics and don't interact with\nthe broader halo model. These include things like the average satellite/central occupation, total\noccupation, and \"pair counts\".\n\nThe HOD concept is here meant to be as general as possible. While traditionally the HOD has been\nthought of as a number count occupation, the base class here is just as amenable to \"occupations\"\nthat could be defined over the real numbers -- i.e. continuous occupations. This could be achieved\nvia each \"discrete\" galaxy being marked by some real quantity (eg. each galaxy is on average a\ncertain brightness, or contains a certain amount of gas), or it could be achieved without assuming\nany kind of discrete tracer, and just assuming a matching of some real field to the underlying halo\nmass. Thus *all* kinds of occupations can be dealt with in these classes.\n\nFor the sake of consistency of implementation, all classes contain the notion that there may be a\n\"satellite\" component of the occupation, and a \"central\" component. This is to increase fidelity in\ncases where it is known that a discrete central object will necessarily be in the sample before any\nother object, because it is inherently \"brighter\" (for whatever selection the sample uses). It is\nnot necessary to assume some distinct central component, so for models in which this does not make\nsense, it is safe to set the central component to zero.\n\nThe most subtle/important thing to note about these classes are the assumptions surrounding the\nsatellite/central decomposition. So here are the assumptions:\n\n1. The average satellite occupancy is taken to be the average over *all* haloes, with and without\n centrals. This has subtle implications for how to mock up the galaxy population, because if one\n requires a central before placing a satellite, then the avg. number of satellites placed into\n *available* haloes is increased if the central occupation is less than 1.\n\n2. We provide the option to enforce a \"central condition\", that is, the requirement that a central\n be found in a halo before any satellites are observed. To enforce this, set ``central=True`` in\n the constructor of any HOD. This has some ramifications:\n\n3. If the central condition is enforced, then for all HOD classes (except see point 5), the mean\n satellite occupancy is modified. If the defined occupancy is Ns', then the returned occupancy is\n Ns = Nc*Ns'. This merely ensures that Ns=0 when Nc=0. The user should note that this will change\n the interpretation of parameters in the Ns model, unless Nc is a simple step function.\n\n4. The pair-wise counts involve a term <Nc*Ns>. When the central condition is enforced, this reduces\n trivially to <Ns>. However, if the central condition is not enforced we *assume* that the\n variates Nc and Ns are uncorrelated, and use <Nc*Ns> = <Nc><Ns>.\n\n5. A HOD class that is defined with the central condition intrinsically satisfied, the class variable\n ``central_condition_inherent`` can be set to True in the class definition, which will avoid the\n extra modification. Do note that just because the class is specified such that the central\n condition can be satisfied (i.e. 
<Ns> is 0 when <Nc> is zero), and thus the\n ``central_condition_inherent`` is True, does not mean that it is entirely enforced.\n The pairwise counts still depend on whether the user assumes that the central condition is\n enforced or not, which must be set at instantiation.\n\n6. By default, the central condition is *not* enforced.\n\"\"\"\n\n\nimport numpy as np\nimport scipy.special as sp\nfrom hmf import Component\nfrom abc import ABCMeta, abstractmethod\n\n\nclass HOD(Component):\n \"\"\"\n Halo Occupation Distribution model base class.\n\n This class should not be called directly. The user\n should call a derived class.\n\n As with all :class:`hmf._framework.Model` classes,\n each class should specify its parameters in a _defaults dictionary at\n class-level.\n\n The exception to this is the M_min parameter, which is defined for every\n model (it may still be defined to modify the default). This parameter acts\n as the one that may be set via the mean density given all the other\n parameters. If the model has a sharp cutoff at low mass, corresponding to\n M_min, the extra parameter sharp_cut may be set to True, allowing for simpler\n setting of M_min via this route.\n\n See the derived classes in this module for examples of how to define derived\n classes of :class:`HOD`.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n _defaults = {\"M_min\": 11.0}\n sharp_cut = False\n central_condition_inherent = False\n\n def __init__(self, central=False, **model_parameters):\n\n self._central = central\n super(HOD, self).__init__(**model_parameters)\n\n @abstractmethod\n def nc(self, m):\n \"\"\"Defines the average number of centrals at mass m.\n\n Useful for populating catalogues.\"\"\"\n pass\n\n @abstractmethod\n def ns(self, m):\n \"\"\"Defines the average number of satellites at mass m.\n\n Useful for populating catalogues.\"\"\"\n pass\n\n @abstractmethod\n def _central_occupation(self, m):\n \"\"\"The occupation function of the tracer.\"\"\"\n pass\n\n @abstractmethod\n def _satellite_occupation(self, m):\n \"\"\"The occupation function of the tracer.\"\"\"\n pass\n\n @abstractmethod\n def ss_pairs(self, m):\n \"\"\"The average amount of the tracer coupled with itself in haloes of mass m, <T_s T_s>.\"\"\"\n pass\n\n @abstractmethod\n def cs_pairs(self, m):\n \"\"\"The average amount of the tracer coupled with itself in haloes of mass m, <T_s T_c>.\"\"\"\n pass\n\n @abstractmethod\n def sigma_satellite(self, m):\n \"\"\"The standard deviation of the total tracer amount in haloes of mass m.\"\"\"\n pass\n\n @abstractmethod\n def sigma_central(self, m):\n \"\"\"The standard deviation of the total tracer amount in haloes of mass m.\"\"\"\n pass\n\n def central_occupation(self, m):\n \"\"\"The occupation function of the central component.\"\"\"\n return self._central_occupation(m)\n\n def satellite_occupation(self, m):\n \"\"\"The occupation function of the satellite (or profile-dependent) component.\"\"\"\n if self._central and not self.central_condition_inherent:\n return self.nc(m) * self._satellite_occupation(m)\n else:\n return self._satellite_occupation(m)\n\n def total_occupation(self, m):\n \"\"\"The total (average) occupation of the halo.\"\"\"\n return self.central_occupation(m) + self.satellite_occupation(m)\n\n def total_pair_function(self, m):\n \"\"\"The total weight of the occupation paired with itself.\"\"\"\n return self.ss_pairs(m) + self.cs_pairs(m)\n\n def unit_conversion(self, cosmo, z):\n \"\"\"A factor to convert the total occupation to a desired unit.\"\"\"\n return 1.0\n\n 
@property\n def mmin(self):\n \"\"\"Defines a reasonable minimum mass to set for this HOD to converge when integrated.\"\"\"\n return self.params[\"M_min\"]\n\n\nclass HODNoCentral(HOD):\n \"\"\"Base class for all HODs which have no concept of a central/satellite split.\"\"\"\n\n def __init__(self, **model_parameters):\n super(HODNoCentral, self).__init__(**model_parameters)\n self._central = False\n\n def nc(self, m):\n return 0\n\n def cs_pairs(self, m):\n return 0\n\n def _central_occupation(self, m):\n return 0\n\n def sigma_central(self, m):\n return 0\n\n\nclass HODBulk(HODNoCentral):\n \"\"\"Base class for HODs with no discrete tracers, just an assignment of tracer to the halo.\"\"\"\n\n def ns(self, m):\n return 0\n\n def ss_pairs(self, m):\n return self.satellite_occupation(m) ** 2\n\n\nclass HODPoisson(HOD):\n \"\"\"\n Base class for discrete HOD's with poisson-distributed satellite population.\n\n Also assumes that the amount of the tracer is statistically independent of the number\n counts, but its average is directly proportional to it.\n\n This accounts for all Poisson-distributed number-count HOD's (which is all traditional HODs).\n \"\"\"\n\n def nc(self, m):\n return self.central_occupation(m) / self._tracer_per_central(m)\n\n def ns(self, m):\n return self.satellite_occupation(m) / self._tracer_per_satellite(m)\n\n def _tracer_per_central(self, m):\n return 1\n\n def _tracer_per_satellite(self, m):\n return self._tracer_per_central(m)\n\n def ss_pairs(self, m):\n return self.satellite_occupation(m) ** 2\n\n def cs_pairs(self, m):\n if self._central:\n return self.satellite_occupation(m) * self._tracer_per_central(m)\n else:\n return self.central_occupation(m) * self.satellite_occupation(m)\n\n def sigma_central(self, m):\n co = self.central_occupation(m)\n return np.sqrt(co * (1 - co))\n\n def sigma_satellite(self, m):\n return np.sqrt(self.satellite_occupation(m))\n\n\nclass Zehavi05(HODPoisson):\n \"\"\"\n Three-parameter model of Zehavi (2005)\n\n Parameters\n ----------\n M_min : float, default = 11.6222\n Minimum mass of halo that supports a central galaxy\n M_1 : float, default = 12.851\n Mass of a halo which on average contains 1 satellite\n alpha : float, default = 1.049\n Index of power law for satellite galaxies\n \"\"\"\n\n _defaults = {\"M_min\": 11.6222, \"M_1\": 12.851, \"alpha\": 1.049}\n sharp_cut = True\n\n def _central_occupation(self, m):\n \"\"\"\n Number of central galaxies at mass M\n \"\"\"\n n_c = np.zeros_like(m)\n n_c[m >= 10 ** self.params[\"M_min\"]] = 1\n\n return n_c\n\n def _satellite_occupation(self, m):\n \"\"\"\n Number of satellite galaxies at mass M\n \"\"\"\n return (m / 10 ** self.params[\"M_1\"]) ** self.params[\"alpha\"]\n\n\nclass Zheng05(HODPoisson):\n \"\"\"\n Five-parameter model of Zheng (2005)\n\n Parameters\n ----------\n M_min : float, default = 11.6222\n Minimum mass of halo that supports a central galaxy\n M_1 : float, default = 12.851\n Mass of a halo which on average contains 1 satellite\n alpha : float, default = 1.049\n Index of power law for satellite galaxies\n sig_logm : float, default = 0.26\n Width of smoothed cutoff\n M_0 : float, default = 11.5047\n Minimum mass of halo containing satellites\n \"\"\"\n\n _defaults = {\n \"M_min\": 11.6222,\n \"M_1\": 12.851,\n \"alpha\": 1.049,\n \"M_0\": 11.5047,\n \"sig_logm\": 0.26,\n }\n\n def _central_occupation(self, M):\n \"\"\"\n Number of central galaxies at mass M\n \"\"\"\n return 0.5 * (\n 1 + sp.erf((np.log10(M) - self.params[\"M_min\"]) / 
self.params[\"sig_logm\"])\n )\n\n def _satellite_occupation(self, M):\n \"\"\"\n Number of satellite galaxies at mass M\n \"\"\"\n ns = np.zeros_like(M)\n ns[M > 10 ** self.params[\"M_0\"]] = (\n (M[M > 10 ** self.params[\"M_0\"]] - 10 ** self.params[\"M_0\"])\n / 10 ** self.params[\"M_1\"]\n ) ** self.params[\"alpha\"]\n return ns\n\n @property\n def mmin(self):\n return self.params[\"M_min\"] - 5 * self.params[\"sig_logm\"]\n\n\nclass Contreras13(HODPoisson):\n \"\"\"\n Nine-parameter model of Contreras (2013)\n\n Parameters\n ----------\n M_min : float, default = 11.6222\n Minimum mass of halo that supports a central galaxy\n M_1 : float, default = 12.851\n Mass of a halo which on average contains 1 satellite\n alpha : float, default = 1.049\n Index of power law for satellite galaxies\n sig_logm : float, default = 0.26\n Width of smoothed cutoff\n M_0 : float, default = 11.5047\n Minimum mass of halo containing satellites\n fca : float, default = 0.5\n fca\n fcb : float, default = 0\n fcb\n fs : float, default = 1\n fs\n delta : float, default = 1\n delta\n x : float, default = 1\n x\n \"\"\"\n\n _defaults = {\n \"M_min\": 11.6222,\n \"M_1\": 12.851,\n \"alpha\": 1.049,\n \"M_0\": 11.5047,\n \"sig_logm\": 0.26,\n \"fca\": 0.5,\n \"fcb\": 0,\n \"fs\": 1,\n \"delta\": 1,\n \"x\": 1,\n }\n\n def _central_occupation(self, m):\n \"\"\"\n Number of central galaxies at mass M\n \"\"\"\n return self.params[\"fcb\"] * (1 - self.params[\"fca\"]) * np.exp(\n -np.log10(m / 10 ** self.params[\"M_min\"]) ** 2\n / (2 * (self.params[\"x\"] * self.params[\"sig_logm\"]) ** 2)\n ) + self.params[\"fca\"] * (\n 1\n + sp.erf(\n np.log10(m / 10 ** self.params[\"M_min\"])\n / self.params[\"x\"]\n / self.params[\"sig_logm\"]\n )\n )\n\n def _satellite_occupation(self, m):\n \"\"\"\n Number of satellite galaxies at mass M\n \"\"\"\n return (\n self.params[\"fs\"]\n * (\n 1\n + sp.erf(np.log10(m / 10 ** self.params[\"M_1\"]) / self.params[\"delta\"])\n )\n * (m / 10 ** self.params[\"M_1\"]) ** self.params[\"alpha\"]\n )\n\n\nclass Geach12(Contreras13):\n \"\"\"\n 8-parameter model of Geach et. al. (2012).\n\n This is identical to `Contreras13`, but with `x==1`.\n \"\"\"\n\n pass\n\n\nclass Tinker05(Zehavi05):\n \"\"\"3-parameter model of Tinker et. al. 
(2005).\"\"\"\n\n _defaults = {\"M_min\": 11.6222, \"M_1\": 12.851, \"M_cut\": 12.0}\n central_condition_inherent = True\n\n def _satellite_occupation(self, m):\n out = self.central_occupation(m)\n return (\n out\n * np.exp(-(10 ** self.params[\"M_cut\"]) / (m - 10 ** self.params[\"M_min\"]))\n * (m / 10 ** self.params[\"M_1\"])\n )\n\n\nclass Zehavi05WithMax(Zehavi05):\n \"\"\"Zehavi05 model in which a maximum halo mass for occupancy also exists.\"\"\"\n\n _defaults = {\n \"M_min\": 11.6222,\n \"M_1\": 12.851,\n \"alpha\": 1.049,\n \"M_max\": 18, # Truncation mass\n }\n\n def _central_occupation(self, m):\n \"\"\"\n Number of central galaxies at mass M\n \"\"\"\n n_c = np.zeros_like(m)\n n_c[\n np.logical_and(\n m >= 10 ** self.params[\"M_min\"], m <= 10 ** self.params[\"M_max\"]\n )\n ] = 1\n\n return n_c\n\n def _satellite_occupation(self, m):\n \"\"\"\n Number of satellite galaxies at mass M\n \"\"\"\n return (m / 10 ** self.params[\"M_1\"]) ** self.params[\"alpha\"]\n\n\nclass Zehavi05Marked(Zehavi05WithMax):\n \"\"\"\n The Zehavi05 model, with a possibility that the quantity is not number counts.\n\n NOTE: this should not give different results to Zehavi05 for any normalised statistic.\n \"\"\"\n\n _defaults = {\n \"M_min\": 11.6222,\n \"M_1\": 12.851,\n \"logA\": 0.0,\n \"alpha\": 1.049,\n \"M_max\": 18.0,\n }\n\n def sigma_central(self, m):\n co = super(Zehavi05Marked, self)._central_occupation(m)\n return np.sqrt(self._tracer_per_central(m) * co * (1 - co))\n\n def _tracer_per_central(self, m):\n return 10 ** self.params[\"logA\"]\n\n def _central_occupation(self, m):\n return super(Zehavi05Marked, self)._central_occupation(\n m\n ) * self._tracer_per_central(m)\n\n def _satellite_occupation(self, m):\n return super(Zehavi05Marked, self)._satellite_occupation(\n m\n ) * self._tracer_per_satellite(m)\n\n\nclass ContinuousPowerLaw(HODBulk):\n \"\"\"\n A continuous HOD which is tuned to match the Zehavi05 total occupation except for normalisation.\n \"\"\"\n\n _defaults = {\n \"M_min\": 11.6222,\n \"M_1\": 12.851,\n \"logA\": 0.0,\n \"alpha\": 1.049,\n \"M_max\": 18.0,\n \"sigma_A\": 0, # The (constant) standard deviation of the tracer\n }\n sharp_cut = True\n\n def _satellite_occupation(self, m):\n alpha = self.params[\"alpha\"]\n M_1 = 10 ** self.params[\"M_1\"]\n A = 10 ** self.params[\"logA\"]\n M_min = 10 ** self.params[\"M_min\"]\n M_max = 10 ** self.params[\"M_max\"]\n\n return np.where(\n np.logical_and(m >= M_min, m <= M_max), A * ((m / M_1) ** alpha + 1.0), 0,\n )\n\n def sigma_satellite(self, m):\n return np.ones_like(m) * self.params[\"sigma_A\"]\n\n\nclass Constant(HODBulk):\n \"\"\"A toy model HOD in which every halo has the same amount of the tracer on average.\"\"\"\n\n _defaults = {\"logA\": 0, \"M_min\": 11.0, \"sigma_A\": 0}\n\n def _satellite_occupation(self, m):\n return np.where(m > 10 ** self.params[\"M_min\"], 10 ** self.params[\"logA\"], 0)\n\n def sigma_satellite(self, m):\n return np.ones_like(m) * self.params[\"sigma_A\"]\n"
},
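The `Zheng05` class above is the five-parameter occupation that both tracers in this project build on. For quick sanity checks without instantiating halomod, the same two formulas can be evaluated standalone; this sketch copies the expressions and default values directly from the class, and the last line applies the `Ns -> Nc*Ns'` central-condition modification described in the module docstring.

```python
import numpy as np
from scipy.special import erf

# The Zheng (2005) occupation functions exactly as coded in Zheng05 above,
# with defaults copied from its _defaults dict.
def n_central(M, M_min=11.6222, sig_logm=0.26):
    return 0.5 * (1 + erf((np.log10(M) - M_min) / sig_logm))

def n_satellite(M, M_0=11.5047, M_1=12.851, alpha=1.049):
    ns = np.zeros_like(M, dtype=float)
    mask = M > 10 ** M_0
    ns[mask] = ((M[mask] - 10 ** M_0) / 10 ** M_1) ** alpha
    return ns

M = np.logspace(11, 15, 9)
print(n_central(M))                   # smooth step from 0 to 1 around M_min
print(n_central(M) * n_satellite(M))  # satellites with the central condition enforced
```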
{
"alpha_fraction": 0.7387387156486511,
"alphanum_fraction": 0.7567567825317383,
"avg_line_length": 54.5,
"blob_id": "20276f4c31dad6bd659e205a5a11e0f9507112b6",
"content_id": "54f77f734aa83cbf23f172305702f64529685c93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# CMASS-WISE-HOD\nHOD model for the cross-correlation of BOSS-CMASS and WISE galaxies at a redshift of z ~ 0.5.\n"
},
{
"alpha_fraction": 0.5350985527038574,
"alphanum_fraction": 0.5437660813331604,
"avg_line_length": 37.181819915771484,
"blob_id": "0867d76900f000a6009fcf3e8a83b1138ba6ae2b",
"content_id": "05ff36493a1975b23251053bdea9df1ecdf69d2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10499,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 275,
"path": "/src/eval_model.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "# ----------------------------------------------------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------------------------------------------------\n\nfrom cobaya.model import get_model\nfrom cobaya.run import run\nimport numpy as np\nfrom itertools import product \n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Optimization\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef optimize_model(model_variations, loglike_func, method, packages_path, output='', debug=False):\n \"\"\"\n Finds the values of the model CMASS-WISE HOD model parameters that minimize the log-likelihood using Cobaya's\n `run` function.\n\n Parameters\n ----------\n model_variations : ModelVariations\n Instance of the ModelVariations class that contains the HOD parameters to be sampled and the range over which\n they are sampled, and the HOD parameters to be held fixed.\n loglike_func : function\n Function that calculates the log-likelihood that the observed BOSS-CMASS and WISE data were produced by an\n HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n method : str\n Optimization method to be used by Cobaya. Options are 'scipy' and 'bobyqa'.\n packages_path : str\n String representation of the path to the Cobaya `packages` directory.\n output : str, optional\n String representation of the path to where the optimization results are saved.\n debug : bool, optional\n Determines whether Cobaya's debug mode should be used for more detailed console outputs.\n \"\"\"\n # Initialize model information dictionary\n info = {\n 'params': model_variations.sampling_params_dict,\n 'likelihood': {'my_cl_like': {'external': loglike_func}},\n 'theory': {},\n 'packages_path': packages_path,\n 'sampler': {'minimize': \n {'method': method,\n 'ignore_prior': False,\n 'override_scipy': {'method': 'Nelder-Mead'},\n 'max_evals': 1e6,\n 'confidence_for_unbounded': 0.9999995}},\n }\n\n if bool(output):\n info['output'] = output\n\n if debug:\n info['debug'] = True\n\n # Run optimizer\n return run(info)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Markov Chain Monte Carlo\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef mcmc(model_variations, loglike_func, packages_path, output='', debug=False):\n \"\"\"\n Runs Markov Chain Monte Carlo (MCMC) chains on the CMASS-WISE HOD model using Cobaya's `run` function.\n\n Parameters\n ----------\n model_variations : ModelVariations\n Instance of the ModelVariations class that contains the HOD parameters to be sampled and the range over which\n they are sampled, and the HOD parameters to be held fixed.\n loglike_func : function\n Function that calculates the log-likelihood that the observed BOSS-CMASS and WISE data were produced by an\n HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n packages_path : str\n String representation of the path to the Cobaya `packages` directory.\n output : str, optional\n String representation of the path to where the optimization results are saved.\n debug : bool, optional\n Determines whether Cobaya's debug mode should be used for more detailed console outputs.\n \"\"\"\n # Initialize model 
information dictionary\n    info = {\n        'params': model_variations.sampling_params_dict,\n        'likelihood': {'my_cl_like': {'external': loglike_func}},\n        'theory': {},\n        'packages_path': packages_path,\n        'sampler': {'mcmc': \n            {'learn_proposal': True,\n             'oversample': True,\n             'learn_proposal_Rminus1_max': 10,\n             'proposal_scale': 1.0,\n             'Rminus1_stop': 0.05,\n             'burn_in': '100d',\n             'max_tries': '100d'}},\n    }\n\n    if bool(output):\n        info['output'] = output\n\n    if debug:\n        info['debug'] = True\n    \n    # Run chains\n    return run(info)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Grid Search\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Get grid search parameter ranges\ndef get_gridsearch_range(params, hod, param):\n    \"\"\"\n    Determines the grid search parameter space for a given model parameter.\n\n    Parameters\n    ----------\n    params : dict\n        Dictionary of the CMASS-WISE HOD model parameters.\n    hod : str\n        The individual HOD model from which the parameter space will be determined.\n    param : str\n        The HOD model parameter whose parameter space will be determined.\n\n    Returns\n    -------\n    param_vals : array-like\n        Array representation of the parameter's parameter space.\n    \"\"\"\n    if params[hod][param]['sample']:\n        param_vals = np.linspace(\n            params[hod][param]['sample_min'],\n            params[hod][param]['sample_max'],\n            params[hod][param]['sample_div']\n        )\n    else:\n        param_vals = np.array([params[hod][param]['val']])\n\n    return param_vals\n\n# Execute grid search\ndef gridsearch(params, loglike_func, output=''):\n    \"\"\"\n    Finds the values of the CMASS-WISE HOD model parameters that maximize the log-likelihood using a grid search.\n\n    Parameters\n    ----------\n    params : dict\n        Dictionary of the CMASS-WISE HOD model parameters.\n    loglike_func : func\n        Function that calculates the log-likelihood that the observed BOSS-CMASS and WISE data were produced by an\n        HOD model with the BOSS-CMASS HOD model and WISE HOD model parameters.\n    output : str, optional\n        String representation of the path to where the optimization results are saved.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    # Get parameter ranges\n    cmass_M_min_vals = get_gridsearch_range(params, 'CMASS HOD', 'M_min')\n    cmass_M_1_vals = get_gridsearch_range(params, 'CMASS HOD', 'M_1')\n    cmass_alpha_vals = get_gridsearch_range(params, 'CMASS HOD', 'alpha')\n    cmass_M_0_vals = get_gridsearch_range(params, 'CMASS HOD', 'M_0')\n    cmass_sig_logm_vals = get_gridsearch_range(params, 'CMASS HOD', 'sig_logm')\n    wise_M_min_vals = get_gridsearch_range(params, 'WISE HOD', 'M_min')\n    wise_M_1_vals = get_gridsearch_range(params, 'WISE HOD', 'M_1')\n    wise_alpha_vals = get_gridsearch_range(params, 'WISE HOD', 'alpha')\n    wise_M_0_vals = get_gridsearch_range(params, 'WISE HOD', 'M_0')\n    wise_sig_logm_vals = get_gridsearch_range(params, 'WISE HOD', 'sig_logm')\n    R_ss = params['galaxy_corr']['R_ss']['val']\n    R_cs = params['galaxy_corr']['R_cs']['val']\n    R_sc = params['galaxy_corr']['R_sc']['val']\n\n    # Print out parameter space values\n    print('-'*80)\n    print('Executing grid search over the following parameter space:')\n    print('-'*80)\n    print('CMASS')\n    print(f'M_min = {cmass_M_min_vals}')\n    print(f'M_1 = {cmass_M_1_vals}')\n    print(f'alpha = {cmass_alpha_vals}')\n    print(f'M_0 = {cmass_M_0_vals}')\n    print(f'sig_logm = {cmass_sig_logm_vals}')\n    print('\\n')\n    print('WISE')\n    print(f'M_min = {wise_M_min_vals}')\n    print(f'M_1 = {wise_M_1_vals}')\n    print(f'alpha = {wise_alpha_vals}')\n    print(f'M_0 = {wise_M_0_vals}')\n    print(f'sig_logm = {wise_sig_logm_vals}')\n    print('-'*80)\n    print('\\n')\n\n    # Get all possible parameter combinations\n    param_combos = product(\n        cmass_M_min_vals,\n        cmass_M_1_vals,\n        cmass_alpha_vals,\n        cmass_M_0_vals,\n        cmass_sig_logm_vals,\n        wise_M_min_vals,\n        wise_M_1_vals,\n        wise_alpha_vals,\n        wise_M_0_vals,\n        wise_sig_logm_vals\n    )\n\n    # Initialize variables to keep track of search results\n    counter = 1\n    best_loglike = {'total_loglike': -1e6}\n    best_cmass = []\n    best_wise = []\n\n    # Execute grid search\n    for combo in param_combos:\n        loglike = loglike_func(\n            cmass_M_min = combo[0],\n            cmass_M_1 = combo[1],\n            cmass_alpha = combo[2],\n            cmass_M_0 = combo[3],\n            cmass_sig_logm = combo[4],\n            wise_M_min = combo[5],\n            wise_M_1 = combo[6],\n            wise_alpha = combo[7],\n            wise_M_0 = combo[8],\n            wise_sig_logm = combo[9],\n            R_ss = R_ss,\n            R_cs = R_cs,\n            R_sc = R_sc\n        )\n\n        if loglike['total_loglike'] > best_loglike['total_loglike']:\n            best_loglike = loglike\n            best_cmass = combo[:5]\n            best_wise = combo[5:]\n\n            print(f'STEP {counter}')\n            print(f'New best log-likelihood: {loglike}')\n            print(f'CMASS parameters: {best_cmass}')\n            print(f'WISE parameters: {best_wise}')\n            print('\\n')\n\n        counter += 1\n\n    # Print results\n    print('-'*80)\n    print('GRID SEARCH COMPLETE')\n    print(f'Best log-likelihood: {best_loglike}')\n    print(f'CMASS parameters: {best_cmass}')\n    print(f'WISE parameters: {best_wise}')\n    print('-'*80)\n\n    # Save results\n    if bool(output):\n        output_file = open(f'{output}.txt', 'w')\n        output_file.write(f'Log-likelihood = {best_loglike}\\n')\n        output_file.write(f'Chi^2 = {-2 * best_loglike[\"total_loglike\"]}\\n')\n        output_file.write('\\n')\n        output_file.write('CMASS Parameters\\n')\n        output_file.write(f'M_min = {best_cmass[0]}\\n')\n        output_file.write(f'M_1 = {best_cmass[1]}\\n')\n        output_file.write(f'alpha = {best_cmass[2]}\\n')\n        output_file.write(f'M_0 = {best_cmass[3]}\\n')\n        output_file.write(f'sig_logm = {best_cmass[4]}\\n')\n        output_file.write('\\n')\n        output_file.write('WISE Parameters\\n')\n        output_file.write(f'M_min = {best_wise[0]}\\n')\n        output_file.write(f'M_1 = {best_wise[1]}\\n')\n        output_file.write(f'alpha = {best_wise[2]}\\n')\n        output_file.write(f'M_0 = {best_wise[3]}\\n')\n        output_file.write(f'sig_logm = {best_wise[4]}\\n')\n        output_file.close()\n\n# ----------------------------------------------------------------------------------------------------------------------"
},
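`optimize_model` and `mcmc` above both hand Cobaya an info dictionary whose likelihood is a plain Python callable. Below is a minimal toy version of that pattern, with a 1-D Gaussian standing in for the HOD log-likelihood; it is runnable if cobaya is installed, and the `proposal` and `max_samples` settings are illustrative, not the project's values.

```python
from cobaya.run import run

# Toy analogue of the info dicts built above: one sampled parameter with a
# uniform prior and an external log-likelihood function.
def toy_loglike(x):
    return -0.5 * x ** 2  # standard normal log-likelihood (up to a constant)

info = {
    'params': {'x': {'prior': {'min': -5.0, 'max': 5.0},
                     'ref': 0.0, 'proposal': 0.5}},
    'likelihood': {'toy': {'external': toy_loglike}},
    'sampler': {'mcmc': {'max_samples': 200}},
}

updated_info, sampler = run(info)  # returns the updated info and the sampler
```

The real driver differs only in plugging in `sampling_params_dict`, the HOD likelihood, and the project's sampler settings.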
{
"alpha_fraction": 0.5360456705093384,
"alphanum_fraction": 0.5547136664390564,
"avg_line_length": 33.75954055786133,
"blob_id": "bd35416ccfdea954ee246574800a98179ab18441",
"content_id": "e9db394cdfa9e284e6122c83c32021644ee9562d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18213,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 524,
"path": "/halomod/cross_correlations.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModules defining cross-correlated samples.\n\nHas classes for both pure HOD cross-correlations\n(i.e. number of cross-pairs) and for HaloModel-derived quantities\nbased on these cross-pairs.\n\nTo construct a :class:`CrossCorrelations` one need to specify the\nhalo models to be cross-correlated, and how they're correlated.\n\nExamples\n--------\n\nCross-correlating the same galaxy samples in different redshifts::\n\n >>> from halomod import HaloModel\n >>> from halomod.cross_correlations import CrossCorrelations, HODCross\n >>> cross = CrossCorrelations(cross_hod_model=ConstantCorr, halo_model_1_params=dict(z=1.0),\n >>> halo_model_2_params=dict(z=0.0))\n >>> pkcorr = cross.power_cross\n\"\"\"\n\nfrom .halo_model import TracerHaloModel\nfrom hmf import Component, Framework\nfrom hmf._internals._framework import get_model_\nfrom hmf._internals._cache import parameter, cached_quantity, subframework\nfrom abc import ABC, abstractmethod\nimport numpy as np\nfrom scipy import integrate as intg\nfrom . import tools\nfrom .halo_exclusion_for_xcorr import DblEllipsoid, NgMatched, Exclusion, NoExclusion\nfrom numpy import issubclass_\n\n\nclass HODCross(ABC, Component):\n \"\"\"Provides methods necessary to compute cross-correlation pairs for HOD models.\"\"\"\n\n _defaults = {}\n\n def __init__(self, hods, **model_params):\n super().__init__(**model_params)\n\n assert len(hods) == 2\n self.hods = hods\n\n @abstractmethod\n def R_ss(self, m):\n r\"\"\"The cross-correlation of numbers of pairs within a halo.\n\n Notes\n -----\n Defined by\n\n .. math:: \\langle T_1 T_2 \\rangle = \\langle T_1 \\rangle \\langle T_2 \\rangle + \\sigma_1 \\sigma_2 R_{ss},\n\n where :math:`T` is the total amount of tracer in the halo's profile (i.e. not counting the\n central component, if this exists).\n \"\"\"\n pass\n\n @abstractmethod\n def R_cs(self, m):\n r\"\"\"\n The cross-correlation of central-satellite pairs within a halo.\n\n Central from first hod, satellite from second.\n\n Notes\n -----\n Defined by\n\n .. math:: \\langle T^c_1 T^s_2 \\rangle = \\langle T^c_1 \\rangle \\langle T^s_2 \\rangle + \\sigma^c_1 \\sigma^s_2 R_{cs},\n\n where :math:`T^s` is the total amount of tracer in the halo's profile (i.e. not counting the\n central component,if this exists).\n \"\"\"\n pass\n\n @abstractmethod\n def R_sc(self, m):\n r\"\"\"\n The cross-correlation of satellite-central pairs within a halo.\n\n Central from second hod, Satellite from first.\n\n Notes\n -----\n Defined by\n\n .. math:: \\langle T^s_1 T^c_2 \\rangle = \\langle T^s_1 \\rangle \\langle T^c_2 \\rangle + \\sigma^s_1 \\sigma^c_2 R_{sc},\n\n where :math:`T^s` is the total amount of tracer in the halo's profile (i.e. not counting\n the central component,if this exists).\n \"\"\"\n pass\n\n @abstractmethod\n def self_pairs(self, m):\n r\"\"\"The expected number of cross-pairs at a separation of zero.\"\"\"\n pass\n\n def ss_cross_pairs(self, m):\n r\"\"\"The average value of cross-pairs in a halo of mass m.\n\n Notes\n -----\n .. math:: `\\langle T^s_1 T^s_2 \\rangle - Q`\"\"\"\n h1, h2 = self.hods\n\n return (\n h1.satellite_occupation(m) * h2.satellite_occupation(m)\n + h1.sigma_satellite(m) * h2.sigma_satellite(m) * self.R_ss(m)\n - self.self_pairs(m)\n )\n\n def cs_cross_pairs(self, m):\n r\"\"\"The average value of cross-pairs in a halo of mass m.\n\n Notes\n -----\n .. 
math:: \\langle T^c_1 T^s_2 \\rangle.\n\n \"\"\"\n h1, h2 = self.hods\n\n return h1.central_occupation(m) * h2.satellite_occupation(m) + h1.sigma_central(\n m\n ) * h2.sigma_satellite(m) * self.R_cs(m)\n\n def sc_cross_pairs(self, m):\n r\"\"\"The average value of cross-pairs in a halo of mass m,\n\n Notes\n -----\n .. math:: \\langle T^s_1 T^c_2 \\rangle\n \"\"\"\n h1, h2 = self.hods\n\n return h2.central_occupation(m) * h1.satellite_occupation(m) + h2.sigma_central(\n m\n ) * h1.sigma_satellite(m) * self.R_sc(m)\n\n\nclass ConstantCorr(HODCross):\n \"\"\"Correlation relation for constant cross-correlation pairs\"\"\"\n\n _defaults = {\"R_ss\": 0.0, \"R_cs\": 0.0, \"R_sc\": 0.0}\n\n def R_ss(self, m):\n return self.params[\"R_ss\"]\n\n def R_cs(self, m):\n return self.params[\"R_cs\"]\n\n def R_sc(self, m):\n return self.params[\"R_sc\"]\n\n def self_pairs(self, m):\n \"\"\"The expected number of cross-pairs at a separation of zero.\"\"\"\n return 0\n\n\nclass CrossCorrelations(Framework):\n r\"\"\"\n The Framework for cross-correlations.\n\n This class generates two :class:`~halomod.halo_model.TracerHaloModel`,\n and calculates their cross-correlation according to the cross-correlation\n model given.\n\n Parameters\n ----------\n cross_hod_model : class\n Model for the HOD of cross correlation.\n cross_hod_params : dict\n Parameters for HOD used in cross-correlation.\n halo_model_1_params,halo_model_2_params : dict\n Parameters for the tracers used in cross-correlation.\n\n \"\"\"\n\n def __init__(\n self,\n cross_hod_model,\n cross_hod_params={},\n halo_model_1_params={},\n halo_model_2_params={},\n exclusion_model=None,\n exclusion_params=None\n ):\n super().__init__()\n\n self.cross_hod_model = cross_hod_model\n self.cross_hod_params = cross_hod_params\n\n self._halo_model_1_params = halo_model_1_params\n self._halo_model_2_params = halo_model_2_params\n \n self.exclusion_model, self.exclusion_params = (\n exclusion_model,\n exclusion_params or {},\n )\n\n\n @parameter(\"model\")\n def cross_hod_model(self, val):\n if not (isinstance(val, str) or np.issubclass_(val, HODCross)):\n raise ValueError(\n \"cross_hod_model must be a subclass of cross_correlations.HODCross\"\n )\n elif isinstance(val, str):\n return get_model_(val, \"\")\n else:\n return val\n\n @parameter(\"param\")\n def cross_hod_params(self, val):\n return val\n \n \n @subframework\n def halo_model_1(self) -> TracerHaloModel:\n \"\"\"Halo Model of the first tracer\"\"\"\n return TracerHaloModel(**self._halo_model_1_params)\n\n @subframework\n def halo_model_2(self) -> TracerHaloModel:\n \"\"\"Halo Model of the second tracer\"\"\"\n return TracerHaloModel(**self._halo_model_2_params)\n \n @parameter(\"model\")\n def exclusion_model(self, val):\n \"\"\"A string identifier for the type of halo exclusion used (or None).\"\"\"\n if val is None:\n val = \"NoExclusion\"\n\n if issubclass_(val, Exclusion):\n return val\n else:\n return get_model_(val, \"halomod.halo_exclusion_for_xcorr\")\n \n @parameter(\"param\")\n def exclusion_params(self, val):\n \"\"\"Dictionary of parameters for the Exclusion model.\"\"\"\n return val\n\n # ===========================================================================\n # Cross-correlations\n # ===========================================================================\n @cached_quantity\n def cross_hod(self):\n \"\"\"HOD model of the cross-correlation\"\"\"\n return self.cross_hod_model(\n [self.halo_model_1.hod, self.halo_model_2.hod], **self.cross_hod_params\n )\n\n @cached_quantity\n def 
power_1h_cross_fnc(self):\n \"\"\"Total 1-halo cross-power.\"\"\"\n hm1, hm2 = self.halo_model_1, self.halo_model_2\n mask = np.logical_and(\n np.logical_and(\n np.logical_not(np.isnan(self.cross_hod.ss_cross_pairs(hm1.m))),\n np.logical_not(np.isnan(self.cross_hod.sc_cross_pairs(hm1.m))),\n ),\n np.logical_not(np.isnan(self.cross_hod.cs_cross_pairs(hm1.m))),\n )\n\n m = hm1.m[mask]\n u1 = hm1.tracer_profile_ukm[:, mask]\n u2 = hm2.tracer_profile_ukm[:, mask]\n\n integ = hm1.dndm[mask] * (\n u1 * u2 * self.cross_hod.ss_cross_pairs(m)\n + u1 * self.cross_hod.sc_cross_pairs(m)\n + u2 * self.cross_hod.cs_cross_pairs(m)\n )\n\n p = intg.simps(integ, m)\n\n p /= hm1.mean_tracer_den * hm2.mean_tracer_den\n return tools.ExtendedSpline(\n hm1.k, p, lower_func=\"power_law\", upper_func=\"power_law\"\n )\n\n @property\n def power_1h_cross(self):\n \"\"\"Total 1-halo cross-power.\"\"\"\n return self.power_1h_cross_fnc(self.halo_model_1.k_hm)\n\n @cached_quantity\n def corr_1h_cross_fnc(self):\n \"\"\"The 1-halo term of the cross correlation\"\"\"\n corr = tools.hankel_transform(\n self.power_1h_cross_fnc, self.halo_model_1._r_table, \"r\"\n )\n return tools.ExtendedSpline(\n self.halo_model_1._r_table,\n corr,\n lower_func=\"power_law\",\n upper_func=tools._zero,\n )\n\n @cached_quantity\n def corr_1h_cross(self):\n \"\"\"The 1-halo term of the cross correlation\"\"\"\n return self.corr_1h_cross_fnc(self.halo_model_1.r)\n\n @cached_quantity\n def _power_halo_centres_fnc(self):\n \"\"\"\n Power spectrum of halo centres, unbiased.\n Notes\n -----\n This defines the halo-centre power spectrum, which is a part of the 2-halo\n term calculation. Formally, we make the assumption that the halo-centre\n power spectrum is linearly biased, and this function returns\n .. 
math :: P^{hh}_c (k) /(b_1(m_1)b_2(m_2))\n \"\"\"\n if self.halo_model_1.hc_spectrum == \"filtered-lin\":\n f = TopHat(None, None)\n p = self.halo_model_1.power * f.k_space(self.halo_model_1.k * 2.0)\n first_zero = np.where(p <= 0)[0][0]\n p[first_zero:] = 0\n return tools.ExtendedSpline(\n self.halo_model_1.k,\n p,\n lower_func=self.halo_model_1.linear_power_fnc,\n upper_func=tools._zero,\n match_lower=False,\n )\n elif self.halo_model_1.hc_spectrum == \"filtered-nl\":\n f = TopHat(None, None)\n p = self.halo_model_1.nonlinear_power * f.k_space(self.halo_model_1.k * 3.0)\n first_zero = np.where(p <= 0)[0][0]\n p[first_zero:] = 0\n return tools.ExtendedSpline(\n self.halo_model_1.k,\n p,\n lower_func=self.halo_model_1.nonlinear_power_fnc,\n upper_func=tools._zero,\n match_lower=False,\n )\n elif self.halo_model_1.hc_spectrum == \"linear\":\n return self.halo_model_1.linear_power_fnc\n elif self.halo_model_1.hc_spectrum == \"nonlinear\":\n return self.halo_model_1.nonlinear_power_fnc\n else:\n raise ValueError(\"hc_spectrum was specified incorrectly!\")\n\n @cached_quantity\n def _power_2h_cross_primitive(self):\n \"\"\"The 2-halo term of the cross-power spectrum.\"\"\"\n import time\n t0 = time.time()\n hm1, hm2 = self.halo_model_1, self.halo_model_2\n\n u1 = hm1.tracer_profile_ukm[:, (hm1._tm & hm2._tm)]\n u2 = hm2.tracer_profile_ukm[:, (hm1._tm & hm2._tm)]\n \n if hm1.sd_bias_model is not None:\n bias1 = np.outer(hm1.sd_bias_correction, hm1.halo_bias)[:, (hm1._tm & hm2._tm)]\n bias2 = np.outer(hm2.sd_bias_correction, hm2.halo_bias)[:, (hm1._tm & hm2._tm)]\n else:\n bias1 = hm1.halo_bias[(hm1._tm & hm2._tm)]\n bias2 = hm2.halo_bias[(hm1._tm & hm2._tm)]\n \n inst = self.exclusion_model(\n m1=hm1.m[(hm1._tm & hm2._tm)],\n m2=hm2.m[(hm1._tm & hm2._tm)],\n density1 = hm1.total_occupation[(hm1._tm & hm2._tm)] * hm1.dndm[(hm1._tm & hm2._tm)],\n density2 = hm2.total_occupation[(hm1._tm & hm2._tm)] * hm2.dndm[(hm1._tm & hm2._tm)],\n Ifunc1=hm1.total_occupation[(hm1._tm & hm2._tm)]\n * hm1.dndm[(hm1._tm & hm2._tm)]\n * u1\n / hm1.mean_tracer_den,\n Ifunc2=hm2.total_occupation[(hm1._tm & hm2._tm)]\n * hm2.dndm[(hm1._tm & hm2._tm)]\n * u2\n / hm2.mean_tracer_den,\n bias1=bias1,\n bias2=bias2,\n r=hm1._r_table,\n delta_halo=hm1.halo_overdensity_mean,\n mean_density=hm1.mean_density0\n )\n \n if hasattr(inst, \"density_mod\"):\n self.__density_mod = inst.density_mod\n else:\n self.__density_mod = np.ones_like(hm1._r_table) * hm1.mean_tracer_den\n \n if hasattr(inst, \"density_mod1\"):\n self.__density_mod1 = inst.density_mod1\n else:\n self.__density_mod1 = np.ones_like(hm1._r_table) * hm1.mean_tracer_den\n\n if hasattr(inst, \"density_mod2\"):\n self.__density_mod2 = inst.density_mod2\n else:\n self.__density_mod2 = np.ones_like(hm2._r_table) * hm2.mean_tracer_den\n intg = inst.integrate()\n \n phh = self._power_halo_centres_fnc(hm1.k)\n\n if intg.ndim == 2:\n p = [\n tools.ExtendedSpline(\n hm1.k,\n x * phh,\n lower_func=hm1.linear_power_fnc,\n match_lower=True,\n upper_func=\"power_law\"\n if (\n self.halo_model_1.exclusion_model == NoExclusion\n and \"filtered\" not in self.halo_model_1.hc_spectrum\n )\n else tools._zero,\n )\n for i, x in enumerate(intg)\n ]\n else:\n p = tools.ExtendedSpline(\n hm1.k,\n intg * phh,\n lower_func=hm1.linear_power_fnc,\n match_lower=True,\n upper_func=\"power_law\"\n if (\n self.halo_model_1.exclusion_model == NoExclusion\n and \"filtered\" not in self.halo_model_1.hc_spectrum\n )\n else tools._zero,\n )\n print('done',time.time()-t0)\n return p\n \n @property\n 
def power_2h_cross(self):\n \"\"\"The 2-halo term of the tracer auto-power spectrum.\"\"\"\n # If there's nothing modifying the scale-dependence, just return the original power.\n if self.halo_model_1.exclusion_model is NoExclusion and self.halo_model_1.sd_bias_model is None:\n return self._power_2h_cross_primitive(self.k_hm)\n\n # Otherwise, first calculate the correlation function.\n out = tools.hankel_transform(\n self.corr_2h_cross_fnc, self.k_hm, \"k\", h=0.001\n )\n\n # Everything below about k=1e-2 is essentially just the linear power biased,\n # and the hankel transform stops working at some small k.\n if np.any(self.k_hm < 1e-2):\n warnings.warn(\n \"power_2h_auto_tracer for k < 1e-2 is not computed directly, but \"\n \"is rather just the linear power * effective bias.\"\n )\n out[self.k_hm < 1e-2] = (\n self.power[self.k_hm < 1e-2] * self.halo_model_1.bias_effective_tracer * self.halo_model_2.bias_effective_tracer\n )\n\n return out\n \n @cached_quantity\n def corr_2h_cross_fnc(self):\n \"\"\"A callable returning the 2-halo term of the tracer auto-correlation.\"\"\"\n # Need to set h smaller here because this might need to be transformed back\n # to power.\n hm1, hm2 = self.halo_model_1, self.halo_model_2\n\n corr = tools.hankel_transform(\n self._power_2h_cross_primitive, hm1._r_table, \"r\", h=1e-4\n )\n\n # modify by the new density. This step is *extremely* sensitive to the exact\n # value of __density_mod at large\n # scales, where the ratio *should* be exactly 1.\n if hm1._r_table[-1] > 2 * hm1.halo_profile.halo_mass_to_radius(hm1.m[-1]):\n try:\n self.__density_mod1 *= hm1.mean_tracer_den / self.__density_mod1[-1]\n except TypeError:\n pass\n if hm2._r_table[-1] > 2 * hm2.halo_profile.halo_mass_to_radius(hm2.m[-1]):\n try:\n self.__density_mod2 *= hm2.mean_tracer_den / self.__density_mod2[-1]\n except TypeError:\n pass\n if hm1._r_table[-1] > 2 * hm1.halo_profile.halo_mass_to_radius(hm1.m[-1]):\n try:\n self.__density_mod *= np.sqrt(hm2.mean_tracer_den * hm1.mean_tracer_den) / self.__density_mod[-1]\n except TypeError:\n pass\n\n #corr = (self.__density_mod1 / hm1.mean_tracer_den) * (self.__density_mod2 / hm2.mean_tracer_den) * (1 + corr) - 1\n density_sq = ((self.__density_mod**2.)**2./np.sqrt(self.__density_mod1**2. * self.__density_mod2**2.))\n density_sq[(self.__density_mod == 0) & ((self.__density_mod1 == 0) | (self.__density_mod2 == 0))] = 0\n corr = density_sq / (hm1.mean_tracer_den * hm2.mean_tracer_den) * (1 + corr) - 1\n\n return tools.ExtendedSpline(\n hm1._r_table, corr, lower_func=\"power_law\", upper_func=tools._zero\n )\n\n\n @cached_quantity\n def corr_2h_cross(self):\n \"\"\"The 2-halo term of the cross-correlation.\"\"\"\n return self.corr_2h_cross_fnc(self.halo_model_1.r)\n\n def power_cross_fnc(self, k):\n \"\"\"Total tracer cross power spectrum.\"\"\"\n return self.power_1h_cross_fnc(k) + self.power_2h_cross_fnc(k)\n\n @property\n def power_cross(self):\n \"\"\"Total tracer cross power spectrum.\"\"\"\n return self.power_cross_fnc(self.halo_model_1.k_hm)\n\n def corr_cross_fnc(self, r):\n \"\"\"The tracer cross correlation function.\"\"\"\n return self.corr_1h_cross_fnc(r) + self.corr_2h_cross_fnc(r) + 1\n\n @property\n def corr_cross(self):\n \"\"\"The tracer cross correlation function.\"\"\"\n return self.corr_cross_fnc(self.halo_model_1.r)"
},
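A usage sketch mirroring the example in the module docstring above, assuming halomod and hmf are installed; it reads the 1-halo cross power and the total cross-correlation function, which are quantities fully wired up in this modified copy of the file.

```python
from halomod.cross_correlations import CrossCorrelations, ConstantCorr

# Same-tracer cross-correlation between two redshifts, following the usage
# example in the module docstring above. Parameter values are illustrative.
cross = CrossCorrelations(
    cross_hod_model=ConstantCorr,
    halo_model_1_params=dict(z=1.0),
    halo_model_2_params=dict(z=0.0),
)

pk_1h = cross.power_1h_cross  # 1-halo cross power on the model's k grid
xi = cross.corr_cross         # total cross-correlation function on the r grid
```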
{
"alpha_fraction": 0.489761084318161,
"alphanum_fraction": 0.5170648694038391,
"avg_line_length": 21.538461685180664,
"blob_id": "38b06c0e272c7eeb1d2a9e4f65836fcf1e258d83",
"content_id": "ec60c8062d4f9ea896908ae2295d7c2bf4c1cff3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 586,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 26,
"path": "/halomod/functional.py",
"repo_name": "jensen-lawrence/CMASS-WISE-HOD",
"src_encoding": "UTF-8",
"text": "from .halo_model import HaloModel\nfrom hmf import get_hmf\n\n\ndef get_halomodel(\n required_attrs,\n get_label=True,\n kls=HaloModel,\n fast_kwargs={\n \"transfer_fit\": \"BBKS\",\n \"lnk_min\": -4,\n \"lnk_max\": 2,\n \"dlnk\": 1,\n \"Mmin\": 13,\n \"dlog10m\": 0.5,\n \"rmin\": 10,\n \"rmax\": 20,\n \"rnum\": 4,\n \"halo_exclusion\": \"None\",\n \"nonlinear\": False,\n \"scale_dependent_bias\": False,\n \"hod_model\": \"Zehavi05\",\n },\n **kwargs\n):\n return get_hmf(required_attrs, get_label, kls, fast_kwargs, **kwargs)\n"
}
] | 11 |
gslin/flickrmirrorer
|
https://github.com/gslin/flickrmirrorer
|
7e726ac466d02f24be88b69b8dfc310d5bd2df82
|
37c74b6cf77f632b93077106cbe3d6df4647174f
|
cab5ee3f4256cafe59580aa307ef28e11643d436
|
refs/heads/master
| 2020-12-24T00:15:34.937376 | 2020-01-30T22:53:35 | 2020-01-30T22:53:35 | 237,320,675 | 0 | 0 | null | 2020-01-30T22:46:10 | 2020-01-28T18:36:48 | 2019-06-02T01:50:52 | null |
[
{
"alpha_fraction": 0.8857142925262451,
"alphanum_fraction": 0.8857142925262451,
"avg_line_length": 10.666666984558105,
"blob_id": "bbe3eea3d062948694fcc821bfb2a613c2699da1",
"content_id": "aa5108350b989b28170cadcaa62d419c3821f6f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "gslin/flickrmirrorer",
"src_encoding": "UTF-8",
"text": "requests\npython-dateutil\nflickrapi\n"
},
{
"alpha_fraction": 0.5006045699119568,
"alphanum_fraction": 0.6227327585220337,
"avg_line_length": 27.517240524291992,
"blob_id": "8dad1330aef3e2a15fcc6f23870b82d2f6fcc550",
"content_id": "f3f3a49215c0f559f266a5c90831837f154d5d14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 29,
"path": "/tests.py",
"repo_name": "gslin/flickrmirrorer",
"src_encoding": "UTF-8",
"text": "from .flickrmirrorer import get_photo_datetime\n\n\ndef test_unparseable_title_timestamp():\n timestamp = get_photo_datetime({\n 'datetakenunknown': '1',\n 'datetaken': '2014-10-01 13:45:37',\n 'title': 'flaskpost'\n })\n\n # Fall back on datetaken if we can't parse the date from the title\n assert timestamp.isoformat() == \"2014-10-01T13:45:37\"\n\n\ndef test_plain_title_timestamp():\n timestamp = get_photo_datetime({\n 'datetakenunknown': '1',\n 'datetaken': '2014-10-01 13:45:37',\n 'title': '20151130_135610'\n })\n assert timestamp.isoformat() == \"2015-11-30T13:56:10\"\n\n\ndef test_known_timestamp():\n timestamp = get_photo_datetime({\n 'datetakenunknown': '0',\n 'datetaken': '2015-11-02 12:35:07'\n })\n assert timestamp.isoformat() == \"2015-11-02T12:35:07\"\n"
},
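The three tests above pin down how `get_photo_datetime` should behave without showing its body (the function lives in `flickrmirrorer.py`, which is not part of this dump). Here is a minimal sketch that satisfies all three tests using only the standard library — an illustration, not the project's actual implementation:

```python
from datetime import datetime

def get_photo_datetime(photo):
    # When Flickr marks the taken-date as unknown, try to recover a
    # YYYYMMDD_HHMMSS timestamp from the photo title first.
    if photo.get('datetakenunknown') == '1':
        try:
            return datetime.strptime(photo.get('title', ''), '%Y%m%d_%H%M%S')
        except ValueError:
            pass  # title is not a timestamp; fall back to 'datetaken'
    return datetime.strptime(photo['datetaken'], '%Y-%m-%d %H:%M:%S')
```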
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.71875,
"avg_line_length": 15,
"blob_id": "6c1ce4a1dfd49cfb024ba8559bb31e2eaa52e1ac",
"content_id": "d30a8d447ae9254270d0bb0f4c024389cefee640",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 4,
"path": "/tox.ini",
"repo_name": "gslin/flickrmirrorer",
"src_encoding": "UTF-8",
"text": "[flake8]\nmax-line-length = 120\n[pytest]\npython_files = tests.py\n"
},
{
"alpha_fraction": 0.7033158540725708,
"alphanum_fraction": 0.7496606707572937,
"avg_line_length": 31.031055450439453,
"blob_id": "5c5a224183e5c45e12cf22ac5e9b99e98e373037",
"content_id": "877ee18a23ab605fd244c9aa520a68cdca18aaea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5157,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 161,
"path": "/README.md",
"repo_name": "gslin/flickrmirrorer",
"src_encoding": "UTF-8",
"text": "Overview\n========\nA small command-line python script that creates a local backup of your\nFlickr data. It mirrors images, video metadata, titles, description, tags,\nalbums and collections.\n\nAvailable at https://github.com/markdoliner/flickrmirrorer\n\n\nUsage\n=====\nThe script was developed on Linux. It should work on other Unixy operating\nsystems such as OS X, hopefully without changes. It could probably be made\nto work on Microsoft Windows with minor changes.\n\nTo set `flickrmirrorer` up the first time:\n\n```\ngit clone https://github.com/markdoliner/flickrmirrorer\ncd flickrmirrorer\npip install -r requirements.txt\n```\n\nThen run:\n\n```\npython flickrmirrorer.py /mnt/backup/flickr/\n```\n\n(Replace `/mnt/backup/flickr` with the path to your backup)\n\nThe first time you run this command, it will pop open your web browser and request permission from Flickr.\n\nSee `--help` for options.\n\n\nFeatures\n========\nThe script allows you to mirror only photos, only videos, or both. See\nthe `--ignore-videos` and `--ignore-photos` command line options.\n\nYour local backup can be cleaned automatically, so that files that were\ndeleted in Flickr are deleted locally. Deletion is disabled by default. See\nthe `--delete-unknown` command line option.\n\nThe script displays a summary of its actions if `--statistics` is passed on\nthe command line.\n\nRequirements\n============\n\n(These are covered by running `pip install -r requirements.txt` as mentioned above)\n\n* python 2.something or python 3.anything\n* python dateutil\n * Ubuntu: apt-get install python-dateutil\n* python flickrapi library 2.0 or newer.\n * Homepage: http://stuvel.eu/flickrapi\n * Ubuntu 16.04 LTS Xenial and newer: apt-get install python-flickrapi\n* python requests\n\nRunning via Cron\n================\nRunning this script regularly via cron is a good way to keep your backup\nup to date. For example, create the file /etc/cron.d/flickr_backup\ncontaining the following:\n\n```\n# Run Flickr photo mirroring script.\n# Sleep between 0 and 4 hours to distribute load on Flickr's API servers.\n0 3 * * 2 root sleep $((`bash -c 'echo $RANDOM'` \\% 14400)) && /usr/local/bin/flickrmirrorer.py -q /mnt/backup/flickr/\n```\n\nIf you run the cronjob as a user other than yourself you may\nneed to take additional steps to make sure the cron user is able to\nauthenticate. The steps are something like this:\n\n1. Run the script as yourself the first time around. It should pop open\n your web browser and request permission.\n2. After granting permission an authorization token is stored in\n `~/.flickr/9c5c431017e712bde232a2f142703bb2/auth.token`\n3. 
Copy this file to the home directory of the cron user:\n ```\n sudo mkdir -p /root/.flickr/9c5c431017e712bde232a2f142703bb2/\n sudo cp ~/.flickr/9c5c431017e712bde232a2f142703bb2/auth.token \\\n /root/.flickr/9c5c431017e712bde232a2f142703bb2/auth.token\n ```\n\n\nOutput\n======\nThe script creates this directory hierarchy:\n\n```\ndest_dir\ndest_dir/photostream/\ndest_dir/photostream/12345.jpg\ndest_dir/photostream/12345.jpg.metadata\ndest_dir/photostream/12346.jpg\ndest_dir/photostream/12346.jpg.metadata\ndest_dir/photostream/12347.jpg\ndest_dir/photostream/12347.jpg.metadata\ndest_dir/Not in any album/\ndest_dir/Not in any album/12345.jpg -> ../photostream/12345.jpg\ndest_dir/Albums/\ndest_dir/Albums/Waterfalls - 6789/\ndest_dir/Albums/Waterfalls - 6789/1_12346.jpg -> ../../photostream/12346.jpg\ndest_dir/Albums/Waterfalls - 6789/2_12347.jpg -> ../../photostream/12347.jpg\ndest_dir/Collections/\ndest_dir/Collections/Nature - 2634-98761234/Waterfalls - 6789 -> ../../Albums/Waterfalls - 6789\ndest_dir/Collections/Nature - 2634-98761234/Mountains - 6790 -> ../../Albums/Mountains - 6790\n```\n\nThe metadata files contain JSON data dumped from the Flickr API.\nIt's not the prettiest thing in the world... but it does contain\nall the necessary data in case you want to recover from it.\n\nThe album and collection directories contain symlinks to the files in\nthe photostream. The symlink names in albums are numbered so as to\npreserve the order.\n\nRoutine status is printed to stdout by default.\n\nErrors are printed to stderr.\n\nTo see more options run with the `--help` flag.\n\n\nA note about videos\n===================\nThe Flickr API does not support downloading original video files. If this script encounters videos in your photostream, it asks you download them (you must be logged in to your Flickr account).\n\n\nRunning unit tests\n==================\nRun [`py.test`](http://pytest.org/).\n\n\nTODO\n====\n* Handle download errors better:\n * Add retry logic.\n * Continue trying to download other photos.\n * Stop running only if there are many download errors.\n* Mirror comments\n* Store order of photos in photostream\n* Store order of albums in collections\n\n\nChanges\n=======\n2018-06-02\n- Support for nested collections and empty collections.\n\n2017-01-02\n- Don't warn about downloading videos if they've already been downloaded.\n- Unknown files are no longer deleted by default.\n- Added new command line option `--delete-unknown`\n- Added new command line option `--ignore-photos`\n- Added new command line option `--ignore-videos`\n- Print statistics even if script is killed by CTRL+C.\n"
}
] | 4 |
noesterle/DailyProgrammerChallenges
|
https://github.com/noesterle/DailyProgrammerChallenges
|
1ac180f42534d2b9cd9ea34a9f0a2315db86e7ef
|
841a9f19a8fb8d0bec7cc313c7b074dba62462bf
|
4e7196ee099ad1944efb5ddb7bbdef151fad144f
|
refs/heads/master
| 2021-01-17T12:37:47.435128 | 2016-06-25T04:58:37 | 2016-06-25T04:58:37 | 58,017,695 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6894409656524658,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 11.384614944458008,
"blob_id": "740c7f914c445b682a7e40ff3bc170efb4737ee7",
"content_id": "823767645d5326e35140b98756acbcd19565f16b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/TextTransposer/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "This program will transpose the text in \"test.txt\".\nSo, the following:\n\na b c d\ne f g h\n\nbecomes:\na e\nb f\nc g\nd h\n\nThis program is designed to be run by Java 8.\n"
},
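The `Transposer` class that the README and `Main.java` refer to is not included in this dump. As a stand-alone illustration of the transposition it describes, here is a minimal sketch in Python — the file name `test.txt` comes from the README; equal-length rows are assumed, as in its example:

```python
# Read whitespace-separated tokens and swap rows with columns.
with open("test.txt") as fp:
    rows = [line.split() for line in fp if line.strip()]

for column in zip(*rows):        # zip(*rows) yields the columns in order
    print(" ".join(column))      # "a b c d" / "e f g h" -> "a e", "b f", ...
```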
{
"alpha_fraction": 0.5333951711654663,
"alphanum_fraction": 0.5510203838348389,
"avg_line_length": 20.540000915527344,
"blob_id": "91dad5c58253aac799903a1648bbfa1aa263ada1",
"content_id": "0c8ea7ee38c85423be1461625c00b7716132bdbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1078,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 50,
"path": "/NotMyPlace/NotMyPlace.cxx",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "/*\n * NotMyPlace.cxx\n * \n * Copyright 2016 Nathan Oesterle <nathan@nathan-C55B>\n * \n */\n \n#include <iostream>\n#include <stdlib.h>\n#include <string>\n#include <sstream>\n\nint main(int argc, char **argv)\n{\n\tint MS; //Place you finished in.\n\tstd::cout << \"Enter the place you finished in.\\n\";\n\tstd::cin >> MS;\n\tint MAX;// = 100; //Total number of places.\n\tstd::cout<< \"Enter how many competitors there were.\\n\";\n\tstd::cin >> MAX;\n\tstd::string VAL; //String of current place that isn't yours.\n\tfor(int i=1; i<= MAX; i++){\n\t\tif (i != MS){\n\t\t\t//Make current place a string.\n\t\t\tstd::stringstream ss;\n\t\t\tss <<\"\";\n\t\t\tss << i;\n\t\t\tVAL = ss.str(); //Make current place a string\n\t\t\tchar last;\n\t\t\t//Gets last character in place sting.\n\t\t\tlast = VAL.at(VAL.length() -1);\n\t\t\t\n\t\t\t//Check special cases.\n\t\t\tif (last == '1' and VAL != \"11\"){\n\t\t\t\tstd::cout << VAL<<\"st \";\n\t\t\t}\n\t\t\telse if (last == '2' and VAL != \"12\"){\n\t\t\t\tstd::cout << VAL<<\"nd \";\n\t\t\t}\n\t\t\telse if (last == '3' and VAL != \"13\"){\n\t\t\t\tstd::cout << VAL<<\"rd \";\n\t\t\t}\n\t\t\t//Not a special case.\n\t\t\telse{\n\t\t\t\tstd::cout << VAL<<\"th \";\n\t\t\t}\n\t\t};\n\t};\n\treturn 0;\n}\n\n"
},
{
"alpha_fraction": 0.7509578466415405,
"alphanum_fraction": 0.7835249304771423,
"avg_line_length": 64.125,
"blob_id": "89e7a373c1087ccaf83bbd1fe9d3f499f820b7a1",
"content_id": "08797838f46747882e6ac82dbe5994c1325d7771",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 522,
"license_type": "no_license",
"max_line_length": 361,
"num_lines": 8,
"path": "/NotMyPlace/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "\nYour dog just won X place in a dog show, congratulations! You post your star's photo and placement announcement to /r/aww and, predictably, a funny redditor asks what places the rest of the participating dogs took. Your job is to create a program that lists all places within the range of 0-100 in spoken English, excluding the placing (X) of your winning pup. \n\nBonus Challenges:\nBonus 1) Allow scaling greater than 100 placings\n\nBonus 2) Exclude 0th place\n\nBonus 3) Accurately represent the unique cases 11, 12, and 13\n"
},
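The heart of the challenge is the English ordinal-suffix rule, including bonus 3's 11/12/13 cases. A compact sketch of that rule in Python, independent of the C++ solution above (the variable names are mine):

```python
def ordinal(n):
    """1 -> '1st', 2 -> '2nd', 3 -> '3rd', 4 -> '4th', 11 -> '11th', 111 -> '111th'."""
    if n % 100 in (11, 12, 13):                       # teens always take "th"
        return str(n) + "th"
    return str(n) + {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")

# Every placing except your own; starting at 1 also satisfies bonus 2 (no 0th).
mine, total = 4, 100
print(" ".join(ordinal(i) for i in range(1, total + 1) if i != mine))
```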
{
"alpha_fraction": 0.4545060992240906,
"alphanum_fraction": 0.4672725200653076,
"avg_line_length": 29.205883026123047,
"blob_id": "0debb8746e087a3dc773822aa9f483a035ea710a",
"content_id": "99f7333857d36d60fbbaf79ddb5a17265224c9ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9243,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 306,
"path": "/BlackJack/play.py",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "__author__ = 'nathan'\n\nimport random\n\nclass Player:\n\n def __init__(self, num):\n self.id = num\n self.hand = []\n self.in_game = True\n self.busted = False\n\n def __str__(self):\n return self.id\n\n\ndef print_deck(deck):\n for item in deck:\n string=\"\"\n suit = \"\"\n if item % 4 == 0:\n suit = \"Hearts\"\n elif item % 4 == 1:\n suit = \"Clubs\"\n elif item % 4 == 2:\n suit = \"Diamonds\"\n elif item % 4 == 3:\n suit = \"Spades\"\n face = (item%13)+2\n if face == 14:\n print(item, \":\", \"Ace of \" + suit)\n elif face == 11:\n print(item, \":\", \"Jack of \" + suit)\n elif face == 12:\n print(item, \":\", \"Queen of \" + suit)\n elif face == 13:\n print(item, \":\", \"King of \" + suit)\n else:\n print(item, \":\", face, \"of \" + suit)\n\n\ndef show_hand(person, game_over):\n string=\"\"\n n = 0\n for item in person.hand:\n suit = \"\"\n if item % 4 == 0:\n suit = \"Hearts\"\n elif item % 4 == 1:\n suit = \"Clubs\"\n elif item % 4 == 2:\n suit = \"Diamonds\"\n elif item % 4 == 3:\n suit = \"Spades\"\n \n face=((item%13)+2)\n if face == 14:\n face = \"Ace\"\n elif face == 13:\n face = \"King\"\n elif face == 12:\n face = \"Queen\"\n elif face == 11:\n face = \"Jack\"\n\n if person.id != USER and not game_over:\n if n == 0:\n string += \"?, \"\n n = 1\n else:\n string+= str(face)+\" of \"+ suit +\", \"\n else:\n string+= str(face)+\" of \"+ suit +\", \"\n return string[:-2]\n\n\ndef sum_hand(person,game_over):\n if person.id == USER or game_over:\n hand_value = 0\n for card in person.hand:\n points = ((card % 13) + 2)\n if points > 13:\n hand_value += 11\n elif points > 10:\n hand_value += 10\n else:\n hand_value += points\n #for item in ACE:\n # if item in person.hand and hand_value > 21:\n # #You have an ace, and would bust if it counts as 11 points.\n num_aces = any_aces(person)\n while num_aces > 0 and hand_value > 21:\n hand_value -= 10\n num_aces -= 1\n return hand_value\n return \"?\"\n\n\ndef hit(deck, person):\n new_card = deck.pop()\n person.hand.append(new_card)\n if (sum_hand(person,True) > 21):\n #print(\"BUST\")\n person.busted = True\n person.in_game = False\n #return hand\n\n\ndef stand(person):\n person.in_game = False\n #return in_game\n\n\ndef double_down():\n pass\n\n\ndef split():\n pass\n\n\ndef surrender():\n pass\n\n\ndef deal(deck,players):\n for person in all_players:\n hit(deck,person)\n hit(deck, person)\n\n\ndef view(players,game_over):\n for person in players:\n if person.id != DEALER:\n print(\"Player \"+ str(person.id) + \"\\tHand value\", sum_hand(person,game_over), \"\\tHand:\", show_hand(person,game_over))\n else:\n print(\"Dealer \\tHand value\", sum_hand(person,game_over), \"\\tHand:\", show_hand(person,game_over))\n\ndef any_aces(player):\n ACE = [12,25,38,51]\n return len(set(ACE).intersection(player.hand))\n\n\ndef play(deck,players):\n player_num = 0\n\n #Give every player a turn.\n while player_num < len(players):\n #print(\"Take a turn, player\",player_num)\n #Make sure the player didn't bust.\n #if sum_hand(players[player_num],True) < 22:\n if((players[player_num].in_game)):\n #print(\"Player is in game.\")\n #User's turn.\n if players[player_num].id == USER:\n #print(\"Player is user\")\n view(players,False)\n action = \"\"\n \n #User decides what to do.\n while action.lower() != \"hit\" and \\\n action.lower() != \"stand\" and \\\n action.lower() != \"double down\" and \\\n action.lower() != \"split\" and \\\n action.lower() != \"surrender\":\n action = str(input(\"What would you like to do? You can 'hit' or 'stand'. 
\"))\n if action.lower() == 'hit':\n hit(deck,players[player_num])\n elif action.lower() == 'stand':\n stand(players[player_num])\n player_num += 1\n elif action.lower() == 'double down':\n double_down()\n player_num += 1\n elif action.lower() == 'split':\n split()\n player_num += 1\n elif action.lower() == 'surrender':\n surrender()\n player_num += 1\n #Dealer follows soft-hit rules\n #print(\"Dealer is player number\",DEALER)\n #Dealer's turn.\n elif players[player_num].id==DEALER:\n #print(\"Player is dealer\")\n score=(sum_hand(players[player_num],True))\n #print(\"Dealer is playing.\")\n if score < 17:\n hit(deck,players[player_num])\n #print(\"Dealer hit with score less than 17\")\n elif score == 17:\n num_aces = any_aces(players[player_num])\n if num_aces > 0:\n hit(deck,players[player_num])\n #print(\"Dealer hit with a soft 17\")\n else:\n stand(players[player_num])\n else:\n stand(players[player_num])\n player_num += 1\n #print(\"Dealer stands\")\n #Other player's turn.\n else:\n #print(\"PLayer is bot\")\n #Automated player plays\n if random.randint(0,22) > sum_hand(players[player_num],True):\n hit(deck,players[player_num])\n else:\n stand(players[player_num])\n player_num += 1\n #Over 21 points.\n else:\n print(\"Player \"+str(player_num)+\" BUSTED\")\n stand(players[player_num])\n player_num += 1\n #print(player_num)\n\n\ndef players_playing(players):\n \"\"\"\n Determines if any players are currently playing.\n \"\"\"\n x = False\n for person in players:\n if person.in_game == True:\n x = True\n return x\n\n\ndef winning(players):\n \"\"\"\n Prints out the result of each player vs Dealer.\n \"\"\"\n dealer_score = sum_hand(players[DEALER],True)\n #print(\"Is Dealer still in?\",players[DEALER].busted)\n #print(players[DEALER].hand)\n for person in players:\n if person.id != DEALER:\n #print(\"Is player still in?\",person.busted)\n player_score = sum_hand(person,True)\n if (players[DEALER].busted and not person.busted):\n print(\"PLayer\",str(person.id),\"beat the dealer, who busted!\")\n elif dealer_score < player_score <= 21:\n if person.id == USER:\n print(\"You beat the dealer with a score of \"+ str(player_score)+\"!\")\n else:\n print(\"Player \" + str(person.id)+ \" beat the dealer with a score of \" + str(player_score) + \"!\")\n else:\n if person.id == USER:\n busted = \"\"\n if player_score > 21:\n busted = \"You busted! \"\n print(busted + \"Better luck next time!\")\n else:\n print(\"The dealer beat Player \" + str(person.id))\n\nif __name__ == '__main__':\n play_again = 0\n while play_again == 0:\n #Set up deck.\n deck = []\n for i in range(0, 52):\n deck.append(i)\n #print_deck(deck)\n #print(\"This is the deck.\")\n random.shuffle(deck)\n \n #Game info from user.\n num_decks = 0\n num_players = -1\n while num_decks < 1:\n num_decks = int(input(\"How many decks do you want to play with? \"))\n deck *= num_decks\n while num_players < 0:\n num_players = int(input(\"How many other players are there? 
\")) + 1\n \n DEALER = num_players\n USER = 0\n\n #Setting up players and hands.\n x = 0\n all_players = []\n print(\"CREATING PLAYERS\")\n while x < num_players + 1:\n person = Player(x)\n all_players.append(person)\n x += 1\n print(\"DEALING\")\n deal(deck, all_players)\n x = players_playing(all_players)\n\n #Play Game\n play(deck,all_players)\n\n print(\"GAME OVER\")\n #View results (Score, Hand, won/lost to delaer.)\n view(all_players,True)\n winning(all_players)\n #Play again?\n again =\"\"\n while (again.lower()!=\"y\" and again.lower()!=\"n\"):\n again = input(\"Would you like to play again? (y/n)\")\n if again.lower() == \"y\":\n play_again = 0\n else:\n play_again = 1\n"
},
{
"alpha_fraction": 0.6942675113677979,
"alphanum_fraction": 0.7197452187538147,
"avg_line_length": 51.33333206176758,
"blob_id": "17f9d77e449590b5993072671b602487f7c32cb1",
"content_id": "6ba8ddf7ec12bb7f19aed4bcee270b779ecc380d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 3,
"path": "/DateDilemma/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "#DateDilema\n\nThis takes user input of the form MM DD YYYY or YYYY MM DD seperated by either a \"-\",\"/\", or a space and converts them to YYYY-MM-DD (ISO 8106)\n"
},
{
"alpha_fraction": 0.7541528344154358,
"alphanum_fraction": 0.7541528344154358,
"avg_line_length": 26.363636016845703,
"blob_id": "66b6d66bd279bf84283a2d3001c3cb27b938d9ce",
"content_id": "a751a92f641b7aface96a3d3315e7984981c8840",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 11,
"path": "/TextTransposer/src/textTransporter/Main.java",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "package textTransporter;\n\nimport java.util.ArrayList;\n\npublic class Main {\n\tpublic static void main(String[] args){\n\t\tTransposer t = new Transposer();\n\t\tArrayList<String> arr = t.read(\"/home/nathan/code/git/DailyProgrammerChallenges/TextTransposer/src/textTransporter/test.txt\");\n\t\tt.print(arr);\n\t}\n}\n"
},
{
"alpha_fraction": 0.7894737124443054,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 67.4000015258789,
"blob_id": "212191b177795b65bf27603be01a70f1d8f0ef12",
"content_id": "adc36616fe7253ac5241d38ff23ac0443d687123",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 5,
"path": "/Typoglycemia/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "#Typoglycemia\n\nThis program takes advantage of the fact that the human brain can read almost any word as long as the first and last letters of the word are in the correct spot.\n\nWhen run, it asks you to enter a string. It'll then scramble the letters of each word, except for the first and last letters. Location of punctuation is preserved.\n"
},
{
"alpha_fraction": 0.5180505514144897,
"alphanum_fraction": 0.5361011028289795,
"avg_line_length": 24.18181800842285,
"blob_id": "d2dd663d56c19e435e71182019b6598560e49e28",
"content_id": "5f7fcb0e8948ac3f04fc14d5115c9ada902b4b23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 22,
"path": "/DateDilemma/DateDilemma.py",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "__author__ = 'nathan'\n\nimport datetime\n\n\ndef fix_date(date):\n date = date.replace(\"/\",\" \")\n date = date.replace(\"-\",\" \")\n fixing = date.split(\" \")\n\n #Year first (#### ## ##)\n if len(fixing[0]) == 4:\n fixed = datetime.date(int(fixing[0]),int(fixing[1]),int(fixing[2]))\n #Month First (## ## ####)\n elif len(fixing[2]) == 4:\n fixed = datetime.date(int(fixing[2]),int(fixing[0]),int(fixing[1]))\n print(fixed)\n\n\nif __name__ == '__main__':\n date= str(input(\"Enter a date to change to YYYY-MM-DD: \"))\n fix_date(date)\n"
},
{
"alpha_fraction": 0.44736841320991516,
"alphanum_fraction": 0.45881006121635437,
"avg_line_length": 24.735294342041016,
"blob_id": "4f7b391ccfd5b53afe8c2a0e0f66f96254ff7e2c",
"content_id": "35bba7df83c991a0427e6b5c2b92426ed733ad0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 874,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 34,
"path": "/Typoglycemia/typoglycemia.py",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "__author__ = 'nathan'\n\nimport random\n\ndef typoglycemia(strng):\n ary = strng.split()\n final = \"\"\n for i in range(len(ary)):\n if not ary[i][-1].isalpha():\n mid = ary[i][1:-2]\n x = True\n else:\n mid = ary[i][1:-1]\n x = False\n result=[]\n for item in mid:\n result.append(item)\n random.shuffle(result)\n resultString=\"\"\n for item in result:\n resultString += item\n if x:\n final += ary[i][0]+resultString+ary[i][-2:] + \" \"\n else:\n if len(ary[i])==1:\n final += ary[i] + \" \"\n else:\n final += ary[i][0]+resultString+ary[i][-1:] + \" \"\n return final\n\nif __name__ == '__main__':\n s = input(\"Enter a string to scramble and read: \")\n scrambled = typoglycemia(s)\n print(scrambled)"
},
{
"alpha_fraction": 0.7870370149612427,
"alphanum_fraction": 0.7870370149612427,
"avg_line_length": 322,
"blob_id": "575062e072e273df19118fd157d374fbbd4b9660",
"content_id": "0fe6316e8a519f979ad920aa4a0cee4cd593209c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 322,
"num_lines": 1,
"path": "/Typoglycemia/example.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "s pragorm taeks anadtgave of the fcat that the hmaun brian can raed aomslt any word as lnog as the first and lsat leterts of the word are in the cceorrt sopt. When run, it akss you to etner a sntrig. It'll tehn scrlabme the lttrees of ecah wrod, ecexpt for the fsirt and lsat lreetts. Licooatn of pituoacuntn is pevrersed. \n"
},
{
"alpha_fraction": 0.7670454382896423,
"alphanum_fraction": 0.7689393758773804,
"avg_line_length": 57.66666793823242,
"blob_id": "59360f6d60b77e33426a5da48d71b3a113b5c709",
"content_id": "9ff8c4e82608b115e2730fc837603e15933a492c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 9,
"path": "/BlackJack/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "#Black Jack\n\nThis is a text based game of blackjack. \nYou can enter how many decks of cards are played with, how many players are playing. You are player0. There will always be a dealer. \nAt the moment, you can hit or stand.\nThe output displays the cards in your hand and the score of your hand. You can see all but one card in the other players hands. \nIf you bust you are done, everyone else continues to play.\nAt the end, you are shown the hands off all players and their scores.\nYou are then given the option to play again.\n"
},
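The subtlest rule in `play.py` above is ace valuation: count each ace as 11, then demote aces to 1 one at a time while the hand would bust. A condensed, self-contained sketch of just that logic, using readable card ranks instead of the game's 0-51 integer encoding:

```python
def hand_value(ranks):
    """Best blackjack value of a hand of ranks (2-10, 'J', 'Q', 'K', 'A')."""
    total = aces = 0
    for r in ranks:
        if r == "A":
            total, aces = total + 11, aces + 1   # count aces high first...
        elif r in ("J", "Q", "K"):
            total += 10
        else:
            total += r
    while total > 21 and aces:                   # ...then demote while busting
        total -= 10
        aces -= 1
    return total

assert hand_value(["A", "K"]) == 21       # blackjack
assert hand_value(["A", "A", 9]) == 21    # one ace demoted to 1
assert hand_value(["A", 7, 9]) == 17      # soft 17 turned hard
```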
{
"alpha_fraction": 0.7962085604667664,
"alphanum_fraction": 0.8056871891021729,
"avg_line_length": 41.20000076293945,
"blob_id": "4a5eeb84662dffb1f23d8f36017f22ded2fa68c5",
"content_id": "76cafca15ed9c83604eafc13b047774f148ab364",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 5,
"path": "/README.md",
"repo_name": "noesterle/DailyProgrammerChallenges",
"src_encoding": "UTF-8",
"text": "A collection of programs based on challenges found at reddit.com/r/dailyprogrammer\n\nAll python scripts are written and should be run using python3.\n\nAll Java files are meant to be compiled and run using Java 8.\n"
}
] | 12 |
iamjackhu/web_scraping
|
https://github.com/iamjackhu/web_scraping
|
397b8c10e49f9691c1a7dd7eee57aca7f63fe369
|
2469ab74df18430ba3c44aadab2153d7532403c2
|
9a9ac4f8c0cfb7c7ca46b333c9a3bf613d3e3e3b
|
refs/heads/master
| 2020-02-29T21:30:53.691946 | 2016-08-11T00:32:05 | 2016-08-11T00:32:05 | 65,347,405 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6650943160057068,
"alphanum_fraction": 0.6709905862808228,
"avg_line_length": 28.275861740112305,
"blob_id": "f5f9e8812a6af2341e47ad8e98eaaa0d9330c92f",
"content_id": "6f04cb1679fa1c32fb6a55a9285c69c080d48952",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 29,
"path": "/code/iteratorHref.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "from urllib2 import urlopen\nfrom bs4 import BeautifulSoup\nimport re\t\nimport datetime\nimport random\n\nhtml = urlopen(\"http://en.wikipedia.org/wiki/Kevin_Bacon\")\nbsObj = BeautifulSoup(html, \"lxml\")\n\n# for link in bsObj.findAll(\"a\"):\n# \tif 'href' in link.attrs:\n# \t\tprint link\n\n# for link in bsObj.find(\"div\", {\"id\":\"bodyContent\"}).findAll(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\")):\n# if 'href' in link.attrs:\n# \t\tprint link\n\nrandom.seed(datetime.datetime.now())\n\ndef getLinks(url):\n\thtml = urlopen(\"http://en.wikipedia.org\"+url)\n\tbsObj = BeautifulSoup(html, \"lxml\")\n\treturn bsObj.find(\"div\", {\"id\":\"bodyContent\"}).findAll(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\"))\n\nlinks = getLinks(\"/wiki/Kevin_Bacon\")\nwhile len(links) > 0 :\n\tnewArticle = links[random.randint(0, len(links)-1)].attrs[\"href\"]\n\tprint(newArticle)\n\tlinks = getLinks(newArticle)"
},
{
"alpha_fraction": 0.6408601999282837,
"alphanum_fraction": 0.6494623422622681,
"avg_line_length": 16.259260177612305,
"blob_id": "3f8d777b3bb45a3559047d6024a1a85369a363dc",
"content_id": "62bf2cce5c903a00622bca754f90ce4ca3593b86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 27,
"path": "/code/scrapetest.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "from urllib2 import urlopen\nfrom urllib2 import HTTPError\nfrom bs4 import BeautifulSoup\n\ndef getTitle(url):\n\n try:\n html = urlopen(url)\n except HTTPError as e:\n return None\n\n try:\n bsObj = BeautifulSoup(html.read(),\"lxml\")\n title = bsObj.body.h1\n except AttributeError as e:\n return None\n\n return title\n\n\n\ntitle = getTitle(\"http://www.ifeng.com\")\n\nif title == None:\n\tprint(\"Title was not found\")\nelse:\n\tprint(title)"
},
{
"alpha_fraction": 0.71659916639328,
"alphanum_fraction": 0.71659916639328,
"avg_line_length": 26.55555534362793,
"blob_id": "a5e6e21976492e922b120ff22e45b8127137bf4d",
"content_id": "b422d29c960fe3d2be96ed2dbb0231a4bd985c99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 247,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 9,
"path": "/code/mysql.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "import pymysql\n\nconn = pymysql.connect(host='localhost', unix_socket='/var/lib/mysql/mysql.sock', user='jack', passwd=None, db='scraping')\n\ncur = conn.cursor()\ncur.execute(\"select * from pages\")\nprint(cur.fetchone())\ncur.close()\nconn.close()"
},
{
"alpha_fraction": 0.6539325714111328,
"alphanum_fraction": 0.6606741547584534,
"avg_line_length": 21.25,
"blob_id": "e909b213ec143762c1eebd625fae0a2c882bb10a",
"content_id": "27d44aae76b234ab1164ea73c446287dccbbdcd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 20,
"path": "/code/sendMail.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "import smtplib\nfrom email.mime.text import MIMEText\n\n# it require python3\n# msg = MIMEText(\"The body of the email is here\")\n\n# msg['Subject'] = \"An email alert\"\n# msg['From'] = \"[email protected]\"\n# msg['To'] = \"[email protected]\"\n\n# s = smtplib.SMTP(\"localhost\")\n# s.send_message(msg) \n# s.quit()\n\n# it's suitable for python2\nmsg = \"smtp_mail\" \nserver = smtplib.SMTP('localhost') \nserver.set_debuglevel(1)\nserver.sendmail(\"[email protected]\", \"[email protected]\", msg) \nserver.quit() "
},
{
"alpha_fraction": 0.668485701084137,
"alphanum_fraction": 0.6739426851272583,
"avg_line_length": 26.074073791503906,
"blob_id": "a3990650bc61b079cf1cbf80c496de3d7451b27f",
"content_id": "55e60a91a970965212d27f5695f7074cb22b04a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 733,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 27,
"path": "/code/beautifulSoup.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "from urllib2 import urlopen\nfrom bs4 import BeautifulSoup\nimport re\n\nhtml = urlopen(\"http://www.pythonscraping.com/pages/warandpeace.html\")\nbsObj = BeautifulSoup(html, \"lxml\")\n\nnameList = bsObj.findAll(\"span\", {\"class\":\"green\"})\n# <span class=\"green\">The prince</span> \nfor name in nameList:\n\tprint(name) # <span class=\"green\">The prince</span>\n\tprint(name.get_text()) # The prince\n\nprint(\"############\")\n\n# re.compile\nhtml = urlopen(\"http://www.pythonscraping.com/pages/page3.html\")\nbsObj = BeautifulSoup(html, \"lxml\")\n\nfor image in bsObj.findAll(\"img\", {\"src\":re.compile(\"\\.\\.\\/img\\/gifts/img.*\\.jpg\")}):\n\tprint image\n\nprint(\"############\")\n\n# lambda sample\nfor tag in bsObj.findAll(lambda tag: len(tag.attrs) == 2):\n\tprint tag\n\n\n"
},
{
"alpha_fraction": 0.6849056482315063,
"alphanum_fraction": 0.6933962106704712,
"avg_line_length": 30.205883026123047,
"blob_id": "63d7447a2e161648380340716e51faf08c594691",
"content_id": "e8a77d0228287941e3ece169e65f6dfc11c43cfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1060,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 34,
"path": "/code/crawl2mysql.py",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "from urllib2 import urlopen\nfrom bs4 import BeautifulSoup\nimport pymysql\nimport re\t\nimport datetime\nimport random\n\nconn = pymysql.connect(host='localhost', unix_socket='/var/lib/mysql/mysql.sock', user='jack', passwd=None, db='scraping', charset='utf8')\ncur = conn.cursor()\ncur.execute(\"use scraping\")\n\nrandom.seed(datetime.datetime.now())\n\ndef store2mysql(title, content):\n\tcur.execute(\"insert into pages (title, content) values (\\\"%s\\\", \\\"%s\\\")\",(title, content))\n\tcur.connection.commit()\n\ndef getLinks(url):\n\thtml = urlopen(\"http://en.wikipedia.org\"+url)\n\tbsObj = BeautifulSoup(html, \"lxml\")\n\ttitle = bsObj.find(\"h1\").get_text()\n\tcontent = bsObj.find(\"div\", {\"id\":\"mw-content-text\"}).find(\"p\").get_text()\n\tstore2mysql(title, content)\n\treturn bsObj.find(\"div\", {\"id\":\"bodyContent\"}).findAll(\"a\", href=re.compile(\"^(/wiki/)((?!:).)*$\"))\n\nlinks = getLinks(\"/wiki/Kevin_Bacon\")\ntry:\n\twhile len(links) > 0 :\n\t\tnewArticle = links[random.randint(0, len(links)-1)].attrs[\"href\"]\n\t\tprint(newArticle)\n\t\tlinks = getLinks(newArticle)\nfinally:\n\tcur.close()\n\tconn.close()"
},
{
"alpha_fraction": 0.7824267745018005,
"alphanum_fraction": 0.7824267745018005,
"avg_line_length": 33.28571319580078,
"blob_id": "5a1e5933d30701c40e802a1b6daa79e9924a40a5",
"content_id": "564dbb9032f0dcb6ffc323b527e17369a25eb183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 7,
"path": "/README.md",
"repo_name": "iamjackhu/web_scraping",
"src_encoding": "UTF-8",
"text": "# web_scraping\nIt's a learning project of web scraping.\n\n### useful tools\n scrapy startproject wikiSpider\n\nIt will create a project of spider. Please refer the directory of \"tools/wikiSpider\", which was generated automaticlly by scrapy."
}
] | 7 |
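For context on the `scrapy startproject wikiSpider` step above: the generated project is driven by spider classes that declare a name, start URLs, and a `parse()` callback. The repo's actual `tools/wikiSpider` code is not shown in this dump, so the following is only a generic sketch — the spider name, URL, and selectors are placeholders:

```python
import scrapy

class WikiSpider(scrapy.Spider):
    name = "wiki"                                             # placeholder name
    start_urls = ["https://en.wikipedia.org/wiki/Kevin_Bacon"]

    def parse(self, response):
        # Emit the article title, then follow in-article wiki links.
        yield {"title": response.css("h1::text").get()}
        for href in response.css("div#bodyContent a[href^='/wiki/']::attr(href)").getall():
            yield response.follow(href, callback=self.parse)
```

Saved inside the generated project's `spiders/` directory, this would be run with `scrapy crawl wiki`.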
djelinski/dt-cli
|
https://github.com/djelinski/dt-cli
|
94e74a021084f7b867fa8f7a43876d53f884c10b
|
2d0891521fe94054d57f360736a1997bc542dad6
|
7423c1ac814586e95b8329085dada3f01656c5a6
|
refs/heads/main
| 2023-09-04T10:52:06.429143 | 2021-09-27T11:00:10 | 2021-09-28T07:58:46 | 411,619,968 | 0 | 0 | null | 2021-09-29T09:59:01 | 2021-09-28T07:58:49 | 2021-09-28T07:58:46 | null |
[
{
"alpha_fraction": 0.7234782576560974,
"alphanum_fraction": 0.7323912978172302,
"avg_line_length": 25.900585174560547,
"blob_id": "8d20758261e54ee4a01ebb23212734425bdbf16b",
"content_id": "9ce2b1addcfbdba8f70614ea6112da9ad33c3f1b",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4612,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 171,
"path": "/CONTRIBUTING.md",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Contributing to dt-cli\n\n👍🎉 Thank you for choosing to contribute to dt-cli! 🎉👍\n\n**Table of Contents**\n\n* [Development](#development)\n * [Required tools](#required-tools)\n * [Environment setup](#environment-setup)\n * [With Poetry](#with-poetry)\n * [With Docker](#with-docker)\n * [How to use Poetry](#how-to-use-poetry)\n * [Development commands](#development-commands)\n * [Development cycle](#development-cycle)\n * [Development configuration](#development-configuration)\n\n<a id=\"development\"></a>\n## Development\n\nThis tool requires Python 3.8+ and is built with [poetry](https://python-poetry.org/).\nBefore starting, make sure you have a dedicated [virtual environment](https://docs.python.org/3/library/venv.html)\nfor working with this project.\n\n<a id=\"required-tools\"></a>\n### Required tools\n\nYou will need Python 3.8+ and `poetry` tool installed on your system.\nAlternatively, you can use the Docker image to replicate the environment without having to install anything on your system.\n\n<a id=\"environment-setup\"></a>\n### Environment setup\n\n<a id=\"with-poetry\"></a>\n#### With Poetry\n\nAfter poetry is installed and proper version of Python is present in the system,\ntell poetry to use the proper version for creation of the virtual environment.\n\n```shell\npoetry env use 3.9.5\n```\n\nNow you can install the dependencies specified in `pyproject.toml` and `poetry.lock` (frozen versions and hashes of dependencies).\n\n```shell\npoetry install\n```\n\n<a id=\"with-docker\"></a>\n#### With Docker\n\nBuild the image locally:\n\n```shell\ndocker build -t dtcli-dev .\n```\n\nRun it with root of the repo mounted into `/app` directory:\n\n```shell\ndocker run --rm -it -v \"$(pwd):/app\" bash\n```\n\nThis will launch an interactive shell into Docker container where you can run all the commands below.\n\n<a id=\"how-to-use-poetry\"></a>\n### How to use Poetry\n\nInstalling all dependencies from the `pyproject.toml` and the `poetry.lock`.\n\n```shell\npoetry install\n```\n\nRun any command within the virtual environment:\n\n```shell\npoetry run <command>\n```\n\nGet info about the virtual environment\n\n```shell\npoetry env info\n```\n\nUninstalling the virtual environment directory completely\n\n```shell\nrm -rf $(poetry env info -p)\n```\n\nAdd new dependency package. For example, Dynatrace's python API:\n\n```shell\npoetry add dt\n```\n\nAdd new development dependency. Will not be installed on the user system, only for development.\n\n```shell\npoetry add --dev black\n```\n\nRemove an existing dependency\n\n```shell\npoetry remove ipython\n```\n\n<a id=\"development-commands\"></a>\n### Development commands\n\nRun interactive python shell with syntax highlighting within the virtual environment\n\n```shell\npoetry run ipython\n```\n\nRun full test suite using MyPy, flake8, Coverage, and pytest:\n\n```shell\npoetry run pytest --mypy dtcli --strict --flake8 --cov . --cov-report html\n```\n\nRun `pytest` until the first failed test\n\n```shell\npoetry run pytest -x\n```\n\nRun `dt` command line itself in it's current state within the virtual environment\n\n```shell\npoetry run dt --help\n```\n\nBump to the new version using `bump2version` CLI.\n*Note: all changes must be committed*.\n\n```shell\n# Where <part> is major (x.0.0), minor (0.x.0), or patch (0.0.x)\npoetry run bump2version patch\n# or\npoetry run bump2version --new-version 1.2.3 <part>\n```\n\n<a id=\"development-cycle\"></a>\n### Development cycle\n\n1. 
Create a new branch for a new feature or to fix a bug.\n1. Make required changes.\n1. Test locally.\n1. Commit them with mentioning the GitHub issue ID (e.g. `Implements #19`).\n1. Push new branch to the repo (if you are maintainer) or create a PR from your fork.\n1. Wait for the pipeline to built and test the changes in PR.\n1. PR gets approved and merged into the `main` branch.\n1. Maintainers wait until enough changes are accumulated in the `main` branch for a new release.\n1. Maintainer pulls the `main` branch and runs `poetry bump2version <part>`\n which creates a new commit and a tag.\n1. Maintainer then pushes the newly versioned `main` branch back to the GitHub using `git push --follow-tags`\n1. New tagged push triggers the release of new version to the PyPI.\n\n<a id=\"development-configuration\"></a>\n### Development configuration\n\n1. `.coveragerc` contains settings that control how test coverage is measured\n1. `.bumpversion.cfg` controls how version is bumped and how semantics of it work\n1. `.readthedocs.yml` controls how Sphinx documentation is built on Readthedocs platform\n1. `pyproject.toml` controls most of the tool settings (instead of the old approach with `setup.cfg`).\n1. `.github/workflows/*` contains Pipeline settings for GitHub actions.\n"
},
{
"alpha_fraction": 0.6661931872367859,
"alphanum_fraction": 0.6704545617103577,
"avg_line_length": 35.54666519165039,
"blob_id": "9a295915f0a73ab276c1cf6bc7225e2161e38494",
"content_id": "45f3d958f898b574aed7de6813c68721afa10743",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2816,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 75,
"path": "/dtcli/utils.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport os.path\r\nimport re\r\n\r\n\r\nclass ExtensionBuildError(Exception):\r\n pass\r\n\r\nclass ExtensionValidationError(Exception):\r\n pass\r\n\r\nclass KeyGenerationError(Exception):\r\n pass\r\n\r\ndef require_extension_name_valid(extension_name):\r\n extension_name_regex = re.compile(\"^custom:(?!\\\\.)(?!.*\\\\.\\\\.)(?!.*\\\\.$)[a-z0-9-_\\\\.]+$\")\r\n if not extension_name_regex.match(extension_name):\r\n print(\"%s doesn't satisfy extension naming format, aborting!\" % extension_name)\r\n print(\r\n \"Name of your extension, (an extension not developed by Dynatrace) must start with custom: \"\r\n \"and comply with the metric ingestion protocol requirements for dimensions.\\n Read more at: \"\r\n \"https://www.dynatrace.com/support/help/extend-dynatrace/extensions20/extension-yaml/#start-extension-yaml-file\"\r\n )\r\n raise ExtensionBuildError()\r\n\r\ndef check_file_exists(file_path, exception_cls=ExtensionBuildError, warn_overwrite=True):\r\n \"\"\"Returns True and prints a message if file under given path exists and is a real file.\r\n In case the path represents a directory, exception given in the exception_cls parameter will be thrown.\r\n In case there's no file under the given path returns False.\r\n \"\"\"\r\n if os.path.exists(file_path):\r\n require_is_not_dir(file_path, exception_cls)\r\n if warn_overwrite:\r\n print(\"%s file already exists, it will be overwritten!\" % file_path)\r\n return True\r\n return False\r\n\r\n\r\ndef require_file_exists(file_path):\r\n if not os.path.exists(file_path):\r\n print(\"%s doesn't exist, aborting!\" % file_path)\r\n raise ExtensionBuildError()\r\n\r\n\r\ndef require_dir_exists(dir_path):\r\n if not os.path.isdir(dir_path):\r\n print(\"%s is not a directory, aborting!\" % dir_path)\r\n raise ExtensionBuildError()\r\n\r\n\r\ndef require_is_not_dir(file_path, exception_cls=ExtensionBuildError):\r\n if os.path.isdir(file_path):\r\n print(\"%s is a directory, aborting!\" % file_path)\r\n raise exception_cls()\r\n\r\n\r\ndef remove_files(file_paths):\r\n for file_path in file_paths:\r\n try:\r\n os.remove(file_path)\r\n except:\r\n print(\"Failed to remove %s\" % file_path)\r\n"
},
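The naming rule enforced by `require_extension_name_valid` above is easiest to see by example. The regex below is equivalent to the one in the source; the sample names are mine:

```python
import re

# No leading dot, no "..", no trailing dot; lowercase alphanumerics plus -_. only.
EXT_NAME = re.compile(r"^custom:(?!\.)(?!.*\.\.)(?!.*\.$)[a-z0-9-_\.]+$")

for name in ["custom:my.extension",     # ok
             "custom:My.Extension",     # uppercase -> rejected
             "custom:.leading-dot",     # starts with "." -> rejected
             "custom:double..dot",      # ".." -> rejected
             "custom:trailing.dot."]:   # ends with "." -> rejected
    print(name, "->", bool(EXT_NAME.match(name)))
```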
{
"alpha_fraction": 0.6270816326141357,
"alphanum_fraction": 0.6446537375450134,
"avg_line_length": 36.697776794433594,
"blob_id": "a02329c89fcf8b01f041127b527aa2d29330c9ac",
"content_id": "d37e01dc9665159f76a541686275a1cf5922eb8f",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8707,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 225,
"path": "/dtcli/signing.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport datetime\r\n\r\nfrom asn1crypto import cms, util, x509, core, pem\r\nfrom cryptography import x509 as crypto_x509\r\nfrom cryptography.x509.oid import NameOID\r\nfrom cryptography.hazmat.primitives import serialization, hashes\r\nfrom cryptography.hazmat.primitives.asymmetric import padding, rsa, utils\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\nfrom . import utils as dtcliutils\r\n\r\n\r\nCHUNK_SIZE = 1024 * 1024\r\n\r\nX509NameAttributes = {\r\n \"CN\": NameOID.COMMON_NAME,\r\n \"O\": NameOID.ORGANIZATION_NAME,\r\n \"OU\": NameOID.ORGANIZATIONAL_UNIT_NAME,\r\n \"L\": NameOID.LOCALITY_NAME,\r\n \"S\": NameOID.STATE_OR_PROVINCE_NAME,\r\n \"C\": NameOID.COUNTRY_NAME\r\n}\r\n\r\ndef _generate_x509_name(attributes):\r\n names_attributes = []\r\n for name, oid in X509NameAttributes.items():\r\n if name in attributes and attributes[name]:\r\n names_attributes.append(crypto_x509.NameAttribute(oid, attributes[name]))\r\n\r\n return crypto_x509.Name(names_attributes)\r\n\r\n\r\ndef generate_ca(ca_cert_file_path, ca_key_file_path, subject, not_valid_after, passphrase=None):\r\n print(\"Generating CA...\")\r\n private_key = rsa.generate_private_key(\r\n public_exponent=65537, key_size=4096\r\n )\r\n private_key_encryption = serialization.BestAvailableEncryption(passphrase.encode()) if passphrase else serialization.NoEncryption()\r\n with open(ca_key_file_path, \"wb\") as fp:\r\n fp.write(\r\n private_key.private_bytes(\r\n encoding=serialization.Encoding.PEM,\r\n format=serialization.PrivateFormat.TraditionalOpenSSL,\r\n encryption_algorithm=private_key_encryption\r\n )\r\n )\r\n print(\"Wrote CA private key: %s\" % ca_key_file_path)\r\n public_key = private_key.public_key()\r\n builder = crypto_x509.CertificateBuilder()\r\n builder = builder.subject_name(_generate_x509_name(subject))\r\n builder = builder.issuer_name(_generate_x509_name(subject))\r\n builder = builder.not_valid_before(datetime.datetime.today() - datetime.timedelta(days=1))\r\n builder = builder.not_valid_after(not_valid_after)\r\n builder = builder.serial_number(crypto_x509.random_serial_number())\r\n builder = builder.public_key(public_key)\r\n builder = builder.add_extension(\r\n crypto_x509.BasicConstraints(ca=True, path_length=None),\r\n critical=False,\r\n )\r\n # TODO add aditional extension fields to be on par with openssl result\r\n # https://cryptography.io/en/latest/x509/reference.html#cryptography.x509.AuthorityKeyIdentifier\r\n # cryptography.x509.AuthorityKeyIdentifier(\r\n # https://cryptography.io/en/latest/x509/reference.html#cryptography.x509.SubjectKeyIdentifier\r\n # cryptography.x509.SubjectKeyIdentifier\r\n certificate = builder.sign(\r\n private_key=private_key,\r\n algorithm=hashes.SHA256(),\r\n )\r\n with open(ca_cert_file_path, \"wb\") as fp:\r\n fp.write(certificate.public_bytes(serialization.Encoding.PEM))\r\n print(\"Wrote CA certificate: %s\" % 
ca_cert_file_path)\r\n\r\n\r\ndef generate_cert(\r\n ca_cert_file_path, ca_key_file_path, dev_cert_file_path, dev_key_file_path, subject,\r\n not_valid_after, ca_passphrase=None, dev_passphrase=None\r\n):\r\n print(\"Loading CA private key %s\" % ca_key_file_path)\r\n with open(ca_key_file_path, \"rb\") as fp:\r\n ca_private_key = serialization.load_pem_private_key(\r\n fp.read(),\r\n password=ca_passphrase.encode() if ca_passphrase else None,\r\n backend=default_backend()\r\n )\r\n\r\n print(\"Loading CA certificate %s\" % ca_cert_file_path)\r\n with open(ca_cert_file_path, \"rb\") as fp:\r\n ca_cert = crypto_x509.load_pem_x509_certificate(fp.read())\r\n subject_name = _generate_x509_name(subject)\r\n if ca_cert.issuer == subject_name:\r\n raise dtcliutils.KeyGenerationError(\"Certificate subject must be different from its issuer\")\r\n\r\n\r\n print(\"Generating developer certificate...\")\r\n private_key = rsa.generate_private_key(\r\n public_exponent=65537, key_size=4096\r\n )\r\n private_key_encryption = serialization.BestAvailableEncryption(dev_passphrase.encode()) if dev_passphrase else serialization.NoEncryption()\r\n with open(dev_key_file_path, \"wb\") as fp:\r\n fp.write(\r\n private_key.private_bytes(\r\n encoding=serialization.Encoding.PEM,\r\n format=serialization.PrivateFormat.TraditionalOpenSSL,\r\n encryption_algorithm=private_key_encryption,\r\n )\r\n )\r\n public_key = private_key.public_key()\r\n print(\"Wrote developer private key: %s\" % dev_key_file_path)\r\n\r\n builder = crypto_x509.CertificateBuilder()\r\n builder = builder.subject_name(subject_name)\r\n builder = builder.issuer_name(ca_cert.issuer)\r\n builder = builder.not_valid_before(datetime.datetime.today() - datetime.timedelta(days=1))\r\n builder = builder.not_valid_after(not_valid_after)\r\n builder = builder.serial_number(crypto_x509.random_serial_number())\r\n builder = builder.public_key(public_key)\r\n certificate = builder.sign(\r\n private_key=ca_private_key,\r\n algorithm=hashes.SHA256(),\r\n )\r\n\r\n with open(dev_cert_file_path, \"wb\") as fp:\r\n fp.write(certificate.public_bytes(serialization.Encoding.PEM))\r\n print(\"Wrote developer certificate: %s\" % dev_cert_file_path)\r\n\r\n\r\ndef sign_file(\r\n file_path,\r\n signature_file_path,\r\n certificate_file_path,\r\n private_key_file_path,\r\n dev_passphrase=None\r\n):\r\n print(\r\n \"Signing %s using %s certificate and %s private key\"\r\n % (file_path, certificate_file_path, private_key_file_path)\r\n )\r\n with open(private_key_file_path, \"rb\") as fp:\r\n private_key = serialization.load_pem_private_key(\r\n fp.read(),\r\n password=dev_passphrase.encode() if dev_passphrase else None,\r\n backend=default_backend()\r\n )\r\n sha256 = hashes.SHA256()\r\n hasher = hashes.Hash(sha256)\r\n with open(file_path, \"rb\") as fp:\r\n buf = fp.read(CHUNK_SIZE)\r\n while len(buf) > 0:\r\n hasher.update(buf)\r\n buf = fp.read(CHUNK_SIZE)\r\n signature = private_key.sign(\r\n hasher.finalize(), padding.PKCS1v15(), utils.Prehashed(sha256)\r\n )\r\n signed_data = cms.SignedData()\r\n signed_data[\"version\"] = \"v1\"\r\n signed_data[\"encap_content_info\"] = util.OrderedDict(\r\n [(\"content_type\", \"data\"), (\"content\", None)]\r\n )\r\n signed_data[\"digest_algorithms\"] = [\r\n util.OrderedDict([(\"algorithm\", \"sha256\"), (\"parameters\", None)])\r\n ]\r\n\r\n with open(certificate_file_path, \"rb\") as fp:\r\n der_bytes = fp.read()\r\n if pem.detect(der_bytes):\r\n type_name, headers, der_bytes = pem.unarmor(der_bytes)\r\n else:\r\n 
print(\"Wrong certificate format, expected PEM, aborting!\")\r\n raise dtcliutils.ExtensionBuildError()\r\n\r\n cert = x509.Certificate.load(der_bytes)\r\n\r\n signed_data[\"certificates\"] = [\r\n cert,\r\n ]\r\n\r\n signer_info = cms.SignerInfo()\r\n signer_info[\"version\"] = 1\r\n signer_info[\"digest_algorithm\"] = util.OrderedDict(\r\n [(\"algorithm\", \"sha256\"), (\"parameters\", None)]\r\n )\r\n signer_info[\"signature_algorithm\"] = util.OrderedDict(\r\n [(\"algorithm\", \"rsassa_pkcs1v15\"), (\"parameters\", core.Null)]\r\n )\r\n signer_info[\"signature\"] = signature\r\n signer_info[\"sid\"] = cms.SignerIdentifier(\r\n {\r\n \"issuer_and_serial_number\": util.OrderedDict(\r\n [\r\n (\"issuer\", cert.issuer),\r\n (\"serial_number\", cert.serial_number),\r\n ]\r\n )\r\n }\r\n )\r\n\r\n signed_data[\"signer_infos\"] = [\r\n signer_info,\r\n ]\r\n\r\n # TODO timestamping?\r\n # dump ASN.1 object\r\n asn1obj = cms.ContentInfo()\r\n asn1obj[\"content_type\"] = \"signed_data\"\r\n asn1obj[\"content\"] = signed_data\r\n\r\n with open(signature_file_path, \"wb+\") as fp:\r\n der_bytes = asn1obj.dump()\r\n pem_bytes = pem.armor(\"CMS\", der_bytes)\r\n fp.write(pem_bytes)\r\n print(\"Wrote signature file %s\" % signature_file_path)\r\n"
},
{
"alpha_fraction": 0.7252943515777588,
"alphanum_fraction": 0.7299321889877319,
"avg_line_length": 33.50632858276367,
"blob_id": "becf827e0ba9eebeef156b7c1bece0861a2fc1cf",
"content_id": "329fd888e98209215c4209dc3c6b404e0ad75801",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2805,
"license_type": "permissive",
"max_line_length": 258,
"num_lines": 79,
"path": "/README.md",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# dt-cli — Dynatrace developer's toolbox\r\n\r\nDynatrace CLI is a command line utility that assists in signing, building and uploading\r\nextensions for Dynatrace Extension Framework 2.0.\r\n\r\n<p>\r\n <a href=\"https://pypi.org/project/dt-cli/\"><img alt=\"PyPI\" src=\"https://img.shields.io/pypi/v/dt-cli?color=blue&logo=python&logoColor=white\"></a>\r\n <a href=\"https://pypi.org/project/dt-cli/\"><img alt=\"PyPI - Python Version\" src=\"https://img.shields.io/pypi/pyversions/dt-cli?logo=python&logoColor=white\"></a>\r\n <a href=\"https://github.com/dynatrace-oss/dt-cli/actions/workflows/built-test-release.yml\"><img alt=\"GitHub Workflow Status (main branch)\" src=\"https://img.shields.io/github/workflow/status/dynatrace-oss/dt-cli/Build%20Test%20Release/main?logo=github\"></a>\r\n</p>\r\n\r\n\r\n### Features\r\n\r\n* Build and sign extensions from source\r\n* Generate development certificates for extension signing\r\n* Generate CA certificates for development\r\n* Validate and upload extension to Dynatrace Extension Framework 2.0.\r\n\r\n## Installation\r\n\r\n```shell\r\npip install dt-cli\r\n```\r\n\r\n## Usage\r\n\r\n1. Generate certificates\r\n```sh\r\n dt extension gencerts\r\n```\r\n2. Upload your `ca.pem` certificate to the Dynatrace credential vault\r\n\r\nSee: [Add your root certificate to the Dynatrace credential vault](https://www.dynatrace.com/support/help/extend-dynatrace/extensions20/sign-extension/#add-your-root-certificate-to-the-dynatrace-credential-vault)\r\n\r\n3. Build and sign, then upload extension\r\n```sh\r\n dt extension build\r\n dt extension upload\r\n```\r\nUse `dt extension --help` to learn more\r\n\r\n\r\n## Using dt-cli from your Python code\r\n\r\nYou may want to use some commands implemented by `dt-cli` directly in your Python code, e.g. to automatically sign your extension in a CI environment.\r\nHere's an example of building an extension programatically, it assumes `dtcli` package is already installed and available in your working environment.\r\n\r\n\r\n```python\r\nfrom dtcli import building\r\n\r\n\r\nbuilding.build_extension(\r\n extension_dir_path = './extension',\r\n extension_zip_path = './extension.zip',\r\n extension_zip_sig_path = './extension.zip.sig',\r\n target_dir_path = './dist',\r\n certificate_file_path = './developer.crt',\r\n private_key_file_path = './developer.key',\r\n dev_passphrase=None,\r\n keep_intermediate_files=False,\r\n)\r\n```\r\n\r\n## Development\r\n\r\nSee our [CONTRIBUTING](CONTRIBUTING.md) guidelines and instructions.\r\n\r\n## Contributions\r\n\r\nYou are welcome to contribute using Pull Requests to the respective\r\nrepository. Before contributing, please read our\r\n[Code of Conduct](https://github.com/dynatrace-oss/dt-cli/blob/main/CODE_OF_CONDUCT.md).\r\n\r\n## License\r\n\r\n`dt-cli` is an Open Source Project. Please see\r\n[LICENSE](https://github.com/dynatrace-oss/dt-cli/blob/main/LICENSE) for more information."
},
{
"alpha_fraction": 0.6713703274726868,
"alphanum_fraction": 0.6736280918121338,
"avg_line_length": 35.96428680419922,
"blob_id": "6b762a5f6999bcf6ed0579152f7da48e11a8e320",
"content_id": "bb2bbd2a1075f42b36a1a6d492e742794d2bcde5",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15945,
"license_type": "permissive",
"max_line_length": 170,
"num_lines": 420,
"path": "/dtcli/scripts/dt.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport click\r\nimport datetime\r\nimport re\r\n\r\nfrom click_aliases import ClickAliasedGroup\r\n\r\nfrom dtcli.constants import *\r\nfrom dtcli.utils import *\r\n\r\nfrom dtcli import building\r\nfrom dtcli import signing\r\nfrom dtcli import __version__\r\nfrom dtcli import dev\r\nfrom dtcli import server_api\r\n\r\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\r\n\r\ndef validate_parse_subject(ctx, param, value):\r\n if value is None:\r\n return None\r\n\r\n def split_pair_and_verify_key(pair):\r\n key, val = pair.replace(\"\\\\\", \"\").split('=')\r\n if key not in signing.X509NameAttributes:\r\n raise click.BadParameter(f\"subject attributes must be one of {list(signing.X509NameAttributes)}. Got '{key}' instead.\")\r\n return key, val\r\n\r\n try:\r\n return(dict(map(split_pair_and_verify_key, filter(None, re.split(r\"(?<!\\\\)\\/\", value)))))\r\n return value\r\n except ValueError:\r\n raise click.BadParameter(f\"format must be '/key0=value0/key1=value1/...' got: '{value}'\")\r\n\r\ndef edit_other_option_if_true(ctx, param, value, other_name, edit_callback):\r\n if not value:\r\n return\r\n for p in ctx.command.params:\r\n if isinstance(p, click.Option) and p.name == other_name:\r\n edit_callback(p)\r\n\r\n\r\ndef _genca(ca_cert_path, ca_key_path, force, subject, days_valid, ca_passphrase):\r\n if force:\r\n print(\"Forced generation option used. Already existing CA certificate files will be overwritten.\")\r\n check_file_exists(ca_cert_path, KeyGenerationError)\r\n check_file_exists(ca_key_path, KeyGenerationError)\r\n signing.generate_ca(\r\n ca_cert_path,\r\n ca_key_path,\r\n subject,\r\n datetime.datetime.today() + datetime.timedelta(days=days_valid),\r\n ca_passphrase\r\n )\r\n return\r\n\r\n if (\r\n check_file_exists(ca_cert_path, KeyGenerationError, warn_overwrite=False) and\r\n check_file_exists(ca_key_path, KeyGenerationError, warn_overwrite=False)\r\n ):\r\n raise KeyGenerationError(\r\n \"CA certificate NOT generated! CA key and certificate already exist. 
Use --force option to generate anyway.\"\r\n )\r\n\r\n signing.generate_ca(\r\n ca_cert_path,\r\n ca_key_path,\r\n subject,\r\n datetime.datetime.today() + datetime.timedelta(days=days_valid),\r\n ca_passphrase\r\n )\r\n\r\n\r\ndef _gendevcert(\r\n ca_cert_path, ca_key_path, dev_cert_path, dev_key_path, subject, days_valid, ca_passphrase, dev_passphrase\r\n):\r\n require_file_exists(ca_cert_path)\r\n require_file_exists(ca_key_path)\r\n require_is_not_dir(dev_cert_path)\r\n require_is_not_dir(dev_key_path)\r\n\r\n check_file_exists(dev_cert_path, KeyGenerationError)\r\n check_file_exists(dev_key_path, KeyGenerationError)\r\n\r\n signing.generate_cert(\r\n ca_cert_path,\r\n ca_key_path,\r\n dev_cert_path,\r\n dev_key_path,\r\n subject,\r\n datetime.datetime.today() + datetime.timedelta(days=days_valid),\r\n ca_passphrase,\r\n dev_passphrase\r\n )\r\n\r\n\r\n\r\[email protected](context_settings=CONTEXT_SETTINGS, cls=ClickAliasedGroup)\r\[email protected]_option(version=__version__)\r\ndef main():\r\n \"\"\"\r\n Dynatrace CLI is a command line utility that assists in signing, building and uploading extensions\r\n for Dynatrace Extensions 2.0 framework\r\n \"\"\"\r\n pass\r\n\r\n\r\[email protected](aliases=[\"extensions\", \"ext\"])\r\ndef extension():\r\n \"\"\"\r\n Set of utilities for signing, building and uploading extensions\r\n\r\n \\b\r\n Example flow:\r\n gencerts -> build -> upload\r\n \"\"\"\r\n pass\r\n\r\n\r\[email protected](aliases=[\"extensions_dev\", \"ext_dev\"], hidden=True)\r\ndef extension_dev():\r\n pass\r\n\r\n\r\[email protected](\r\n help=\"Creates CA key and certificate, needed to create developer certificate used for extension signing\"\r\n)\r\[email protected](\r\n \"--ca-cert\", default=DEFAULT_CA_CERT, show_default=True, help=\"CA certificate output path\"\r\n)\r\[email protected](\r\n \"--ca-key\", default=DEFAULT_CA_KEY, show_default=True, help=\"CA key output path\"\r\n)\r\[email protected](\r\n \"--ca-subject\", callback=validate_parse_subject, default=\"/CN=Default Extension CA/O=Some Company/OU=Extension CA\",\r\n show_default=True, help=\"Certificate subject. 
Accepted format is /key0=value0/key1=value1/...\"\r\n)\r\[email protected](\r\n \"--ca-passphrase\", type=str, prompt=\"CA private key passphrase\", confirmation_prompt=True, hide_input=True, default=\"\",\r\n help=\"Sets passphrase for CA private key encryption - private key is not encrypted if empty\"\r\n)\r\[email protected](\r\n \"--no-ca-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for CA private key encryption passphrase - private key is not encrypted\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"ca_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--force\", is_flag=True, help=\"Overwrites already existing CA key and certificate\"\r\n)\r\[email protected](\r\n \"--days-valid\", default=DEFAULT_CERT_VALIDITY, show_default=True, type=int, help=\"Number of days certificate will be valid\"\r\n)\r\ndef genca(**kwargs):\r\n _genca(kwargs[\"ca_cert\"], kwargs[\"ca_key\"], kwargs[\"force\"], kwargs[\"ca_subject\"], kwargs[\"days_valid\"], kwargs[\"ca_passphrase\"])\r\n\r\n\r\n\r\[email protected](\r\n help=\"Creates developer key and certificate used for extension signing\"\r\n)\r\[email protected](\r\n \"--ca-cert\", default=DEFAULT_CA_CERT, show_default=True, help=\"CA certificate input path\"\r\n)\r\[email protected](\r\n \"--ca-key\", default=DEFAULT_CA_KEY, show_default=True, help=\"CA key input path\"\r\n)\r\[email protected](\r\n \"--ca-passphrase\", type=str, prompt=\"CA private key passphrase\", hide_input=True, default=\"\",\r\n help=\"Passphrase used for CA private key encryption\"\r\n)\r\[email protected](\r\n \"--no-ca-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for CA private key encryption passphrase\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"ca_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--dev-cert\", default=DEFAULT_DEV_CERT, show_default=True, help=\"Developer certificate output path\"\r\n)\r\[email protected](\r\n \"--dev-key\", default=DEFAULT_DEV_KEY, show_default=True, help=\"Developer key output path\"\r\n)\r\[email protected](\r\n \"--dev-passphrase\", type=str, prompt=\"Developer private key passphrase\", confirmation_prompt=True, hide_input=True, default=\"\",\r\n help=\"Sets passphrase for developer private key encryption - private key is not encrypted if empty\"\r\n)\r\[email protected](\r\n \"--no-dev-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for developer private key encryption passphrase - private key is not encrypted\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"dev_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--dev-subject\", callback=validate_parse_subject, default=\"/CN=Some Developer/O=Some Company/OU=Extension Development\",\r\n show_default=True, help=\"certificate subject. 
Accepted format is /key0=value0/key1=value1/...\"\r\n)\r\[email protected](\r\n \"--days-valid\", default=DEFAULT_CERT_VALIDITY, show_default=True, type=int, help=\"Number of days certificate will be valid\"\r\n)\r\ndef gendevcert(**kwargs):\r\n _gendevcert(\r\n kwargs[\"ca_cert\"],\r\n kwargs[\"ca_key\"],\r\n kwargs[\"dev_cert\"],\r\n kwargs[\"dev_key\"],\r\n kwargs[\"dev_subject\"],\r\n kwargs[\"days_valid\"],\r\n kwargs[\"ca_passphrase\"],\r\n kwargs[\"dev_passphrase\"]\r\n )\r\n\r\n\r\n\r\[email protected](\r\n help=\"Creates CA key, CA certificate, developer key and developer certificate used for extension signing\"\r\n)\r\[email protected](\r\n \"--ca-cert\", default=DEFAULT_CA_CERT, show_default=True, help=\"CA certificate output path\"\r\n)\r\[email protected](\r\n \"--ca-key\", default=DEFAULT_CA_KEY, show_default=True, help=\"CA key output path\"\r\n)\r\[email protected](\r\n \"--ca-passphrase\", type=str, prompt=\"CA private key passphrase\", confirmation_prompt=True, hide_input=True, default=\"\",\r\n help=\"Sets passphrase for CA private key encryption - private key is not encrypted if empty\"\r\n)\r\[email protected](\r\n \"--no-ca-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for CA private key encryption passphrase - private key is not encrypted\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"ca_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--ca-subject\", callback=validate_parse_subject, default=\"/CN=Default Extension CA/O=Some Company/OU=Extension CA\",\r\n show_default=True, help=\"certificate subject. Accepted format is /key0=value0/key1=value1/...\"\r\n)\r\[email protected](\r\n \"--force\", is_flag=True, help=\"overwrites already existing CA key and certificate\"\r\n)\r\[email protected](\r\n \"--dev-cert\", default=DEFAULT_DEV_CERT, show_default=True, help=\"Developer certificate output path\"\r\n)\r\[email protected](\r\n \"--dev-key\", default=DEFAULT_DEV_KEY, show_default=True, help=\"Developer key output path\"\r\n)\r\[email protected](\r\n \"--dev-passphrase\", type=str, prompt=\"Developer private key passphrase\", confirmation_prompt=True, hide_input=True, default=\"\",\r\n help=\"Sets passphrase for developer private key encryption - private key is not encrypted if empty\"\r\n)\r\[email protected](\r\n \"--no-dev-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for developer private key encryption passphrase - private key is not encrypted\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"dev_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--dev-subject\", callback=validate_parse_subject, default=\"/CN=Some Developer/O=Some Company/OU=Extension Development\",\r\n show_default=True, help=\"certificate subject. 
Accepted format is /key0=value0/key1=value1/...\"\r\n)\r\[email protected](\r\n \"--days-valid\", default=DEFAULT_CERT_VALIDITY, show_default=True, type=int, help=\"Number of days certificate will be valid\"\r\n)\r\ndef gencerts(**kwargs):\r\n _genca(kwargs[\"ca_cert\"], kwargs[\"ca_key\"], kwargs[\"force\"], kwargs[\"ca_subject\"], kwargs[\"days_valid\"], kwargs[\"ca_passphrase\"])\r\n _gendevcert(\r\n kwargs[\"ca_cert\"],\r\n kwargs[\"ca_key\"],\r\n kwargs[\"dev_cert\"],\r\n kwargs[\"dev_key\"],\r\n kwargs[\"dev_subject\"],\r\n kwargs[\"days_valid\"],\r\n kwargs[\"ca_passphrase\"],\r\n kwargs[\"dev_passphrase\"]\r\n )\r\n\r\n\r\n\r\[email protected](\r\n help=f\"Builds extension package from the given extension directory (default: {DEFAULT_EXTENSION_DIR}) that contains extension.yaml and additional asset directories\"\r\n)\r\[email protected](\r\n \"--extension-directory\",\r\n default=DEFAULT_EXTENSION_DIR, show_default=True,\r\n help=\"Directory where the `extension.yaml' and other extension files are located\",\r\n)\r\[email protected](\r\n \"--target-directory\",\r\n default=DEFAULT_TARGET_PATH, show_default=True,\r\n help=\"Directory where extension package should be written\",\r\n)\r\[email protected](\r\n \"--certificate\",\r\n default=DEFAULT_DEV_CERT, show_default=True,\r\n help=\"Developer certificate used for signing\",\r\n)\r\[email protected](\r\n \"--private-key\",\r\n default=DEFAULT_DEV_KEY, show_default=True,\r\n help=\"Developer private key used for signing\",\r\n)\r\[email protected](\r\n \"--dev-passphrase\", type=str, prompt=\"Developer private key passphrase\", hide_input=True, default=\"\",\r\n help=\"Passphrase used for developer private key encryption\"\r\n)\r\[email protected](\r\n \"--no-dev-passphrase\", default=False, is_flag=True, is_eager=True, help=\"Skips prompt for developer private key encryption passphrase\",\r\n callback=lambda c, p, v: edit_other_option_if_true(c, p, v, \"dev_passphrase\", lambda param: setattr(param, 'prompt', None))\r\n)\r\[email protected](\r\n \"--keep-intermediate-files\",\r\n is_flag=True,\r\n default=False,\r\n help=\"Do not delete the signature and `extension.zip' files after building extension archive\",\r\n)\r\ndef build(**kwargs):\r\n extension_dir_path = kwargs[\"extension_directory\"]\r\n require_dir_exists(extension_dir_path)\r\n\r\n target_dir_path = kwargs[\"target_directory\"]\r\n if os.path.exists(target_dir_path):\r\n require_dir_exists(target_dir_path)\r\n if not os.path.isdir(target_dir_path):\r\n print(\"%s is not a directory, aborting!\" % target_dir_path)\r\n return\r\n else:\r\n print(\"Creating target directory: %s\" % target_dir_path)\r\n os.makedirs(target_dir_path, exist_ok=True)\r\n\r\n extension_zip_path = os.path.join(target_dir_path, EXTENSION_ZIP)\r\n extension_zip_sig_path = os.path.join(target_dir_path, EXTENSION_ZIP_SIG)\r\n\r\n certificate_file_path = kwargs[\"certificate\"]\r\n require_file_exists(certificate_file_path)\r\n private_key_file_path = kwargs[\"private_key\"]\r\n require_file_exists(private_key_file_path)\r\n\r\n building.build_extension(\r\n extension_dir_path,\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n target_dir_path,\r\n certificate_file_path,\r\n private_key_file_path,\r\n kwargs['dev_passphrase'],\r\n kwargs[\"keep_intermediate_files\"],\r\n )\r\n\r\n\r\n\r\[email protected](\r\n help=\"Validates extension package using Dynatrace Cluster API\"\r\n)\r\[email protected](\r\n \"extension-zip\", type=click.Path(exists=True, readable=True)\r\n)\r\[email protected](\r\n 
\"--tenant-url\", prompt=True, help=\"Dynatrace environment URL, e.g., https://<tenantid>.live.dynatrace.com\"\r\n)\r\[email protected](\r\n \"--api-token\", prompt=True, help=\"Dynatrace API token. Please note that token needs to have the 'Write extension' scope enabled.\"\r\n)\r\ndef validate(**kwargs):\r\n extension_zip = kwargs['extension_zip']\r\n require_file_exists(extension_zip)\r\n server_api.validate(extension_zip, kwargs['tenant_url'], kwargs['api_token'])\r\n\r\n\r\n\r\[email protected](\r\n help=\"Uploads extension package to the Dynatrace Cluster\"\r\n)\r\[email protected](\r\n \"extension-zip\", type=click.Path(exists=True, readable=True)\r\n)\r\[email protected](\r\n \"--tenant-url\", prompt=True, help=\"Dynatrace environment URL, e.g., https://<tenantid>.live.dynatrace.com\"\r\n)\r\[email protected](\r\n \"--api-token\", prompt=True, help=\"Dynatrace API token. Please note that token needs to have the 'Write extension' scope enabled.\"\r\n)\r\ndef upload(**kwargs):\r\n extension_zip = kwargs['extension_zip']\r\n require_file_exists(extension_zip)\r\n server_api.upload(extension_zip, kwargs['tenant_url'], kwargs['api_token'])\r\n\r\n\r\n\r\n@extension_dev.command(\r\n help=\"Command packs python package as a datasource. It uses pip to download all dependencies and create whl files\"\r\n)\r\[email protected](\r\n \"path-to-setup-py\",\r\n)\r\[email protected](\r\n \"--additional-libraries-dir\",\r\n default=None,\r\n help=\"Path to folder containing additional directories\"\r\n)\r\[email protected](\r\n \"--extension-directory\",\r\n default=DEFAULT_EXTENSION_DIR,\r\n help=\"Directory where extension files are. Default: \"\r\n + DEFAULT_EXTENSION_DIR,\r\n)\r\ndef prepare_python(path_to_setup_py, **kwargs):\r\n additional_libraries_dir = kwargs.get(\"additional_libraries_dir\", None)\r\n extension_directory = kwargs[\"extension_directory\"]\r\n\r\n return dev.pack_python_extension(\r\n setup_path=path_to_setup_py,\r\n target_path=extension_directory,\r\n additional_path=additional_libraries_dir)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
},
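The `--ca-subject` and `--dev-subject` options above parse strings like `/CN=.../O=.../OU=...` by splitting on slashes that are not escaped with a backslash. A standalone sketch of that parsing logic; the `ALLOWED` set is a hypothetical stand-in for `signing.X509NameAttributes`, which is not included in this slice:

```python
import re

# Hypothetical stand-in for signing.X509NameAttributes
ALLOWED = {"CN", "O", "OU", "L", "S", "C"}

def parse_subject(value):
    # Split on '/' not preceded by a backslash, drop empty pieces
    parts = filter(None, re.split(r"(?<!\\)/", value))
    subject = {}
    for pair in parts:
        key, val = pair.replace("\\", "").split("=")
        if key not in ALLOWED:
            raise ValueError(f"unknown attribute {key!r}")
        subject[key] = val
    return subject

print(parse_subject("/CN=Some Developer/O=Some Company/OU=Extension Development"))
# {'CN': 'Some Developer', 'O': 'Some Company', 'OU': 'Extension Development'}
```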
{
"alpha_fraction": 0.6171284914016724,
"alphanum_fraction": 0.6204869747161865,
"avg_line_length": 36.21875,
"blob_id": "aca4b1aeaeaf2cbd79ba610fa06cc40b369a8d1a",
"content_id": "d074b553e00334200774a01c2062cedc408417e1",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1191,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 32,
"path": "/dtcli/dev.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport sys\nimport tempfile\nimport shutil\nimport os\n\n\ndef pack_python_extension(setup_path, target_path, additional_path):\n with tempfile.TemporaryDirectory() as tmp:\n args = [sys.executable, '-m', 'pip', 'wheel', '-w', tmp]\n if additional_path is not None:\n args.extend(['-f', additional_path])\n\n args.append(setup_path)\n result = subprocess.run(args, capture_output=True)\n\n if result.returncode != 0:\n print(\"Error building python extension: {}\".format(result.stderr.decode('utf-8')), file=sys.stderr)\n return result.returncode\n\n lib_folder = os.path.join(target_path, 'lib')\n if not os.path.exists(lib_folder):\n os.makedirs(lib_folder)\n if not os.path.isdir(lib_folder):\n print('ERROR - {} is file, needs to be a folder'.format(lib_folder), file=sys.stderr)\n return 1\n for file in os.listdir(tmp):\n src_file = os.path.join(tmp, file)\n dst_file = os.path.join(lib_folder, file)\n shutil.copy(src=src_file, dst=dst_file)\n print(\"Python extension packed successfully to {}\".format(lib_folder))\n return 0\n"
},
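A short usage sketch for `dev.pack_python_extension` above; the setup.py path and extension directory are hypothetical. Under the hood the function shells out to `python -m pip wheel -w <tmpdir> <setup_path>` and copies the resulting wheels into `<target_path>/lib`:

```python
from dtcli import dev

rc = dev.pack_python_extension(
    setup_path="./my_datasource/setup.py",  # hypothetical project
    target_path="./extension",              # extension directory
    additional_path=None,                   # optional extra wheel source (-f)
)
print("ok" if rc == 0 else f"pip wheel failed with code {rc}")
```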
{
"alpha_fraction": 0.7091836929321289,
"alphanum_fraction": 0.7091836929321289,
"avg_line_length": 15.333333015441895,
"blob_id": "536556cf3d7b2eb527014c7c623d42a4127a045c",
"content_id": "5396c37c57d79d83f7a2597aa615d9b64f88a5c8",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 196,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 12,
"path": "/Dockerfile",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "FROM python:latest\n\n# Install package build system\nRUN pip install poetry\n\n# Project directory must be mounted in /app\nWORKDIR /app\n\nCOPY . .\nRUN poetry install\n\nCMD [ \"tail\", \"-f\", \"/dev/null\" ]\n"
},
{
"alpha_fraction": 0.7395042777061462,
"alphanum_fraction": 0.7481032013893127,
"avg_line_length": 47.219512939453125,
"blob_id": "ce4dcf0f4e52736a506bc701693c326ee7e718e4",
"content_id": "030c4da41ab46d015edac7341212814bb07618fa",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1977,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 41,
"path": "/tests/test_utils.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom dtcli import utils\n\ndef test_require_extension_name_valid():\n utils.require_extension_name_valid(\"custom:e\")\n utils.require_extension_name_valid(\"custom:some.test.ext\")\n utils.require_extension_name_valid(\"custom:some_simple.test.ext-1\")\n utils.require_extension_name_valid(\"custom:_some_simple_test_extension\")\n utils.require_extension_name_valid(\"custom:-some-simple.test.ext_1_\")\n\ndef test_require_extension_name_valid_negative():\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"some.test.ext\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:.some.test.ext.\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:some.test..ext\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:som:e.t/est.e$xt\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:SOME.test.ext\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:SOME123.test.ext\")\n with pytest.raises(utils.ExtensionBuildError):\n utils.require_extension_name_valid(\"custom:\\u0194test,ext\")\n"
},
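The tests above pin down a grammar for extension names: a `custom:` prefix followed by dot-separated segments of lowercase letters, digits, underscores, and hyphens, with no empty segments. The real `utils.require_extension_name_valid` is not included in this slice, so the regex below is an assumed reconstruction inferred from the positive and negative cases the tests assert:

```python
import re

# Assumed reconstruction; the actual implementation may differ
NAME_RE = re.compile(r"custom:[a-z0-9_-]+(\.[a-z0-9_-]+)*")

def is_valid_name(name):
    return NAME_RE.fullmatch(name) is not None

assert is_valid_name("custom:some_simple.test.ext-1")
assert not is_valid_name("custom:")           # empty body
assert not is_valid_name("custom:some..ext")  # empty segment
assert not is_valid_name("custom:SOME.ext")   # uppercase rejected
```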
{
"alpha_fraction": 0.6468005180358887,
"alphanum_fraction": 0.6548515558242798,
"avg_line_length": 39.52542495727539,
"blob_id": "652da39bf4ecef37e0d25b50e920ef1278919279",
"content_id": "a6c9a1500139b1f0be58037c04292d09d57be029",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9564,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 236,
"path": "/tests/test_signing.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os\nimport pytest\n\nfrom cryptography import x509 as crypto_x509\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.x509.oid import NameOID\nfrom dtcli import signing\nfrom dtcli import utils\n\ndef test_generate_ca():\n cert_path = \"test_ca_certificate.crt\"\n key_path = \"test_ca_key.key\"\n not_valid_after = datetime.datetime.today().replace(microsecond=0) + datetime.timedelta(days=123)\n passphrase = \"secretpassphrase\"\n signing.generate_ca(\n cert_path,\n key_path,\n {\n \"CN\": \"Some Common Name\",\n \"O\": \"Some Org Name\",\n \"OU\": \"Some OU\",\n \"L\": \"Some Locality\",\n \"S\": \"Some State\",\n \"C\": \"PL\"\n },\n not_valid_after,\n passphrase\n )\n assert os.path.exists(cert_path)\n assert os.path.exists(key_path)\n\n with open(cert_path, \"rb\") as fp:\n ca_cert = crypto_x509.load_pem_x509_certificate(fp.read())\n\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == \"Some Common Name\"\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)[0].value == \"Some Org Name\"\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value == \"Some OU\"\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.LOCALITY_NAME)[0].value == \"Some Locality\"\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)[0].value == \"Some State\"\n assert ca_cert.issuer.get_attributes_for_oid(NameOID.COUNTRY_NAME)[0].value == \"PL\"\n\n assert ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == \"Some Common Name\"\n assert ca_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)[0].value == \"Some Org Name\"\n assert ca_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value == \"Some OU\"\n assert ca_cert.subject.get_attributes_for_oid(NameOID.LOCALITY_NAME)[0].value == \"Some Locality\"\n assert ca_cert.subject.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)[0].value == \"Some State\"\n assert ca_cert.subject.get_attributes_for_oid(NameOID.COUNTRY_NAME)[0].value == \"PL\"\n\n assert ca_cert.not_valid_after == not_valid_after\n\n with open(key_path, \"rb\") as fp:\n ca_private_key = serialization.load_pem_private_key(fp.read(), password=passphrase.encode())\n assert (\n ca_cert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1) ==\n ca_private_key.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1)\n )\n\n os.remove(cert_path)\n os.remove(key_path)\n\ndef test_generate_ca_empty_attributes():\n cert_path = \"test_ca_certificate.crt\"\n key_path = \"test_ca_key.key\"\n\n signing.generate_ca(\n cert_path,\n key_path,\n {},\n datetime.datetime.today() + datetime.timedelta(days=1)\n )\n assert os.path.exists(cert_path)\n assert os.path.exists(key_path)\n\n with open(cert_path, \"rb\") as fp:\n ca_cert = 
crypto_x509.load_pem_x509_certificate(fp.read())\n\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.LOCALITY_NAME)\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)\n assert not ca_cert.issuer.get_attributes_for_oid(NameOID.COUNTRY_NAME)\n\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.LOCALITY_NAME)\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)\n assert not ca_cert.subject.get_attributes_for_oid(NameOID.COUNTRY_NAME)\n\n with open(key_path, \"rb\") as fp:\n ca_private_key = serialization.load_pem_private_key(fp.read(), password=None)\n assert (\n ca_cert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1) ==\n ca_private_key.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1)\n )\n\n os.remove(cert_path)\n os.remove(key_path)\n\ndef test_generate_cert():\n ca_cert_path = \"test_ca_certificate.crt\"\n ca_key_path = \"test_ca_key.key\"\n ca_passphrase = \"secretcapassphrase\"\n\n signing.generate_ca(\n ca_cert_path,\n ca_key_path,\n {\n \"CN\": \"Some Common Name\",\n \"O\": \"Some Org Name\",\n \"OU\": \"Some OU\",\n \"L\": \"Some Locality\",\n \"S\": \"Some State\",\n \"C\": \"PL\"\n },\n datetime.datetime.today() + datetime.timedelta(days=1),\n ca_passphrase\n )\n assert os.path.exists(ca_cert_path)\n assert os.path.exists(ca_key_path)\n\n dev_cert_path = \"test_dev_certificate.crt\"\n dev_key_path = \"test_dev_key.key\"\n not_valid_after = datetime.datetime.today().replace(microsecond=0) + datetime.timedelta(days=123)\n dev_passphrase = \"secretdevpassphrase\"\n\n signing.generate_cert(\n ca_cert_path,\n ca_key_path,\n dev_cert_path,\n dev_key_path,\n {\n \"CN\": \"Some Other Common Name\",\n \"O\": \"Some Other Org Name\",\n \"OU\": \"Some Other OU\",\n \"L\": \"Some Locality\",\n \"S\": \"Some State\",\n \"C\": \"PL\"\n },\n not_valid_after,\n ca_passphrase,\n dev_passphrase\n )\n assert os.path.exists(dev_cert_path)\n assert os.path.exists(dev_key_path)\n\n with open(dev_cert_path, \"rb\") as fp:\n dev_cert = crypto_x509.load_pem_x509_certificate(fp.read())\n\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == \"Some Common Name\"\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)[0].value == \"Some Org Name\"\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value == \"Some OU\"\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.LOCALITY_NAME)[0].value == \"Some Locality\"\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)[0].value == \"Some State\"\n assert dev_cert.issuer.get_attributes_for_oid(NameOID.COUNTRY_NAME)[0].value == \"PL\"\n\n assert dev_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == \"Some Other Common Name\"\n assert dev_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)[0].value == \"Some Other Org Name\"\n assert 
dev_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATIONAL_UNIT_NAME)[0].value == \"Some Other OU\"\n assert dev_cert.subject.get_attributes_for_oid(NameOID.LOCALITY_NAME)[0].value == \"Some Locality\"\n assert dev_cert.subject.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME)[0].value == \"Some State\"\n assert dev_cert.subject.get_attributes_for_oid(NameOID.COUNTRY_NAME)[0].value == \"PL\"\n\n assert dev_cert.not_valid_after == not_valid_after\n\n with open(dev_key_path, \"rb\") as fp:\n dev_private_key = serialization.load_pem_private_key(fp.read(), password=dev_passphrase.encode())\n assert (\n dev_cert.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1) ==\n dev_private_key.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1)\n )\n\n os.remove(ca_cert_path)\n os.remove(ca_key_path)\n os.remove(dev_cert_path)\n os.remove(dev_key_path)\n\ndef test_generate_cert_issuer_eq_subject():\n ca_cert_path = \"test_ca_certificate.crt\"\n ca_key_path = \"test_ca_key.key\"\n\n signing.generate_ca(\n ca_cert_path,\n ca_key_path,\n {\n \"CN\": \"Some Common Name\",\n \"O\": \"Some Org Name\",\n \"OU\": \"Some OU\",\n \"L\": \"Some Locality\",\n \"S\": \"Some State\",\n \"C\": \"PL\"\n },\n datetime.datetime.today() + datetime.timedelta(days=1)\n )\n assert os.path.exists(ca_cert_path)\n assert os.path.exists(ca_key_path)\n\n dev_cert_path = \"test_dev_certificate.crt\"\n dev_key_path = \"test_dev_key.key\"\n with pytest.raises(utils.KeyGenerationError):\n signing.generate_cert(\n ca_cert_path,\n ca_key_path,\n dev_cert_path,\n dev_key_path,\n {\n \"CN\": \"Some Common Name\",\n \"O\": \"Some Org Name\",\n \"OU\": \"Some OU\",\n \"L\": \"Some Locality\",\n \"S\": \"Some State\",\n \"C\": \"PL\"\n },\n datetime.datetime.today() + datetime.timedelta(days=1)\n )\n assert not os.path.exists(dev_cert_path)\n assert not os.path.exists(dev_key_path)\n\n os.remove(ca_cert_path)\n os.remove(ca_key_path)\n"
},
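The tests above load the generated PEM files back with the `cryptography` package and compare subjects, issuers, and key pairs. `signing.generate_ca` itself is not included in this slice, so the following is only a minimal sketch of what such a generator can look like, assuming RSA keys; the real implementation may differ:

```python
import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

# Generate a key pair, then a self-signed certificate (issuer == subject)
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "Some Common Name")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.today())
    .not_valid_after(datetime.datetime.today() + datetime.timedelta(days=365))
    .sign(key, hashes.SHA256())
)
with open("ca.pem", "wb") as f:
    f.write(cert.public_bytes(serialization.Encoding.PEM))
```

Note that the last test expects `KeyGenerationError` when the developer subject equals the CA subject, i.e. the real generator refuses to issue a certificate whose subject matches its issuer.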
{
"alpha_fraction": 0.6051305532455444,
"alphanum_fraction": 0.6074209809303284,
"avg_line_length": 30.340740203857422,
"blob_id": "4132286f25e7b25697303014e0b5bbba9bcf1561",
"content_id": "713f681474023db42c2f833442fcc25264b4c6bb",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4366,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 135,
"path": "/dtcli/building.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport glob\r\nimport os\r\nimport os.path\r\nimport zipfile\r\nimport datetime\r\n\r\nimport yaml\r\n\r\nfrom . import utils\r\nfrom . import signing\r\nfrom . import __version__\r\n\r\nfrom .constants import EXTENSION_YAML, EXTENSION_ZIP, EXTENSION_ZIP_SIG\r\n\r\ndef _generate_build_comment():\r\n build_data = {\r\n \"Generator\": f\"dt-cli {__version__}\",\r\n \"Creation-time\": datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'\r\n }\r\n\r\n return '\\n'.join(': '.join(pair) for pair in build_data.items())\r\n\r\ndef _zip_extension(extension_dir_path, extension_zip_path):\r\n\r\n extension_yaml_path = os.path.join(extension_dir_path, EXTENSION_YAML)\r\n utils.require_file_exists(extension_yaml_path)\r\n\r\n utils.check_file_exists(extension_zip_path)\r\n print(\"Building %s from %s\" % (extension_zip_path, extension_dir_path))\r\n\r\n try:\r\n with zipfile.ZipFile(extension_zip_path, \"w\") as zf:\r\n\r\n for file_path in glob.glob(\r\n os.path.join(extension_dir_path, \"**\"), recursive=True\r\n ):\r\n if os.path.isdir(file_path):\r\n continue\r\n rel_path = os.path.relpath(file_path, extension_dir_path)\r\n print(\"Adding file: %s as %s\" % (file_path, rel_path))\r\n zf.write(file_path, arcname=rel_path)\r\n\r\n except Exception as e:\r\n print(e)\r\n raise\r\n\r\n else:\r\n print(\"Wrote %s file\" % extension_zip_path)\r\n\r\n\r\ndef _package(\r\n extension_dir_path,\r\n target_dir_path,\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n):\r\n extension_yaml_path = os.path.join(extension_dir_path, EXTENSION_YAML)\r\n with open(extension_yaml_path, \"r\") as fp:\r\n metadata = yaml.safe_load(fp)\r\n extension_file_name = \"%s-%s.zip\" % (\r\n metadata[\"name\"],\r\n metadata[\"version\"],\r\n )\r\n\r\n utils.require_extension_name_valid(extension_file_name)\r\n extension_file_name = extension_file_name.replace(\":\", \"_\")\r\n\r\n extension_file_path = os.path.join(target_dir_path, extension_file_name)\r\n utils.check_file_exists(extension_file_path)\r\n try:\r\n with zipfile.ZipFile(extension_file_path, \"w\") as zf:\r\n zf.comment = bytes(_generate_build_comment(), \"utf-8\")\r\n zf.write(extension_zip_path, arcname=EXTENSION_ZIP)\r\n zf.write(extension_zip_sig_path, arcname=EXTENSION_ZIP_SIG)\r\n except Exception as e:\r\n print(e)\r\n raise\r\n else:\r\n print(\"Wrote %s file\" % extension_file_path)\r\n\r\n\r\ndef build_extension(\r\n extension_dir_path,\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n target_dir_path,\r\n certificate_file_path,\r\n private_key_file_path,\r\n dev_passphrase=None,\r\n keep_intermediate_files=False,\r\n):\r\n try:\r\n utils.require_dir_exists(extension_dir_path)\r\n utils.require_dir_exists(target_dir_path)\r\n _zip_extension(extension_dir_path, extension_zip_path)\r\n signing.sign_file(\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n certificate_file_path,\r\n private_key_file_path,\r\n 
dev_passphrase\r\n )\r\n utils.require_file_exists(extension_zip_path)\r\n utils.require_file_exists(extension_zip_sig_path)\r\n _package(\r\n extension_dir_path,\r\n target_dir_path,\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n )\r\n if not keep_intermediate_files:\r\n utils.remove_files(\r\n [\r\n extension_zip_path,\r\n extension_zip_sig_path,\r\n ]\r\n )\r\n except utils.ExtensionBuildError:\r\n print(\"Failed to build extension! :-(\")\r\n else:\r\n print(\"Extension built successfuly! :-)\")\r\n"
},
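`_package` above embeds build metadata in the outer archive's zip comment (see `_generate_build_comment`). A small sketch of reading that metadata back; the archive path is a hypothetical build artifact:

```python
import zipfile

with zipfile.ZipFile("custom_my.ext-1.0.0.zip") as zf:
    # Prints lines like "Generator: dt-cli 1.0.0" and "Creation-time: ...Z"
    print(zf.comment.decode("utf-8"))
    # The outer package contains the signed payload and its signature
    print(zf.namelist())  # ['extension.zip', 'extension.zip.sig']
```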
{
"alpha_fraction": 0.530598521232605,
"alphanum_fraction": 0.6086079478263855,
"avg_line_length": 23.20339012145996,
"blob_id": "3d6b492867d3c2da8aad159a129deda9663dfeb8",
"content_id": "4f5791225040f1701ccc19185dc57e9b90772034",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 1487,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 59,
"path": "/pyproject.toml",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\r\nauthors = [\"Wiktor Bachnik <[email protected]>\", \"Vagiz Duseev <[email protected]>\"]\r\ndescription = \"Dynatrace CLI\"\r\ndocumentation = \"https://dt-cli.readthedocs.io\"\r\nhomepage = \"https://github.com/dynatrace-oss/dt-cli\"\r\nkeywords = [\"dynatrace\", \"cli\", \"extensions\"]\r\nlicense = \"Apache-2.0\"\r\nmaintainers = [\"Wiktor Bachnik <[email protected]>\", \"Vagiz Duseev <[email protected]>\"]\r\nname = \"dt-cli\"\r\npackages = [\r\n {include = \"dtcli\"},\r\n]\r\nreadme = \"README.md\"\r\nrepository = \"https://github.com/dynatrace-oss/dt-cli\"\r\nversion = \"1.0.0\"\r\n\r\n[tool.poetry.dependencies]\r\nPyYAML = \"^5.4.1\"\r\nasn1crypto = \"^1.4.0\"\r\nclick = \"^7.1.2\"\r\nclick-aliases = \"^1.0.1\"\r\ncryptography = \"^3.4.7\"\r\npython = \"^3.8\"\r\nwheel = \"^0.36.2\"\r\nrequests = \"^2.26.0\"\r\n\r\n[tool.poetry.dev-dependencies]\r\nSphinx = \"^3.5.4\"\r\nblack = {version = \"^20.8b1\", allow-prereleases = true}\r\nbump2version = \"^1.0.1\"\r\nflake8 = \"^3.9.1\"\r\nflake8-blind-except = \"^0.2.0\"\r\nflake8-bugbear = \"^21.4.3\"\r\nflake8-comprehensions = \"^3.4.0\"\r\nflake8-docstrings = \"^1.6.0\"\r\nflake8-import-order = \"^0.18.1\"\r\nflake8-polyfill = \"^1.0.2\"\r\nipython = \"^7.22.0\"\r\nmypy = \"^0.812\"\r\npydocstyle = \"^6.0.0\"\r\npyinstaller = \"^4.3\"\r\npytest = \"^5.2\"\r\npytest-black = \"^0.3.12\"\r\npytest-cov = \"^2.11.1\"\r\npytest-flake8 = \"^1.0.7\"\r\npytest-mock = \"^3.5.1\"\r\npytest-mypy = \"^0.8.1\"\r\nradon = \"^4.5.0\"\r\nsphinxcontrib-programoutput = \"^0.17\"\r\n\r\n[build-system]\r\nbuild-backend = \"poetry.core.masonry.api\"\r\nrequires = [\"poetry-core>=1.0.0\"]\r\n\r\n[tool.poetry.scripts]\r\ndt = 'dtcli.scripts.dt:main'\r\n\r\n[tool.black]\r\nline-length = 78\r\n"
},
{
"alpha_fraction": 0.7232304811477661,
"alphanum_fraction": 0.7341197729110718,
"avg_line_length": 38.814815521240234,
"blob_id": "b7cb79a3b7da24dded1d7a27142bf139e31f1eaf",
"content_id": "52edc4cf890b6ee43f155030c66b4fac2911a7f3",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1102,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/dtcli/constants.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport os.path\r\n\r\n\r\nEXTENSION_YAML = \"extension.yaml\"\r\nEXTENSION_ZIP = \"extension.zip\"\r\nEXTENSION_ZIP_SIG = \"extension.zip.sig\"\r\nDEFAULT_TARGET_PATH = os.path.curdir\r\nDEFAULT_EXTENSION_DIR = os.path.join(os.path.curdir, \"extension\")\r\nDEFAULT_DEV_CERT = os.path.join(os.path.curdir, \"developer.pem\")\r\nDEFAULT_DEV_KEY = os.path.join(os.path.curdir, \"developer.key\")\r\nDEFAULT_CA_CERT = os.path.join(os.path.curdir, \"ca.pem\")\r\nDEFAULT_CA_KEY = os.path.join(os.path.curdir, \"ca.key\")\r\nDEFAULT_CERT_VALIDITY = 365 * 3\r\n"
},
{
"alpha_fraction": 0.6467469930648804,
"alphanum_fraction": 0.6525301337242126,
"avg_line_length": 39.540000915527344,
"blob_id": "1db37d8eff5546977be994da7e55abf1076ae43a",
"content_id": "5577e057bd3914568c63cf1c8e07ea9cf327a792",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2075,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 50,
"path": "/dtcli/server_api.py",
"repo_name": "djelinski/dt-cli",
"src_encoding": "UTF-8",
"text": "# Copyright 2021 Dynatrace LLC\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport requests\r\n\r\nfrom . import utils as dtcliutils\r\n\r\n\r\ndef validate(extension_zip_file, tenant_url, api_token):\r\n url = f\"{tenant_url}/api/v2/extensions?validateOnly=true\"\r\n\r\n with open(extension_zip_file, \"rb\") as extzf:\r\n headers = {\r\n 'Accept': 'application/json; charset=utf-8',\r\n 'Authorization': f'Api-Token {api_token}'\r\n }\r\n try:\r\n response = requests.post(url, files={'file': (extension_zip_file, extzf, 'application/zip')}, headers=headers)\r\n response.raise_for_status()\r\n print(f\"Extension validation successful!\")\r\n except requests.exceptions.HTTPError as e:\r\n print(f\"Extension validation failed!\")\r\n raise dtcliutils.ExtensionValidationError(response.text)\r\n\r\ndef upload(extension_zip_file, tenant_url, api_token):\r\n url = f\"{tenant_url}/api/v2/extensions\"\r\n\r\n with open(extension_zip_file, \"rb\") as extzf:\r\n headers = {\r\n 'Accept': 'application/json; charset=utf-8',\r\n 'Authorization': f'Api-Token {api_token}'\r\n }\r\n try:\r\n response = requests.post(url, files={'file': (extension_zip_file, extzf, 'application/zip')}, headers=headers)\r\n response.raise_for_status()\r\n print(f\"Extension upload successful!\")\r\n except requests.exceptions.HTTPError as e:\r\n print(f\"Extension upload failed!\")\r\n raise dtcliutils.ExtensionValidationError(response.text)"
}
] | 13 |
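A usage sketch for the `server_api` module above; the artifact name, tenant URL, and token are placeholders. `validate()` posts to `/api/v2/extensions?validateOnly=true` and `upload()` to `/api/v2/extensions`, both raising `ExtensionValidationError` on an HTTP error:

```python
from dtcli import server_api
from dtcli import utils

try:
    server_api.validate(
        "custom_my.ext-1.0.0.zip",              # hypothetical artifact
        "https://abc12345.live.dynatrace.com",  # placeholder tenant URL
        "dt0c01.example-token",                 # placeholder API token
    )
except utils.ExtensionValidationError as e:
    print("server rejected the extension:", e)
```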
rhythmswing/LinguisticAnalysis
|
https://github.com/rhythmswing/LinguisticAnalysis
|
2218f9278888a02fb4b99dae915c092ef8c12fd4
|
7e0c3bd8fc318a337ed287c26dfcac9f9641db9d
|
6b6a43df92865e23fa28dcb34de35a619703ce01
|
refs/heads/master
| 2021-05-14T14:35:00.715594 | 2018-01-02T05:27:12 | 2018-01-02T05:27:12 | 115,973,751 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5718954205513,
"alphanum_fraction": 0.5866013169288635,
"avg_line_length": 34.30769348144531,
"blob_id": "2a1abf29d6fb4aae27f3350b8530c68ad60863fb",
"content_id": "e8480fcf2add03be87f34fc9f0836385b3274320",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1836,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 52,
"path": "/origin.py",
"repo_name": "rhythmswing/LinguisticAnalysis",
"src_encoding": "UTF-8",
"text": "import urllib.request\nfrom lxml import etree\nfrom io import StringIO\nfrom textblob import TextBlob\nimport numpy as np\n\nimport time \nimport sys\n\ndef get_origin(word):\n #url = 'http://www.etymonline.com/word/%s' % word\n url = 'https://en.wiktionary.org/wiki/%s' % word\n # print(url)\n html = urllib.request.urlopen(url)\n context = html.read().decode('utf-8')\n\n# obtained from the website.\n# origin_path = '//*[@id=\"root\"]/div/div/div[3]/div/div/div[1]/div[2]/div/section/object/p[2]/text()[1]'\n #origin_path = '//*[@id=\"mw-content-text\"]/div/p[3]/span[1]/span/a'\n origin_path = '//*[@id=\"mw-content-text\"]/div/p[3]/span[1]/a'\n\n tree = etree.parse(StringIO(context), etree.HTMLParser())\n if len(tree.xpath(origin_path))>0:\n origin = tree.xpath(origin_path)[0].text\n else:\n origin = 'Not Found'\n return origin\n \ndef sample_origin(text, sample_size=100):\n words = TextBlob(text).words\n origins = []\n while len(origins) < sample_size:\n sys.stdout.write('\\r%f%%' % (100.0 * len(origins) / sample_size))\n sys.stdout.flush()\n index = np.random.choice(len(words))\n if words[index].isalpha() and len(words[index]) > 1: \n origin = get_origin(words[index].lower())\n if origin != 'Not Found':\n origins.append(origin)\n index+=1\n return origins\n\nif __name__ == '__main__':\n files = ['speeches.txt', 'DT.txt', 'DT&HC.txt', 'DT&HC_T.txt', 'DT&HC_H.txt']\n for filename in files:\n print('Filename: ' + filename)\n with open(filename, 'r+', encoding='utf-8') as f:\n text = f.read()\n origins = sample_origin(text, 100)\n set_origins = set(origins)\n for o in set_origins:\n print('Percentage for %s: %f' % (o, 1.0 * origins.count(o) / len(origins)))\n"
},
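A standalone sketch of the extraction pattern `origin.py` uses: parse HTML with lxml's forgiving parser, then pull a node with an absolute XPath. The HTML here is inline, so no network access is needed:

```python
from io import StringIO
from lxml import etree

# Minimal page mimicking the shape the XPath in origin.py expects
html = ('<html><body><div id="mw-content-text"><div><p></p><p></p>'
        '<p><span><a>Latin</a></span></p></div></div></body></html>')
tree = etree.parse(StringIO(html), etree.HTMLParser())
hits = tree.xpath('//*[@id="mw-content-text"]/div/p[3]/span[1]/a')
print(hits[0].text if hits else 'Not Found')  # Latin
```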
{
"alpha_fraction": 0.6388888955116272,
"alphanum_fraction": 0.6431623697280884,
"avg_line_length": 30.200000762939453,
"blob_id": "a9d9e9b699bd84cc0980e1c9e90b04b59d3e36c8",
"content_id": "269867bc19183b91ed7a31e10605f1ec7761a7c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 15,
"path": "/cloud.py",
"repo_name": "rhythmswing/LinguisticAnalysis",
"src_encoding": "UTF-8",
"text": "from wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport seaborn as sn\n\nfiles = ['speeches.txt', 'DT.txt', 'DT&HC.txt',\n 'DT&HC_T.txt', 'DT&HC_H.txt']\nfor filename in files:\n with open(filename,'r+', encoding='utf-8') as f:\n text = f.read()\n\n wordcloud = WordCloud().generate(text)\n sn.set(style='white')\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.savefig('cloud/'+filename[:-4]+'_cloud.pdf')\n"
},
{
"alpha_fraction": 0.5836237072944641,
"alphanum_fraction": 0.5844947695732117,
"avg_line_length": 25.113636016845703,
"blob_id": "dcf0e7b53ffed220c63853deabfb8ce0443df608",
"content_id": "f9ff51ed99218939ff587c535de3c19c41734bc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 44,
"path": "/lexical.py",
"repo_name": "rhythmswing/LinguisticAnalysis",
"src_encoding": "UTF-8",
"text": "from textblob import TextBlob\nfrom nltk.corpus import brown as b\nimport numpy as np\n\n\ndef analysis(text):\n corpus = text.split()\n for i, c in enumerate(corpus):\n corpus[i] = c.lower()\n lower = ' '.join(corpus)\n text = TextBlob(lower)\n for i, t in enumerate(text.words):\n corpus[i] = t.lemma\n lemma = ' '.join(corpus)\n text = TextBlob(lemma)\n\n brown_words = list(b.words())\n for i, c in enumerate(brown_words):\n brown_words[i] = c.lower()\n\n lower = ' '.join(brown_words)\n brown = TextBlob(lower)\n for i, t in enumerate(brown.words):\n brown_words[i] = t.lemma\n lemma = ' '.join(brown_words)\n brown = TextBlob(lemma)\n\n text_freq = []\n brown_freq = []\n vocabulary = set(text.words)\n for v in vocabulary:\n if v in brown.words:\n text_freq.append(text.words.count(v))\n brown_freq.append(brown.words.count(v))\n print(np.array(text_freq), np.array(brown_freq))\n\n\nif __name__ == '__main__':\n filename = ['DT.txt']\n for name in filename:\n with open(name, 'r+', encoding='utf-8') as f:\n text = f.read()\n\n analysis(text)"
},
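`lexical.py` above calls `words.count(v)` inside a loop over the vocabulary, which makes the comparison quadratic in the corpus size. A one-pass alternative sketch with `collections.Counter` that produces the same per-word counts in linear time; the two sentences stand in for the text and the Brown corpus:

```python
from collections import Counter

text_counts = Counter("the cat sat on the mat".split())
brown_counts = Counter("the dog sat by the door".split())

# Only compare words present in both corpora, as lexical.py does
shared = set(text_counts) & set(brown_counts)
text_freq = [text_counts[w] for w in sorted(shared)]
brown_freq = [brown_counts[w] for w in sorted(shared)]
print(sorted(shared), text_freq, brown_freq)
```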
{
"alpha_fraction": 0.6494201421737671,
"alphanum_fraction": 0.6556645631790161,
"avg_line_length": 27.743589401245117,
"blob_id": "72aa3c5b35a0723d78e20474d812656f8f921c43",
"content_id": "5607e8d8b0d680ae2997ade736db18174bae8c16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 39,
"path": "/semantics.py",
"repo_name": "rhythmswing/LinguisticAnalysis",
"src_encoding": "UTF-8",
"text": "from textblob import TextBlob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sn\n\n\ndef analyze(text, filename='analysis'):\n corpus = TextBlob(text)\n\n sentences = corpus.sentences\n sentiments = {'polarity': [], 'subjectivity': []}\n\n for sent in sentences:\n sentiments['polarity'].append(sent.sentiment.polarity)\n sentiments['subjectivity'].append(sent.sentiment.subjectivity)\n\n for s in sentiments:\n sentiments[s] = np.array(sentiments[s])\n\n print(sentiments['polarity'].mean(), sentiments['polarity'].std())\n print(sentiments['subjectivity'].mean(), sentiments['subjectivity'].std())\n\n plt.figure(figsize=(9, 9))\n plt.xlabel('Polarity')\n plt.ylabel('Density')\n sn.distplot(sentiments['polarity'], bins=10)\n plt.savefig(filename + '_polarity.pdf')\n\n\nsn.set(style='white', font_scale=3)\n\n\nfiles = ['speeches.txt', 'DT.txt', 'DT&HC.txt',\n 'DT&HC_T.txt', 'DT&HC_H.txt']\nfor filename in files:\n print('filename: ' + filename)\n with open(filename, 'r+', encoding='utf-8') as f:\n text = f.read()\n analyze(text, filename[:-4])\n"
}
] | 4 |
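`semantics.py` above relies on TextBlob's per-sentence sentiment: each sentence carries a (polarity, subjectivity) pair, with polarity in [-1, 1] and subjectivity in [0, 1], which is what the script averages and histograms. A minimal sketch:

```python
from textblob import TextBlob

blob = TextBlob("This plan is great. That claim was terrible.")
for sent in blob.sentences:
    # Positive sentences get polarity near 1, negative near -1
    print(sent, sent.sentiment.polarity, sent.sentiment.subjectivity)
```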
gitdlam/random-crap
|
https://github.com/gitdlam/random-crap
|
7dcf48f02e0350a1eba917ef2d56bb9ed8d5b8f2
|
b1ec1eccf0aca5401b7df2707faa65d178864e07
|
412f22fe2e18c73f74110ab141c88bb881ce9b71
|
refs/heads/master
| 2020-05-17T18:10:27.768045 | 2014-03-18T02:20:14 | 2014-03-18T02:20:14 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5782185792922974,
"alphanum_fraction": 0.5920658707618713,
"avg_line_length": 34.53333282470703,
"blob_id": "b836f5b42154f17df9afd7cbdc08768ea56c7d9a",
"content_id": "405d2206d14b37fca9e4921764a4c809ffcceb53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2672,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 75,
"path": "/match_left_right.py",
"repo_name": "gitdlam/random-crap",
"src_encoding": "UTF-8",
"text": "# input \"left.txt\" list of names\n# input \"right.txt\" list of names\n# input \"ignore.txt\" list of names (from output common_words100.py ran for both lists, plus manual edits)\n# input \"common.txt\" list of names (from output common_words100.py ran for both lists, plus manual edits)\n# output ALL of \"left.txt\" in original order, plus extra column of possible matches with \"right.txt\"\n#\nfrom collections import Counter\n#from itertools import chain, combinations\nfrom sets import Set\n\nright_lines0 = [line.strip() for line in open('right.txt')]\nleft_lines0 = [line.strip() for line in open('left.txt')]\n\n\nignore = [line.strip() for line in open('ignore.txt')]\nignore_set = Set(ignore)\ncommon = [line.strip() for line in open('common.txt')]\ncommon_set = Set(common)\n\nright_lines1 = {}\nright_lines2 = []\nright_sets = {}\nfor i, val in enumerate(right_lines0):\n right_lines2.append(val.lower())\n right_lines1[val.lower()] = val\n right_set = Set(val.lower().split()) - ignore_set\n right_sets[val.lower()] = right_set\n\n \nleft_lines1 = {}\nleft_lines2 = []\nresult = {}\n\nfor i, val in enumerate(left_lines0):\n result[val.lower()] = []\n \nfor i, val in enumerate(left_lines0):\n left_lines2.append(val.lower())\n left_lines1[val.lower()] = val\n \n left_set = Set(val.lower().split()) - ignore_set \n if len(left_set) == 0:\n left_set = Set(val.lower().split())\n if len(left_set) >= 2:\n if len(left_set) > len(left_set - common_set) and len(left_set - common_set) > 0:\n left_set = left_set - common_set \n left_set_len = len(left_set) \n best_match = 0\n for j, right_val in enumerate(right_lines2):\n right_set = right_sets[right_val]\n \n match_set = left_set & right_set\n\n match_set_len = len(match_set)\n \n if match_set_len > best_match:\n result[val.lower()] = [right_lines1[right_val]]\n best_match = match_set_len \n elif left_set_len == match_set_len:\n result[val.lower()].append(right_lines1[right_val])\n best_match = match_set_len\n elif left_set_len in (2,3) and match_set_len ==1: \n if best_match <= 1: \n result[val.lower()].append(right_lines1[right_val])\n best_match = 1\n elif match_set_len >= 2: \n if best_match <= 2: \n result[val.lower()].append(right_lines1[right_val])\n best_match = match_set_len\n \nfor i in left_lines2:\n output_line = left_lines1[i] + \"\\t\"\n for j in result[i]:\n output_line = output_line + \"|||||\" + j\n print output_line \n"
},
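A Python 3 sketch of the core heuristic in `match_left_right.py` above: strip ignore words from each name, then score candidates by token-set overlap and keep the best-scoring ones. The inline word lists stand in for `left.txt`, `right.txt`, and `ignore.txt`:

```python
IGNORE = {"inc", "ltd", "the"}

def tokens(name):
    # Fall back to the full token set if ignore words would empty it,
    # mirroring the fallback in the original script
    return set(name.lower().split()) - IGNORE or set(name.lower().split())

def best_matches(left, rights):
    scored = [(len(tokens(left) & tokens(r)), r) for r in rights]
    best = max(score for score, _ in scored)
    return [r for score, r in scored if score == best and best > 0]

print(best_matches("Acme Widgets Inc", ["Acme Widgets Ltd", "Beta Corp"]))
# ['Acme Widgets Ltd']
```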
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6719456911087036,
"avg_line_length": 24.705883026123047,
"blob_id": "a20165ec4e3e431dd6375abe5586d19e980031b7",
"content_id": "805afe9dd2cdb1c94a4191ea5e6f15c03adb64a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 17,
"path": "/common_words100.py",
"repo_name": "gitdlam/random-crap",
"src_encoding": "UTF-8",
"text": "# input are names of 1 or more words. 1 name per line.\n# outputs the 100 most common words. 1 word per line.\n#\nfrom collections import Counter\nfrom itertools import chain, combinations\n\nlines = [line.strip().lower() for line in open('data.txt')]\n\nlines_flat = []\n\nfor i in lines:\n lines_flat.extend(i.split(\" \"))\n \ncounter = Counter(lines_flat)\nresult = counter.most_common(100)\nfor i in result:\n print i[0] + \"\\t\" + str(i[1])\n \n"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6122449040412903,
"avg_line_length": 11.25,
"blob_id": "7efc78c932c5e1a877d772c154c0ed26a5b74ebd",
"content_id": "02b18e91aec515e5176f20f2e9f4b445af5fcfa1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 49,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 4,
"path": "/README.md",
"repo_name": "gitdlam/random-crap",
"src_encoding": "UTF-8",
"text": "random-crap\n===========\n\nHelper scripts for work\n"
}
] | 3 |
ishpreet09/Pro-C105StandardDeviation
|
https://github.com/ishpreet09/Pro-C105StandardDeviation
|
086bcf9e451d3789ba981ae038eb9f181a3e4369
|
fc86ef2d32fc6ffdfb29f4f0f0c5bcbfbf452403
|
fe936b9344144ace59e1611f86143f5f46f395b6
|
refs/heads/master
| 2023-06-15T14:12:07.050689 | 2021-07-19T10:39:39 | 2021-07-19T10:39:39 | 387,428,067 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5983871221542358,
"alphanum_fraction": 0.6112903356552124,
"avg_line_length": 16.84848403930664,
"blob_id": "411c9fb8c24a10daa6aa268cea087407c96a7b8d",
"content_id": "3c4c7b4fbef2994f8bf9c08a3509beb7c968ce25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 620,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 33,
"path": "/Pro-C105StandardDeviation/StandardDeviation.py",
"repo_name": "ishpreet09/Pro-C105StandardDeviation",
"src_encoding": "UTF-8",
"text": "import math\r\nimport csv\r\n\r\nwith open(\"D:/DATA DESKTOP/Notes Of Code/Python/Homework/Pro-C105StandardDeviation/data.csv\", newline='') as f:\r\n reader=csv.reader(f)\r\n file_data=list(reader)\r\n\r\ndata = file_data[0]\r\n\r\ndef mean(data):\r\n n=len(data)\r\n total=0\r\n for x in data:\r\n total += int(x)\r\n \r\n mean=total / n\r\n return mean\r\n\r\nsquaredList=[]\r\nfor number in data:\r\n a = int(number) - mean(data)\r\n a= a**2\r\n squaredList.append(a)\r\n\r\n#getting sum\r\nsum =0\r\nfor i in squaredList:\r\n sum =sum + i\r\n\r\nresult = sum/ (len(data)-1)\r\n\r\nstd_deviation = math.sqrt(result)\r\nprint(std_deviation)"
}
] | 1 |
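The script above divides the summed squared deviations by `len(data) - 1`, i.e. it computes the sample standard deviation. `statistics.stdev` uses the same formula, so it can serve as a cross-check on a small inline list:

```python
import statistics

data = [2, 4, 4, 4, 5, 5, 7, 9]
# Same n-1 (sample) formula as the script above
print(statistics.stdev(data))
```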
roastduck/mnist
|
https://github.com/roastduck/mnist
|
240e0d49057a66d2bf58ab05ba7c1d8bd1afb3cd
|
a69263687e5f5b00522360554690d8fd650b6b39
|
5c3bb85cb648c7720462083500e0bb7d22f846be
|
refs/heads/master
| 2021-03-19T17:42:23.980939 | 2017-06-09T11:50:01 | 2017-06-09T11:50:01 | 92,031,327 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5753726363182068,
"alphanum_fraction": 0.5997628569602966,
"avg_line_length": 39.4315071105957,
"blob_id": "35c3a440df9c6f175088b23c1faf6106579193c6",
"content_id": "86788167cd8dd385c43158059bfd21b3327f5254",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5904,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 146,
"path": "/src/cnn.py",
"repo_name": "roastduck/mnist",
"src_encoding": "UTF-8",
"text": "import sys\nimport json\nimport tensorflow as tf\n\nimport inout\nimport disturb\n\ndef weightVar(shape):\n ''' Generate variables as weight '''\n\n with tf.name_scope('weight'):\n ret = tf.Variable(tf.truncated_normal(shape, stddev = 0.1))\n # tf.summary.histogram('histogram', ret)\n return ret\n\ndef biasVar(shape):\n ''' Generate variables as bias '''\n\n with tf.name_scope('bias'):\n ret = tf.Variable(tf.constant(0.1, shape = shape))\n # tf.summary.histogram('histogram', ret)\n return ret\n\ndef conv2d(x, W):\n ''' 2D convolutional operator '''\n\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef maxPool2x2(x):\n ''' 2*2 max-pooling operator '''\n\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\ndef convLayer(x, height, width, inChannels, outChannels, name):\n ''' Convolutional layer '''\n\n with tf.name_scope(name):\n weight = weightVar([height, width, inChannels, outChannels])\n bias = biasVar([outChannels])\n conv = tf.nn.elu(conv2d(x, weight) + bias)\n return maxPool2x2(conv)\n\ndef denseLayer(x, inChannels, outChannels, name):\n ''' Densely connected layer '''\n\n with tf.name_scope(name):\n weight = weightVar([inChannels, outChannels])\n bias = biasVar([outChannels])\n return tf.nn.elu(tf.matmul(x, weight) + bias)\n\ndef outLayer(x, inChannels, outChannels):\n ''' Output layer '''\n\n with tf.name_scope('output'):\n weight = weightVar([inChannels, outChannels])\n bias = biasVar([outChannels])\n return tf.matmul(x, weight) + bias\n\ndef run(action, expId = None, runId = None, stepId = None):\n if action == 'train':\n with open(\"experiments/%s/%s/conf.json\"%(expId, runId)) as f:\n conf = json.load(f)\n assert \"startEpisode\" in conf\n assert \"endEpisode\" in conf\n assert \"learningRate\" in conf\n assert \"fromCheckpoint\" in conf\n\n x = tf.placeholder(tf.float32, shape=[None, 784], name = 'x')\n _y = tf.placeholder(tf.float32, shape=[None, 10], name = 'y_true')\n xImage = tf.reshape(x, [-1,28,28,1])\n\n keepProb = tf.placeholder(tf.float32, name = 'keepProb')\n conv1 = tf.nn.dropout(convLayer(xImage, 5, 5, 1, 32, 'conv1'), keepProb)\n conv2 = tf.nn.dropout(convLayer(conv1, 5, 5, 32, 64, 'conv2'), keepProb)\n conv2Flat = tf.reshape(conv2, [-1, 7 * 7 * 64])\n dense1 = tf.nn.dropout(denseLayer(conv2Flat, 7 * 7 * 64, 1024, 'dense1'), keepProb)\n\n y = outLayer(dense1, 1024, 10)\n entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=_y, logits=y))\n if action == 'train':\n optimizer = tf.train.AdamOptimizer(conf[\"learningRate\"]).minimize(entropy)\n output = tf.argmax(y, 1)\n _output = tf.argmax(_y, 1)\n correct = tf.equal(output, _output)\n trainError = 1 - tf.reduce_mean(tf.cast(correct, tf.float32))\n validateError = 1 - tf.reduce_mean(tf.cast(correct, tf.float32)) # Call either this two\n trainSummary = tf.summary.scalar('trainError', trainError)\n validateSummary = tf.summary.scalar('validateError', validateError)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n if action == 'train':\n summaryWriter = tf.summary.FileWriter('experiments/%s/%s/logs'%(expId, runId), sess.graph)\n # summaries = tf.summary.merge_all()\n\n saver = tf.train.Saver(tf.global_variables(), max_to_keep = None)\n\n if action == 'train':\n if action == 'train' and conf[\"fromCheckpoint\"] is not None:\n saver.restore(sess, \"experiments/%s/%s/checkpoint%s\"%(expId, runId - 1, conf[\"fromCheckpoint\"]))\n\n trainData, validateData = inout.DataGetter(50, 5000)\n for i, trainBatch in 
zip(range(conf[\"startEpisode\"], conf[\"endEpisode\"]), trainData):\n if (i + 1) % 100 == 0:\n validateBatch = next(validateData)\n errT, summT = sess.run((trainError, trainSummary), feed_dict = {x: trainBatch[0], _y: trainBatch[1], keepProb: 1.0})\n errV, summV = sess.run((validateError, validateSummary), feed_dict = {x: validateBatch[0], _y: validateBatch[1], keepProb: 1.0})\n print(\"Step %d, training error %g, validating error %g\"%(i, errT, errV))\n summaryWriter.add_summary(summT, i)\n summaryWriter.add_summary(summV, i)\n if (i + 1) % 1000 == 0:\n saver.save(sess, \"experiments/%s/%s/checkpoint%s\"%(expId, runId, i))\n\n disturb.disturbBatch(trainBatch[0])\n sess.run(optimizer, feed_dict = {x: trainBatch[0], _y: trainBatch[1], keepProb: 0.75})\n else:\n if action == 'test':\n saver.restore(sess, \"experiments/%s/%s/checkpoint%s\"%(expId, runId, stepId))\n else:\n saver.restore(sess, \"../data/cnn_final\")\n imgId = 0\n with open('../data/submission.csv', 'w') as f:\n f.write('ImageId,Label\\n')\n for batch in inout.TestGetter():\n for res in sess.run(output, feed_dict = {x: batch, keepProb: 1}):\n imgId += 1\n f.write('%d,%d\\n'%(imgId, res))\n\nif __name__ == '__main__':\n if not (len(sys.argv) == 4 and sys.argv[1] == 'train') and not (len(sys.argv) == 5 and sys.argv[1] == 'test') and not (len(sys.argv) == 2 and sys.argv[1] == 'run'):\n print(\"Usage:\")\n print(\"Train a network and save to checkpoint:\")\n print(\" python3 cnn.py train <experimentID> <runID>\")\n print(\"Test a trained checkpoint:\")\n print(\" python3 cnn.py test <experimentID> <runID> <stepID>\")\n print(\"Run the final network:\")\n print(\" python3 cnn.py run\")\n exit(0)\n\n if sys.argv[1] == 'train':\n run('train', sys.argv[2], int(sys.argv[3]))\n elif sys.argv[1] == 'test':\n run('test', sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))\n else:\n run('run')\n\n"
},
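The cnn.py record above stacks two convLayer blocks, each a 'SAME'-padded convolution followed by a 2x2/stride-2 max-pool, so a 28x28 MNIST image shrinks to 14x14 and then 7x7, which is where the 7 * 7 * 64 flatten size comes from. A minimal standalone sketch of that shape arithmetic (plain Python, not part of the repository):

def after_conv_pool(side):
    # 'SAME' padding preserves the spatial size; the 2x2 pool halves it
    return side // 2

side = 28                      # MNIST images are 28x28
side = after_conv_pool(side)   # conv1 -> 14x14, 32 channels
side = after_conv_pool(side)   # conv2 -> 7x7, 64 channels
assert side * side * 64 == 7 * 7 * 64 == 3136   # matches the conv2Flat reshape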
{
"alpha_fraction": 0.6012207269668579,
"alphanum_fraction": 0.609359085559845,
"avg_line_length": 38.29999923706055,
"blob_id": "4f5afa3d45d3d57fa461780061e909304f287ac4",
"content_id": "1b702adf1d6edea2d3ff46014866c335d7236452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1966,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 50,
"path": "/src/inout.py",
"repo_name": "roastduck/mnist",
"src_encoding": "UTF-8",
"text": "import random\nimport itertools\nimport numpy # Should be imported after random and itertools\n\ndef oneHot(size, key):\n return numpy.array([i == key for i in range(size)])\n\ndef DataGetter(trainSize, validateSize):\n ''' Get a random sample from train.csv each time\n @param trainSize, validateSize: batch size for training set and validation set correspondingly.\n `trainSize` means sampled size and `validateSize` means valid size\n @return : (training getter, validating getter) '''\n\n f = open('../data/train.csv')\n next(f) # The first line is header\n lines = map(lambda row: row.strip().split(','), f)\n dataset = list(map(lambda row: (numpy.array(tuple(map(float, row[1:]))), oneHot(10, int(row[0]))), lines))\n del f\n trainset = dataset[validateSize:]\n validateset = dataset[:validateSize]\n random.shuffle(trainset)\n\n def subGetter(dataset, batchSize): # batchSize = None means whole set\n if batchSize is None:\n batchSize = len(dataset)\n assert batchSize <= len(dataset)\n pos = 0\n while True:\n _pos = (pos + batchSize) % len(dataset)\n if pos < _pos:\n batch = itertools.islice(dataset, pos, _pos)\n elif pos > _pos:\n batch = itertools.chain(itertools.islice(dataset, pos, len(dataset)), itertools.islice(dataset, 0, _pos))\n else:\n batch = dataset\n pos = _pos\n\n yield list(map(numpy.array, zip(*batch)))\n\n return subGetter(trainset, trainSize), subGetter(validateset, None)\n\ndef TestGetter():\n f = open('../data/test.csv')\n next(f) # The first line is header\n lines = map(lambda row: row.strip().split(','), f)\n dataset = list(map(lambda row: numpy.array(tuple(map(float, row))), lines))\n del f\n\n for batch in [dataset[i * 500 : (i + 1) * 500] for i in range(len(dataset) // 500)]:\n yield numpy.array(batch)\n\n"
},
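The subGetter closure in inout.py above yields batches forever by wrapping its read position around the end of the dataset. A standalone sketch of the same wrap-around slicing on made-up data (not part of the repository):

import itertools

def circular_batches(dataset, batch_size):
    pos = 0
    while True:
        end = (pos + batch_size) % len(dataset)
        if pos < end:                     # contiguous slice
            batch = list(itertools.islice(dataset, pos, end))
        elif pos > end:                   # wrap past the end
            batch = list(itertools.chain(
                itertools.islice(dataset, pos, len(dataset)),
                itertools.islice(dataset, 0, end)))
        else:                             # batch covers the whole set
            batch = list(dataset)
        pos = end
        yield batch

gen = circular_batches(list(range(5)), 3)
assert next(gen) == [0, 1, 2]
assert next(gen) == [3, 4, 0]   # wrapped around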
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7730769515037537,
"avg_line_length": 32.91304397583008,
"blob_id": "cca9638193b8197abf76db3f759e528b8454f117",
"content_id": "4c59944e8f005ce298a243206c05ee69334abb85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1400,
"license_type": "no_license",
"max_line_length": 246,
"num_lines": 23,
"path": "/README.md",
"repo_name": "roastduck/mnist",
"src_encoding": "UTF-8",
"text": "# MNIST Experiment\n\n源代码见`src`文件夹,文档见`doc/document.pdf`,`data/submission_final_cnn.csv`是用CNN模型运行出来的最佳结果,`data/submission_final_mlp.csv`是用MLP模型运行出来的最佳结果。程序运行方法如下:\n\n## 需求\n\n1. Python >= 3.5\n2. Tensorflow >= 1.1 (低于1.1将导致CNN模型的预处理部分无法运行)\n\n## 用已训练网络生成结果\n\n首先切换到`src`文件夹,然后使用相应模型运行:\n\n1. 使用CNN模型(效果最佳),运行`python3 cnn.py run`,或\n2. 使用MLP模型,运行`python3 mlp.py run`。\n\n结果将保存在`data/submission.csv`\n\n## 训练网络(非必须)\n\n程序设计为一次连续的训练分为多次运行,每次运行从上一次运行保存的保存点接着进行。首先切换到`src`文件夹,建立`src/experiments/<训练ID>/<运行ID>`目录,创建`src/experiments/<训练ID>/<运行ID>/conf.json`配置文件,设定\"startEpisode\"、\"endEpisode\"表示起止训练轮数,\"learningRate\"表示学习率、\"fromCheckpoint\"表示从<运行ID-1>目录的上一次运行中的哪一个保存点开始继续训练。\n\n以CNN为例执行`python3 cnn.py train <训练ID> <运行ID>`训练,每100轮训练会保存一个保存点到相应文件夹,执行`python3 cnn.py test <训练ID> <运行ID> <保存点ID>`以某个保存点的网络生成结果。MLP类似。\n"
},
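The README above describes a conf.json whose keys match the assertions in cnn.py's run(). A hypothetical example of writing such a file (every value here is made up):

import json, os

conf = {
    "startEpisode": 0,        # first training episode of this run
    "endEpisode": 10000,      # episode at which this run stops
    "learningRate": 1e-4,     # passed to AdamOptimizer
    "fromCheckpoint": None,   # or a step id saved by the previous run
}
os.makedirs("experiments/exp1/0", exist_ok=True)   # made-up experiment/run ids
with open("experiments/exp1/0/conf.json", "w") as f:
    json.dump(conf, f, indent=2)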
{
"alpha_fraction": 0.5768501162528992,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 34.06666564941406,
"blob_id": "02ce17437b36b5b7a644f616835b2087f904b1a2",
"content_id": "b73380693424eb2729326cde8dda4df76cabfd9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 15,
"path": "/src/disturb.py",
"repo_name": "roastduck/mnist",
"src_encoding": "UTF-8",
"text": "import numpy\nimport tensorflow.contrib.keras as keras\n\ndef disturb(img):\n mat = numpy.reshape(img, (1, 28, 28))\n\n mat = keras.preprocessing.image.random_zoom(mat, (0.8, 1.2), 1, 2, 0, 'constant', 0.0)\n mat = keras.preprocessing.image.random_rotation(mat, 20.0, 1, 2, 0, 'constant', 0.0)\n mat = keras.preprocessing.image.random_shift(mat, 0.15, 0.15, 1, 2, 0, 'constant', 0.0)\n\n return numpy.reshape(mat, (784,))\n\ndef disturbBatch(batch):\n for i in range(batch.shape[0]):\n batch[i] = disturb(batch[i])\n\n"
}
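The positional arguments in disturb.py above are easier to read in keyword form. A hedged sketch of one such call, assuming the TF 1.x tensorflow.contrib.keras API that the repository targets (signatures differ in later Keras versions):

import numpy
import tensorflow.contrib.keras as keras

img = numpy.zeros((1, 28, 28))   # (channel, rows, cols), as in disturb()
out = keras.preprocessing.image.random_rotation(
    img, rg=20.0,                # rotate by up to +/-20 degrees
    row_axis=1, col_axis=2,      # spatial axes of the (1, 28, 28) array
    channel_axis=0,              # the leading axis holds the channel
    fill_mode='constant', cval=0.0)
assert out.shape == (1, 28, 28)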
] | 4 | HaoranZhao1988/DDAD | https://github.com/HaoranZhao1988/DDAD | 70862ee93afae1e7014e0f814543338b804a8770 | ef9d7a3705aafbbc7378c2566aac5d32eb46d2c9 | 6c11efd4a89904222d01f32d9789d738e19b2077 | refs/heads/master | 2023-07-27T00:41:08.960775 | 2021-09-13T01:26:44 | 2021-09-13T01:26:44 | 405,786,262 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6698229908943176,
"alphanum_fraction": 0.6858789920806885,
"avg_line_length": 35.818180084228516,
"blob_id": "68ed034256bec6ccf11d864adfa37bd6e83c1cf5",
"content_id": "ff1034872bff89b065d98d8f2fb3997e3f3e052d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2429,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 66,
"path": "/network/segmentation/deeplabv3.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from torchvision.models._utils import IntermediateLayerGetter\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom .mobilenet import MobileNetV2\nfrom ._deeplab import DeepLabHead, DeepLabV3\nfrom torchvision.models.segmentation.fcn import FCN, FCNHead\nimport torch.nn as nn\nfrom torchvision.models import resnet\n\ndef _segm_mobilenet(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n backbone = MobileNetV2(pretrained=pretrained_backbone)\n\n return_layers = {'features': 'out'}\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n model_map = {\n 'deeplab': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n\n inplanes = 320\n classifier = model_map[name][0](inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, None)\n return model\n\n\ndef _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n\n return_layers = {'layer4': 'out'}\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n model_map = {\n 'deeplab': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n inplanes = 2048\n classifier = model_map[name][0](inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, None)\n return model\n\ndef deeplabv3_mobilenet(progress=True,num_classes=21, aux_loss=None, dropout_p=0.0, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n model = _segm_mobilenet(\"deeplab\", \"mobilenet_v2\", num_classes, aux_loss, **kwargs)\n for m in model.modules():\n if isinstance(m, nn.Dropout):\n m.p = dropout_p\n return model\n\ndef deeplabv3_resnet50(progress=True, num_classes=21, dropout_p=0.0, aux_loss=None, **kwargs):\n model = _segm_resnet(\"deeplab\", backbone_name='resnet50', num_classes=num_classes, aux=aux_loss, **kwargs)\n for m in model.modules():\n if isinstance(m, nn.Dropout):\n m.p = dropout_p\n return model"
},
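A minimal usage sketch for the constructors above, assuming the repository layout so the module imports as network.segmentation.deeplabv3:

import torch
from network.segmentation.deeplabv3 import deeplabv3_mobilenet

model = deeplabv3_mobilenet(num_classes=21, dropout_p=0.5,
                            pretrained_backbone=False)   # False: skip the weight download
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 128, 128))
# out holds the per-pixel class logits; the exact return type (tensor vs.
# {'out': tensor}) depends on the local _deeplab.DeepLabV3 implementation.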
{
"alpha_fraction": 0.6013289093971252,
"alphanum_fraction": 0.6096345782279968,
"avg_line_length": 25.217391967773438,
"blob_id": "e1d898624041fbcf83c89c542a70ecb66994096c",
"content_id": "e5525efcaef1d2f48df3a36f74df401637702a35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 602,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 23,
"path": "/get_results.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "import argparse\nimport csv\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--log', type=str)\n\nargs = parser.parse_args()\n\nbest_acc_list = []\nacc_array = []\nmax_acc =0.0\nwith open(args.log, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n row = list(map(float, row))\n best_acc = np.max(row)\n best_acc_list.append(best_acc)\n acc_array.append(row)\n if best_acc > max_acc:\n max_acc = best_acc\n print(best_acc_list)\n print(\"Mean=%.4f, Std=%.4f, Best=%.4f\"%(np.mean(best_acc_list), np.std( best_acc_list), max_acc))"
},
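get_results.py above expects one CSV row per run, each row holding that run's per-epoch accuracies. A hypothetical round trip (path and numbers made up):

import csv, os

os.makedirs('log', exist_ok=True)
rows = [[0.31, 0.52, 0.61],    # run 1: accuracy per epoch
        [0.28, 0.55, 0.63]]    # run 2
with open('log/demo.csv', 'w', newline='') as f:
    csv.writer(f).writerows(rows)
# python get_results.py --log log/demo.csv
# best per run -> [0.61, 0.63]; Mean=0.6200, Std=0.0100, Best=0.6300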
{
"alpha_fraction": 0.4699796736240387,
"alphanum_fraction": 0.5072972178459167,
"avg_line_length": 53.125,
"blob_id": "f8957fb279f9d4d866eaddeb229931cc6129387a",
"content_id": "a5760523d71f8e873bcaa75e55563cacdf1a9e08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10826,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 200,
"path": "/dataloader.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from torchvision import datasets, transforms\nimport torch\nimport os\nfrom dataset.cityscapes1 import Cityscapes\nfrom dataset.caltech import Caltech101\nfrom dataset.camvid import CamVid\nfrom dataset.voc import VOCSegmentation\nfrom dataset.nyu import NYUv2, NYUv2Depth\nfrom utils import ext_transforms\n\n\n\ndef get_dataloader(args):\n if args.dataset.lower()=='mnist':\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_root, train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_root, train=False, download=True,\n transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n\n elif args.dataset.lower()=='cifar10':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(args.data_root, train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(args.data_root, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n elif args.dataset.lower()=='cifar100':\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(args.data_root, train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(args.data_root, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n elif args.dataset.lower()=='caltech101':\n train_loader = torch.utils.data.DataLoader(\n Caltech101(args.data_root, train=True, download=args.download,\n transform=transforms.Compose([\n transforms.Resize(128),\n transforms.RandomCrop(128),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n Caltech101(args.data_root, train=False, download=args.download,\n transform=transforms.Compose([\n transforms.Resize(128),\n transforms.CenterCrop(128),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=2)\n elif args.dataset.lower()=='imagenet':\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop(128),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n transform_test = transforms.Compose([\n transforms.Resize(256),\n 
transforms.CenterCrop(128),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n\n train_val_dataset_dir = os.path.join(args.data_root, \"train\")\n test_dataset_dir = os.path.join(args.data_root, \"val\")\n\n trainset = datasets.ImageFolder(root=train_val_dataset_dir, transform=transform_train)\n valset = datasets.ImageFolder(root=test_dataset_dir, transform=transform_test)\n\n train_loader = torch.utils.data.DataLoader(trainset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=5,\n pin_memory=True)\n test_loader = torch.utils.data.DataLoader(valset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=5,\n pin_memory=True)\n\n ########### Segmentation\n elif args.dataset.lower()=='camvid':\n print(args.data_root)\n train_loader = torch.utils.data.DataLoader(\n CamVid(args.data_root, split='train',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtRandomCrop(128, pad_if_needed=True),\n ext_transforms.ExtRandomHorizontalFlip(),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n CamVid(args.data_root, split='test',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=2)\n elif args.dataset.lower() in ['nyuv2']:\n train_loader = torch.utils.data.DataLoader(\n NYUv2(args.data_root, split='train',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtRandomCrop(128, pad_if_needed=True),\n ext_transforms.ExtRandomHorizontalFlip(),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n NYUv2(args.data_root, split='test',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=2)\n elif args.dataset.lower() in ['cityscapes']:\n train_loader = torch.utils.data.DataLoader(\n Cityscapes(args.data_root, split='train',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtRandomCrop(128, pad_if_needed=True),\n ext_transforms.ExtRandomHorizontalFlip(),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n Cityscapes(args.data_root, split='val',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize((0.5,), (0.5,))\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=2)\n\n elif args.dataset.lower() in ['voc2012']:\n train_loader = torch.utils.data.DataLoader(\n VOCSegmentation(args.data_root, image_set='train',\n transform=ext_transforms.ExtCompose([\n # ext_transforms.ExtRandomScale((0.5, 2.0)),\n # ext_transforms.ExtRandomCrop(513, pad_if_needed=True),\n ext_transforms.ExtResize(256),\n ext_transforms.ExtRandomCrop(128, pad_if_needed=True),\n ext_transforms.ExtRandomHorizontalFlip(),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225]),\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(\n VOCSegmentation(args.data_root, image_set='val',\n transform=ext_transforms.ExtCompose([\n ext_transforms.ExtResize(256),\n ext_transforms.ExtCenterCrop(224),\n ext_transforms.ExtToTensor(),\n ext_transforms.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])),\n batch_size=args.test_batch_size, shuffle=False, num_workers=2)\n\n return train_loader, test_loader\n\n"
},
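get_dataloader above only reads attributes from its argument, so any argparse.Namespace with the right fields works. A minimal sketch for the cifar10 branch (data_root is made up; the repository root is assumed to be on sys.path):

from argparse import Namespace
from dataloader import get_dataloader

args = Namespace(dataset='cifar10', data_root='./data', batch_size=256)
train_loader, test_loader = get_dataloader(args)
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([256, 3, 32, 32]) after crop/flip/normalize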
{
"alpha_fraction": 0.5068529844284058,
"alphanum_fraction": 0.5568707585334778,
"avg_line_length": 37.882354736328125,
"blob_id": "1cd3eec0f39a43dfce3a252a70f7ad8eb12a7fcf",
"content_id": "4fd08b23602bbc3773cf9529e7e7638c7562e6db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11236,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 289,
"path": "/network/segmentation/segnet.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\n\nfrom .utils import segnetDown2, segnetDown3, segnetDown4, segnetUp2, segnetUp3,segnetUp4\nfrom torchvision.models import vgg\nimport torch\n\nclass SegNetVgg16(nn.Module):\n def __init__(self, num_classes=21, in_channels=3, is_unpooling=True, pretrained_backbone=False):\n super(SegNetVgg16, self).__init__()\n #self.num_classes = num_classes\n self.in_channels = in_channels\n self.is_unpooling = is_unpooling\n\n self.down1 = segnetDown2(self.in_channels, 64)\n self.down2 = segnetDown2(64, 128)\n self.down3 = segnetDown3(128, 256)\n self.down4 = segnetDown3(256, 512)\n self.down5 = segnetDown3(512, 512)\n\n self.up5 = segnetUp3(512, 512)\n self.up4 = segnetUp3(512, 256)\n self.up3 = segnetUp3(256, 128)\n self.up2 = segnetUp2(128, 64)\n self.up1 = segnetUp2(64, num_classes)\n\n #for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight)\n # nn.init.constant_(m.bias, 0)\n # elif isinstance(m, nn.BatchNorm2d):\n # nn.init.constant_(m.weight, 1)\n # nn.init.constant_(m.bias, 0)\n #self.init_vgg16_params()\n\n if pretrained_backbone==True:\n self.init_vgg16_params()\n\n\n def forward(self, inputs, positive_out=False):\n\n down1, indices_1, unpool_shape1 = self.down1(inputs)\n down2, indices_2, unpool_shape2 = self.down2(down1)\n down3, indices_3, unpool_shape3 = self.down3(down2)\n down4, indices_4, unpool_shape4 = self.down4(down3)\n down5, indices_5, unpool_shape5 = self.down5(down4)\n\n up5 = self.up5(down5, indices_5, unpool_shape5)\n up4 = self.up4(up5, indices_4, unpool_shape4)\n up3 = self.up3(up4, indices_3, unpool_shape3)\n up2 = self.up2(up3, indices_2, unpool_shape2)\n up1 = self.up1(up2, indices_1, unpool_shape1)\n if positive_out:\n return torch.relu( up1 )\n return up1\n\n def init_vgg16_params(self):\n\n vgg16 = vgg.vgg16_bn(pretrained=True)\n\n blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]\n\n features = list(vgg16.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n vgg_layers.append(_layer)\n \n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]\n else:\n units = [\n conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit,\n ]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n elif isinstance(l1, nn.BatchNorm2d) and isinstance(l2, nn.BatchNorm2d):\n l2.running_mean.data = l1.running_mean.data\n l2.running_var.data = l1.running_var.data\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\nclass SegNetVgg19(nn.Module):\n def __init__(self, num_classes=21, in_channels=3, is_unpooling=True, pretrained_backbone=False):\n super(SegNetVgg19, self).__init__()\n #self.num_classes = num_classes\n self.in_channels = in_channels\n self.is_unpooling = is_unpooling\n\n self.down1 = segnetDown2(self.in_channels, 64)\n self.down2 = segnetDown2(64, 128)\n self.down3 = segnetDown4(128, 256)\n self.down4 = 
segnetDown4(256, 512)\n self.down5 = segnetDown4(512, 512)\n\n self.up5 = segnetUp4(512, 512)\n self.up4 = segnetUp4(512, 256)\n self.up3 = segnetUp4(256, 128)\n self.up2 = segnetUp2(128, 64)\n self.up1 = segnetUp2(64, num_classes)\n\n #for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight)\n # nn.init.constant_(m.bias, 0)\n # elif isinstance(m, nn.BatchNorm2d):\n # nn.init.constant_(m.weight, 1)\n # nn.init.constant_(m.bias, 0)\n if pretrained_backbone==True:\n self.init_vgg19_params()\n \n\n def forward(self, inputs, positive_out=False):\n\n down1, indices_1, unpool_shape1 = self.down1(inputs)\n down2, indices_2, unpool_shape2 = self.down2(down1)\n down3, indices_3, unpool_shape3 = self.down3(down2)\n down4, indices_4, unpool_shape4 = self.down4(down3)\n down5, indices_5, unpool_shape5 = self.down5(down4)\n\n up5 = self.up5(down5, indices_5, unpool_shape5)\n up4 = self.up4(up5, indices_4, unpool_shape4)\n up3 = self.up3(up4, indices_3, unpool_shape3)\n up2 = self.up2(up3, indices_2, unpool_shape2)\n up1 = self.up1(up2, indices_1, unpool_shape1)\n if positive_out:\n return torch.relu( up1 )\n return up1\n\n def init_vgg19_params(self):\n\n vgg19 = vgg.vgg19_bn(pretrained=True)\n\n blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]\n\n features = list(vgg19.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n vgg_layers.append(_layer)\n \n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]\n else:\n units = [\n conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit,\n conv_block.conv4.cbr_unit,\n ]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n elif isinstance(l1, nn.BatchNorm2d) and isinstance(l2, nn.BatchNorm2d):\n l2.running_mean.data = l1.running_mean.data\n l2.running_var.data = l1.running_var.data\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n\nclass SegNetVgg13(nn.Module):\n def __init__(self, num_classes=21, in_channels=3, is_unpooling=True, pretrained_backbone=False):\n super(SegNetVgg13, self).__init__()\n #self.num_classes = num_classes\n self.in_channels = in_channels\n self.is_unpooling = is_unpooling\n\n self.down1 = segnetDown2(self.in_channels, 64)\n self.down2 = segnetDown2(64, 128)\n self.down3 = segnetDown2(128, 256)\n self.down4 = segnetDown2(256, 512)\n self.down5 = segnetDown2(512, 512)\n\n self.up5 = segnetUp2(512, 512)\n self.up4 = segnetUp2(512, 256)\n self.up3 = segnetUp2(256, 128)\n self.up2 = segnetUp2(128, 64)\n self.up1 = segnetUp2(64, num_classes)\n\n #for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight)\n # nn.init.constant_(m.bias, 0)\n # elif isinstance(m, nn.BatchNorm2d):\n # nn.init.constant_(m.weight, 1)\n # nn.init.constant_(m.bias, 0)\n if pretrained_backbone==True:\n self.init_vgg13_params()\n \n\n def forward(self, inputs, 
positive_out=False):\n\n down1, indices_1, unpool_shape1 = self.down1(inputs)\n down2, indices_2, unpool_shape2 = self.down2(down1)\n down3, indices_3, unpool_shape3 = self.down3(down2)\n down4, indices_4, unpool_shape4 = self.down4(down3)\n down5, indices_5, unpool_shape5 = self.down5(down4)\n\n up5 = self.up5(down5, indices_5, unpool_shape5)\n up4 = self.up4(up5, indices_4, unpool_shape4)\n up3 = self.up3(up4, indices_3, unpool_shape3)\n up2 = self.up2(up3, indices_2, unpool_shape2)\n up1 = self.up1(up2, indices_1, unpool_shape1)\n if positive_out:\n return torch.relu( up1 )\n return up1\n\n def init_vgg13_params(self):\n\n vgg13 = vgg.vgg13_bn(pretrained=True)\n\n blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]\n\n features = list(vgg13.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n vgg_layers.append(_layer)\n \n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]\n else:\n units = [\n conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n #conv_block.conv3.cbr_unit,\n #conv_block.conv4.cbr_unit,\n ]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n elif isinstance(_layer, nn.BatchNorm2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n elif isinstance(l1, nn.BatchNorm2d) and isinstance(l2, nn.BatchNorm2d):\n l2.running_mean.data = l1.running_mean.data\n l2.running_var.data = l1.running_var.data\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data"
},
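SegNet's decoder blocks above upsample with the max-pooling indices that the matching encoder blocks record on the way down. A standalone sketch of that pool/unpool pairing in plain PyTorch:

import torch
import torch.nn as nn

pool = nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(2, stride=2)

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
y, idx = pool(x)                            # y: (1,1,2,2) maxima, idx: their positions
up = unpool(y, idx, output_size=x.size())   # maxima restored in place, zeros elsewhere
print(up[0, 0])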
{
"alpha_fraction": 0.6190342307090759,
"alphanum_fraction": 0.6370016932487488,
"avg_line_length": 45.875,
"blob_id": "e0a5f50c8265a227c738b429d0d4500e6a71b5a7",
"content_id": "8dcf9916727960eed042f2078fd76126e85bb5b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7124,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 152,
"path": "/train_teacher_seg.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchvision import datasets, transforms\nimport torchvision\nimport network\nfrom dataloader import get_dataloader\nfrom utils.stream_metrics import StreamSegMetrics\n\nfrom utils.visualizer import VisdomPlotter\nfrom utils.misc import pack_images, denormalize\nfrom collections import OrderedDict\nfrom utils import focal_loss\nimport numpy as np\nimport random\n\nvp = None\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device, dtype=torch.long)\n optimizer.zero_grad()\n output = model(data)\n loss = focal_loss(output, target, gamma=2, ignore_index=255) #focal_loss(output, target, gamma=2)#F.cross_entropy(output, target, ignore_index=255)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n seg_metrics = StreamSegMetrics(args.num_classes)\n\n with torch.no_grad():\n for i, (data, target) in enumerate(test_loader):\n data, target = data.to(device), target.to(device, dtype=torch.long)\n output = model(data)\n seg_metrics.update(output.max(1)[1].detach().cpu().numpy().astype('uint8'), target.detach().cpu().numpy().astype('uint8'))\n # if i==0:\n # vp.add_image( 'input', pack_images( ((data+1)/2).clamp(0, 1.0).cpu().numpy() ) )\n # vp.add_image( 'target', pack_images( test_loader.dataset.decode_target(target.cpu().numpy()), channel_last=True ).astype('uint8') )\n # vp.add_image( 'pred', pack_images( test_loader.dataset.decode_target(output.max(1)[1].detach().cpu().numpy().astype('uint8')), channel_last=True ).astype('uint8') )\n\n results = seg_metrics.get_results()\n\n print('\\nTest set: Acc= %.6f, mIoU: %.6f\\n'%(results['Overall Acc'],results['Mean IoU']))\n return results\n \ndef get_model(args):\n if args.model.lower()=='deeplabv3_resnet50':\n return network.segmentation.deeplabv3.deeplabv3_resnet50(num_classes=args.num_classes, dropout_p=0.5, pretrained_backbone=True)\n elif args.model.lower()=='segnet_vgg19':\n return network.segmentation.segnet.SegNetVgg19(args.num_classes, pretrained_backbone=True)\n elif args.model.lower()=='segnet_vgg16':\n return network.segmentation.segnet.SegNetVgg16(args.num_classes, pretrained_backbone=True)\n elif args.model.lower()=='segnet_vgg13':\n return network.segmentation.segnet.SegNetVgg13(args.num_classes, pretrained_backbone=True)\n \ndef main():\n # Training settings\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_classes', type=int, default=21)\n parser.add_argument('--batch_size', type=int, default=16, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test_batch_size', type=int, default=16, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--data_root', type=str, default='/root/data/unpacked/')\n parser.add_argument('--dataset', type=str, 
default='voc2012', choices=['camvid', 'nyuv2', 'cityscapes','voc2012'],\n help='dataset name (default: camvid)')\n parser.add_argument('--model', type=str, default='deeplabv3_resnet50', choices=['deeplabv3_resnet50', 'segnet_vgg19', 'segnet_vgg16'],\n help='model name (default: deeplabv3_resnet50)')\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--gamma', type=float, default=0.1)\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--no_cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--step_size', type=int, default=100, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--ckpt', type=str, default=None)\n parser.add_argument('--log_interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--test_only', action='store_true', default=False)\n parser.add_argument('--download', action='store_true', default=False)\n parser.add_argument('--scheduler', action='store_true', default=False)\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n # torch.manual_seed(args.seed)\n # torch.cuda.manual_seed(args.seed)\n # np.random.seed(args.seed)\n # random.seed(args.seed)\n # torch.backends.cudnn.deterministic = True\n # torch.backends.cudnn.benchmark = False\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n print(args)\n\n # global vp\n # vp = VisdomPlotter('15550', 'teacher-seg-%s'%args.dataset)\n\n train_loader, test_loader = get_dataloader(args)\n model = get_model(args)\n\n if args.ckpt is not None:\n model.load_state_dict( torch.load( args.ckpt ) )\n model = model.to(device)\n if torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n\n best_result = 0\n if args.scheduler:\n scheduler = optim.lr_scheduler.StepLR(optimizer, args.step_size, gamma=args.gamma)\n\n if args.test_only:\n results = test(args, model, device, test_loader)\n return\n\n for epoch in range(1, args.epochs + 1):\n if args.scheduler:\n scheduler.step()\n print(\"Lr = %.6f\"%(optimizer.param_groups[0]['lr']))\n print(\"train\")\n train(args, model, device, train_loader, optimizer, epoch)\n print(\"val\")\n results = test(args, model, device, test_loader)\n # vp.add_scalar('mIoU', epoch, results['Mean IoU'])\n if results['Mean IoU']>best_result:\n best_result = results['Mean IoU']\n torch.save(model.state_dict(),\"checkpoint/teacher/%s-%s-3.pt\"%(args.dataset, args.model))\n print(\"Best mIoU=%.6f\"%best_result)\n\nif __name__ == '__main__':\n main()"
},
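The training loss above is a focal loss with gamma=2: scaling the cross entropy by (1 - p_t)^gamma down-weights pixels the model already classifies confidently. A small numeric sketch of that effect:

import torch
import torch.nn.functional as F

logits = torch.tensor([[4.0, 0.0],    # an easy sample (confidently correct)
                       [0.2, 0.0]])   # a hard sample (barely correct)
target = torch.tensor([0, 0])
ce = F.cross_entropy(logits, target, reduction='none')
pt = torch.exp(-ce)                   # probability assigned to the true class
print(ce)                  # roughly tensor([0.0181, 0.5981])
print((1 - pt)**2 * ce)    # gamma=2 shrinks the easy term far more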
{
"alpha_fraction": 0.5694699287414551,
"alphanum_fraction": 0.5972100496292114,
"avg_line_length": 40.13442611694336,
"blob_id": "c6b8f34f401b71440f4b34d7ccbca7bbbf30a9fe",
"content_id": "c0cd49adb3c86e6d5489b1492f7fc85b72b79e20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12545,
"license_type": "no_license",
"max_line_length": 201,
"num_lines": 305,
"path": "/DFAD_cifar.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision.utils as vutils\n\nimport network\n#from utils.visualizer import VisdomPlotter\n#from utils.misc import pack_images, denormalize\nfrom dataloader import get_dataloader\nimport os, random\nimport numpy as np\nimport torchvision\nimport glob\n\n#vp = VisdomPlotter('15550', env='DFAD-cifar')\n\nclass DeepInversionFeatureHook():\n '''\n Implementation of the forward hook to track feature statistics and compute a loss on them.\n Will compute mean and variance, and will use l2 as a loss\n '''\n\n def __init__(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)\n\n def hook_fn(self, module, input, output):\n # hook co compute deepinversion's feature distribution regularization\n\n\n nch = input[0].shape[1]\n\n mean = input[0].mean([0, 2, 3])\n var = input[0].permute(1, 0, 2, 3).contiguous().view([nch, -1]).var(1, unbiased=False)\n\n # forcing mean and variance to match between two distributions\n # other ways might work better, e.g. KL divergence\n r_feature = torch.norm(module.running_var.data.type(var.type()) - var, 2) + torch.norm(\n module.running_mean.data.type(var.type()) - mean, 2)\n\n self.r_feature = r_feature\n # must have no output\n\n def close(self):\n self.hook.remove()\n\ndef kdloss(y, teacher_scores):\n p = F.log_softmax(y/4, dim=1)\n q = F.softmax(teacher_scores/4, dim=1)\n l_kl = F.kl_div(p, q, size_average=False) * (4**2) / y.shape[0]\n return l_kl\n\n\ndef train(args, teacher, student, generator, device, optimizer, epoch, prefix, loss_r_feature_layers):\n teacher.eval()\n student.train()\n generator.train()\n optimizer_S, optimizer_G = optimizer\n\n # add this kd_loss for Adaptive\n kl_loss = nn.KLDivLoss(reduction='batchmean').cuda()\n\n for i in range( args.epoch_itrs ):\n for k in range(5):\n z = torch.randn( (args.batch_size, args.nz, 1, 1) ).to(device)\n optimizer_S.zero_grad()\n fake = generator(z).detach()\n t_logit = teacher(fake)\n s_logit = student(fake)\n\n loss_S = F.l1_loss(s_logit, t_logit.detach())\n\n # #T = 4\n #loss_S = kdloss(s_logit, t_logit.detach())\n\n # # competition loss, Adaptive DeepInvesrion\n # # jensen shanon divergence:\n # # another way to force KL between negative probabilities\n # T = 3.0\n # P = F.softmax(s_logit / T, dim=1)\n # Q = F.softmax(t_logit / T, dim=1)\n # M = 0.5 * (P + Q)\n #\n # P = torch.clamp(P, 0.01, 0.99)\n # Q = torch.clamp(Q, 0.01, 0.99)\n # M = torch.clamp(M, 0.01, 0.99)\n # eps = 0.0\n # # loss_verifier_cig = 0.5 * kl_loss(F.log_softmax(outputs_verifier / T, dim=1), M) + 0.5 * kl_loss(F.log_softmax(outputs/T, dim=1), M)\n # loss_verifier_cig = 0.5 * kl_loss(torch.log(P + eps), M) + 0.5 * kl_loss(torch.log(Q + eps), M)\n # # JS criteria - 0 means full correlation, 1 - means completely different\n # # loss_verifier_cig = 1.0 - torch.clamp(loss_verifier_cig, 0.0, 1.0)\n # # loss_S = - loss_verifier_cig\n # loss_verifier_cig = torch.clamp(loss_verifier_cig, 0.0, 1.0)\n # loss_S = loss_verifier_cig\n\n loss_S.backward()\n optimizer_S.step()\n\n z = torch.randn( (args.batch_size, args.nz, 1, 1) ).to(device)\n optimizer_G.zero_grad()\n generator.train()\n fake = generator(z)\n t_logit = teacher(fake) \n s_logit = student(fake)\n\n #loss_G = - torch.log( F.l1_loss( s_logit, t_logit )+1)\n #loss_G = - F.l1_loss( s_logit, t_logit )\n\n # R_feature loss\n loss_distr = sum([mod.r_feature for mod in loss_r_feature_layers])\n loss_G = 
0.01*loss_distr # best for noise before BN\n loss_G += - 0.1*F.l1_loss( s_logit, t_logit )\n\n\n # # jensen shanon divergence:\n # # another way to force KL between negative probabilities\n # T = 3.0\n # PP = F.softmax(s_logit / T, dim=1)\n # QQ = F.softmax(t_logit / T, dim=1)\n # MM = 0.5 * (PP + QQ)\n #\n # PP = torch.clamp(PP, 0.01, 0.99)\n # QQ = torch.clamp(QQ, 0.01, 0.99)\n # MM = torch.clamp(MM, 0.01, 0.99)\n # eps = 0.0\n # # loss_verifier_cig = 0.5 * kl_loss(F.log_softmax(outputs_verifier / T, dim=1), M) + 0.5 * kl_loss(F.log_softmax(outputs/T, dim=1), M)\n # loss_verifier_cig = 0.5 * kl_loss(torch.log(PP + eps), MM) + 0.5 * kl_loss(torch.log(QQ + eps), MM)\n # # JS criteria - 0 means full correlation, 1 - means completely different\n # loss_verifier_cig = 1.0 - torch.clamp(loss_verifier_cig, 0.0, 1.0)\n #\n # loss_G += loss_verifier_cig\n\n loss_G.backward()\n optimizer_G.step()\n\n if i % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tG_Loss: {:.6f} S_loss: {:.6f}'.format(\n epoch, i, args.epoch_itrs, 100*float(i)/float(args.epoch_itrs), loss_G.item(), loss_S.item()))\n #vp.add_scalar('Loss_S', (epoch-1)*args.epoch_itrs+i, loss_S.item())\n #vp.add_scalar('Loss_G', (epoch-1)*args.epoch_itrs+i, loss_G.item())\n\n name_use = \"best_images_our\"\n if prefix is not None:\n name_use = prefix + name_use\n next_batch = len(glob.glob(\"./%s/*.png\" % name_use)) // 1\n\n vutils.save_image(fake.data.clone(),\n './{}/output_{}.png'.format(name_use, epoch),\n normalize=True, scale_each=True, nrow=10)\n\ndef test(args, student, generator, device, test_loader, epoch=0):\n student.eval()\n generator.eval()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for i, (data, target) in enumerate(test_loader):\n data, target = data.to(device), target.to(device)\n\n z = torch.randn( (data.shape[0], args.nz, 1, 1), device=data.device, dtype=data.dtype )\n fake = generator(z)\n output = student(data)\n #if i==0:\n #vp.add_image( 'input', pack_images( denormalize(data,(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)).clamp(0,1).detach().cpu().numpy() ) )\n #vp.add_image( 'generated', pack_images( denormalize(fake,(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)).clamp(0,1).detach().cpu().numpy() ) )\n\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n acc = correct/len(test_loader.dataset)\n return acc\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='DFAD CIFAR')\n parser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 256)')\n parser.add_argument('--test_batch_size', type=int, default=128, metavar='N',\n help='input batch size for testing (default: 128)')\n \n parser.add_argument('--epochs', type=int, default=500, metavar='N',\n help='number of epochs to train (default: 500)')\n parser.add_argument('--epoch_itrs', type=int, default=50)\n parser.add_argument('--lr_S', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr_G', type=float, default=1e-3,\n help='learning rate (default: 0.1)')\n parser.add_argument('--data_root', type=str, default='/root/data/unpacked/CIFAR100')\n\n parser.add_argument('--dataset', type=str, default='cifar100', choices=['cifar10', 'cifar100'],\n help='dataset name (default: cifar10)')\n parser.add_argument('--model', type=str, default='resnet18_8x', choices=['resnet18_8x'],\n help='model name (default: resnet18_8x)')\n parser.add_argument('--weight_decay', type=float, default=5e-4)\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--ckpt', type=str, default='checkpoint/teacher/cifar100-resnet34_8x.pt')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--nz', type=int, default=256)\n parser.add_argument('--test-only', action='store_true', default=False)\n parser.add_argument('--download', action='store_true', default=False)\n parser.add_argument('--step_size', type=int, default=100, metavar='S')\n parser.add_argument('--scheduler', action='store_true', default=False)\n\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n print(args)\n\n _, test_loader = get_dataloader(args)\n\n num_classes = 10 if args.dataset=='cifar10' else 100\n teacher = network.resnet_8x.ResNet34_8x(num_classes=num_classes)\n student = network.resnet_8x.ResNet18_8x(num_classes=num_classes)\n generator = network.gan.GeneratorA(nz=args.nz, nc=3, img_size=32)\n\n teacher.load_state_dict( torch.load( args.ckpt ) )\n print(\"Teacher restored from %s\"%(args.ckpt))\n\n teacher = teacher.to(device)\n student = student.to(device)\n generator = generator.to(device)\n\n teacher = nn.DataParallel(teacher)\n student = nn.DataParallel(student)\n generator = nn.DataParallel(generator)\n\n teacher.eval()\n\n # deepinversion\n loss_r_feature_layers = []\n for module in teacher.modules():\n if isinstance(module, nn.BatchNorm2d):\n loss_r_feature_layers.append(DeepInversionFeatureHook(module))\n\n optimizer_S = optim.SGD( student.parameters(), lr=args.lr_S, weight_decay=args.weight_decay, 
momentum=0.9 )\n optimizer_G = optim.Adam( generator.parameters(), lr=args.lr_G )\n \n if args.scheduler:\n scheduler_S = optim.lr_scheduler.MultiStepLR(optimizer_S, [100, 200], 0.1)\n scheduler_G = optim.lr_scheduler.MultiStepLR(optimizer_G, [100, 200], 0.1)\n best_acc = 0\n if args.test_only:\n acc = test(args, student, generator, device, test_loader)\n return\n acc_list = []\n\n\n\n prefix = \"runs/cifar100_generation/\"\n for create_folder in [prefix, prefix + \"/best_images_our/\"]:\n if not os.path.exists(create_folder):\n os.makedirs(create_folder)\n\n\n\n for epoch in range(1, args.epochs + 1):\n # Train\n if args.scheduler:\n scheduler_S.step()\n scheduler_G.step()\n\n train(args, teacher=teacher, student=student, generator=generator, device=device, optimizer=[optimizer_S, optimizer_G], epoch=epoch, prefix=prefix, loss_r_feature_layers= loss_r_feature_layers)\n # Test\n acc = test(args, student, generator, device, test_loader, epoch)\n acc_list.append(acc)\n if acc>best_acc:\n best_acc = acc\n torch.save(student.state_dict(),\"checkpoint/student/%s-%s.pt\"%(args.dataset, 'resnet18_8x'))\n torch.save(generator.state_dict(),\"checkpoint/student/%s-%s-generator.pt\"%(args.dataset, 'resnet18_8x'))\n #vp.add_scalar('Acc', epoch, acc)\n print(\"Best Acc=%.6f\"%best_acc)\n\n import csv\n os.makedirs('log', exist_ok=True)\n with open('log/DFAD-%s.csv'%(args.dataset), 'a') as f:\n writer = csv.writer(f)\n writer.writerow(acc_list)\n\nif __name__ == '__main__':\n main()"
},
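The DeepInversionFeatureHook above penalizes the gap between the batch statistics entering each BatchNorm layer and that layer's running statistics. A standalone sketch of the same r_feature computation:

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(8)
bn.running_mean.fill_(0.5)           # stand-ins for statistics learned
bn.running_var.fill_(2.0)            # on the (inaccessible) real data

feat = torch.randn(4, 8, 16, 16)     # activations entering the BN layer
mean = feat.mean([0, 2, 3])
var = feat.permute(1, 0, 2, 3).reshape(8, -1).var(1, unbiased=False)
r_feature = (torch.norm(bn.running_var - var, 2)
             + torch.norm(bn.running_mean - mean, 2))
print(r_feature)   # driven toward zero as generated inputs match the stats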
{
"alpha_fraction": 0.6075717210769653,
"alphanum_fraction": 0.6220813989639282,
"avg_line_length": 43.09558868408203,
"blob_id": "ea84fc3aff746c37478dba09e9bec432ac81230a",
"content_id": "0a0c50115d8158dc31be2ad571bd22be32b494e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5996,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 136,
"path": "/train_teacher.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport network\nfrom dataloader import get_dataloader\n\nimport random\nimport numpy as np\nimport os\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n if args.verbose and batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(args, model, device, test_loader, cur_epoch):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nEpoch {} Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n cur_epoch, test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return correct/len(test_loader.dataset)\n \ndef get_model(args):\n if args.model.lower()=='lenet5':\n return network.lenet.LeNet5()\n elif args.model.lower()=='resnet34':\n return torchvision.models.resnet34(num_classes=args.num_classes, pretrained=args.pretrained)\n elif args.model.lower()=='resnet34_8x':\n return network.resnet_8x.ResNet34_8x(num_classes=args.num_classes)\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_classes', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test_batch_size', type=int, default=128, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--data_root', type=str, default='/root/data/unpacked/CIFAR10')\n parser.add_argument('--epochs', type=int, default=30, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--weight_decay', type=float, default=5e-4)\n parser.add_argument('--dataset', type=str, default='mnist', choices=['mnist', 'svhn', 'cifar10', 'caltech101', 'nyuv2'],\n help='dataset name (default: mnist)')\n parser.add_argument('--model', type=str, default='lenet5', choices=['lenet5', 'resnet34', 'resnet34_8x'],\n help='model name (default: mnist)')\n \n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--step_size', type=int, default=50, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--ckpt', type=str, default=None)\n parser.add_argument('--log_interval', type=int, default=10, 
metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--test_only', action='store_true', default=False)\n parser.add_argument('--download', action='store_true', default=False)\n parser.add_argument('--pretrained', action='store_true', default=False)\n parser.add_argument('--scheduler', action='store_true', default=False)\n parser.add_argument('--verbose', action='store_true', default=False)\n\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n os.makedirs('checkpoint/teacher', exist_ok=True)\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n print(args)\n\n train_loader, test_loader = get_dataloader(args)\n model = get_model(args)\n\n if args.ckpt is not None:\n model.load_state_dict( torch.load( args.ckpt ) )\n \n model = model.to(device)\n optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n best_acc = 0\n if args.scheduler:\n scheduler = optim.lr_scheduler.StepLR(optimizer, args.step_size, 0.1)\n\n if args.test_only:\n acc = test(args, model, device, test_loader, 0)\n return\n \n for epoch in range(1, args.epochs + 1):\n if args.scheduler:\n scheduler.step()\n #print(\"Lr = %.6f\"%(optimizer.param_groups[0]['lr']))\n train(args, model, device, train_loader, optimizer, epoch)\n acc = test(args, model, device, test_loader, epoch)\n if acc>best_acc:\n best_acc = acc\n torch.save(model.state_dict(),\"checkpoint/teacher/%s-%s.pt\"%(args.dataset, args.model))\n print(\"Best Acc=%.6f\"%best_acc)\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.5498466491699219,
"alphanum_fraction": 0.5641615390777588,
"avg_line_length": 33.017391204833984,
"blob_id": "c7d4772c81f9196a50b3290484f8dbbc1db49720",
"content_id": "62f10bf14b01ced82e78dbd0876a4d2b990b78ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3914,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 115,
"path": "/utils/utils.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nclass ScaleInvariantLoss(nn.Module):\n \"\"\"This criterion is used in depth prediction task.\n **Parameters:**\n - **la** (int, optional): Default value is 0.5. No need to change.\n - **ignore_index** (int, optional): Value to ignore.\n **Shape:**\n - **inputs**: $(N, H, W)$.\n - **targets**: $(N, H, W)$.\n - **output**: scalar.\n \"\"\"\n def __init__(self, la=0.5, ignore_index=0):\n super(ScaleInvariantLoss, self).__init__()\n self.la = la\n self.ignore_index = ignore_index\n\n def forward(self, inputs, targets):\n size = inputs.size()\n if len(size) > 2:\n inputs = inputs.view(size[0], -1)\n targets = targets.view(size[0], -1)\n \n inv_mask = targets.eq(self.ignore_index)\n nums = (1-inv_mask.float()).sum(1)\n\n log_d = torch.log(inputs) - torch.log(targets)\n log_d[inv_mask] = 0\n\n loss = torch.div(torch.pow(log_d, 2).sum(1), nums) - \\\n self.la * torch.pow(torch.div(log_d.sum(1), nums), 2)\n\n return loss.mean()\n\nclass FocalLoss(nn.Module):\n def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.ignore_index = ignore_index\n self.size_average = size_average\n\n def forward(self, inputs, targets):\n ce_loss = F.cross_entropy(\n inputs, targets, reduction='none', ignore_index=self.ignore_index)\n pt = torch.exp(-ce_loss)\n focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss\n if self.size_average:\n return focal_loss.mean()\n else:\n return focal_loss.sum()\n\ndef focal_loss(inputs, targets, alpha=1, gamma=0, size_average=True, ignore_index=255):\n ce_loss = F.cross_entropy(\n inputs, targets, reduction='none', ignore_index=ignore_index)\n pt = torch.exp(-ce_loss)\n focal_loss = alpha * (1-pt)**gamma * ce_loss\n if size_average:\n return focal_loss.mean()\n else:\n return focal_loss.sum()\n\ndef kldiv(logits, targets, reduction='batchmean'):\n p = F.log_softmax(logits, dim=1)\n q = F.softmax(targets, dim=1)\n return F.kl_div(p, q, reduction=reduction)\n\ndef soft_cross_entropy(logits, target, T=1.0, size_average=True, target_is_prob=False):\n \"\"\" Cross Entropy for soft targets\n \n **Parameters:**\n - **logits** (Tensor): logits score (e.g. outputs of fc layer)\n - **targets** (Tensor): logits of soft targets\n - **T** (float): temperature of distill\n - **size_average**: average the outputs\n - **target_is_prob**: set True if target is already a probability.\n \"\"\"\n if target_is_prob:\n p_target = target\n else:\n p_target = F.softmax(target/T, dim=1)\n \n logp_pred = F.log_softmax(logits/T, dim=1)\n # F.kl_div(logp_pred, p_target, reduction='batchmean')*T*T\n ce = torch.sum(-p_target * logp_pred, dim=1)\n if size_average:\n return ce.mean() * T * T\n else:\n return ce * T * T\n\ndef pairwise_distances(x, y=None):\n '''\n Input: x is a Nxd matrix\n y is an optional Mxd matirx\n Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2\n '''\n if len(x.shape)!=2:\n x = x.view(x.shape[0], -1)\n \n x_norm = (x**2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y**2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n \n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return dist"
},
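The `soft_cross_entropy` defined in the record above is the usual temperature-scaled distillation objective. A minimal teacher–student sketch, assuming the file is importable (its path falls outside this excerpt, so the module name below is hypothetical) and using throwaway `nn.Linear` stand-ins for real networks:

import torch
import torch.nn as nn
from loss import soft_cross_entropy  # hypothetical module name for the file above

# Stand-ins for a pretrained teacher and a student being distilled -- illustration only.
teacher = nn.Linear(10, 5)
student = nn.Linear(10, 5)

x = torch.randn(4, 10)
with torch.no_grad():
    t_logits = teacher(x)          # soft targets: raw teacher logits, not probabilities
s_logits = student(x)

# Temperature-softened cross entropy as defined above; the T*T factor keeps
# gradient magnitudes comparable across temperatures.
loss = soft_cross_entropy(s_logits, t_logits, T=4.0)
loss.backward()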
{
"alpha_fraction": 0.5215409994125366,
"alphanum_fraction": 0.5621375441551208,
"avg_line_length": 32.54166793823242,
"blob_id": "4ea34e5cde933f8db2d59d899d8e74bca42be796",
"content_id": "cbe4b60a1f5fcc7abec7184d997359188fe1e1c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2414,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 72,
"path": "/dataset/camvid.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "import os\nimport torch.utils.data as data\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np\n\nclass CamVid(data.Dataset):\n \"\"\"CamVid dataset loader where the dataset is arranged as in https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid.\n \n **Parameters:**\n - **root_dir** (string): Root directory path.\n - **mode** (string): The type of dataset: 'train' for training set, 'val'. for validation set, and 'test' for test set.\n - **transform** (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. Default: None.\n - **label_transform** (callable, optional): A function/transform that takes in the target and transform it. Default: None.\n - **loader** (callable, optional): A function to load an image given its path. By default ``default_loader`` is used.\n \"\"\"\n\n # Default encoding for pixel value, class name, and class color\n cmap = np.array([\n (128, 128, 128),\n (128, 0, 0),\n (192, 192, 128),\n #(255, 69, 0),\n (128, 64, 128),\n (60, 40, 222),\n (128, 128, 0),\n (192, 128, 128),\n (64, 64, 128),\n (64, 0, 128),\n (64, 64, 0),\n (0, 128, 192),\n (0, 0, 0),\n ])\n\n def __init__(self,\n root,\n split='train',\n transform=None):\n self.root = root\n self.split = split\n self.transform = transform\n\n self.images = glob(os.path.join(self.root, self.split, '*.png'))\n self.labels = glob(os.path.join(\n self.root, self.split+'annot', '*.png'))\n self.images.sort()\n self.labels.sort()\n\n def __getitem__(self, idx):\n \"\"\"\n Args:\n - index (``int``): index of the item in the dataset\n Returns:\n A tuple of ``PIL.Image`` (image, label) where label is the ground-truth\n of the image.\n \"\"\"\n\n img, label = Image.open(self.images[idx]), Image.open(self.labels[idx])\n\n if self.transform is not None:\n img, label = self.transform(img, label)\n label[label == 11] = 255 # ignore void\n return img, label\n\n def __len__(self):\n return len(self.images)\n\n @classmethod\n def decode_target(cls, mask):\n \"\"\"decode semantic mask to RGB image\"\"\"\n mask[mask == 255] = 11\n return cls.cmap[mask]"
},
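A minimal way to exercise the CamVid loader above, assuming the SegNet-Tutorial directory layout under `data/CamVid`; the joint transform here is a bare-bones hypothetical stand-in for real augmentation, and it converts both PIL images to tensors so the `label[label == 11] = 255` masking in `__getitem__` works:

import numpy as np
import torch
from torch.utils.data import DataLoader
from dataset.camvid import CamVid  # path as in this repo

def to_tensor_pair(img, label):
    # Hypothetical joint transform: real code would also resize/augment both.
    img = torch.from_numpy(np.array(img)).permute(2, 0, 1).float() / 255.0
    label = torch.from_numpy(np.array(label)).long()
    return img, label

train_set = CamVid(root='data/CamVid', split='train', transform=to_tensor_pair)
loader = DataLoader(train_set, batch_size=8, shuffle=True)
img, label = train_set[0]  # label uses 255 as the ignore index for the void class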
{
"alpha_fraction": 0.7538461685180664,
"alphanum_fraction": 0.7538461685180664,
"avg_line_length": 21,
"blob_id": "18aa5f76d73d2acc52e7492316b027e148311da2",
"content_id": "973597f67e8806a7b8c3e10fb2e6a17990d3b2be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/utils/__init__.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from .utils import *\nfrom . import misc\n#from . import visualizer"
},
{
"alpha_fraction": 0.7613636255264282,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 21.25,
"blob_id": "15add782ecaac5d8d4814d758d73c46c512b395d",
"content_id": "6b0928463dc5cd0034761bb891f12f492329aa0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/network/__init__.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from . import gan\nfrom . import lenet\nfrom . import resnet_8x\nfrom . import segmentation"
},
{
"alpha_fraction": 0.7580645084381104,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 20,
"blob_id": "473a818ab0920906441011238da92dc2aad0a562",
"content_id": "20dda2483fa92b23b08acffcc477b6bff4e86e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/network/segmentation/__init__.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "from . import fcn\nfrom . import segnet\nfrom . import deeplabv3"
},
{
"alpha_fraction": 0.583898663520813,
"alphanum_fraction": 0.5870646834373474,
"avg_line_length": 28.373332977294922,
"blob_id": "4df8c43c0d15ec8e2dfb6cee7331cb47ad12c9ff",
"content_id": "22b29d297d6a4ed334a40b63ed0c9f9a8bffbb1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2211,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 75,
"path": "/utils/misc.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\nimport torchvision\nimport torch \nimport os, sys\n\ndef pack_images(images, col=None, channel_last=False):\n # N, C, H, W\n if isinstance(images, (list, tuple) ):\n images = np.stack(images, 0)\n if channel_last:\n images = images.transpose(0,3,1,2) # make it channel first\n assert len(images.shape)==4\n assert isinstance(images, np.ndarray)\n \n N,C,H,W = images.shape\n if col is None:\n col = int(math.ceil(math.sqrt(N)))\n row = int(math.ceil(N / col))\n pack = np.zeros( (C, H*row, W*col), dtype=images.dtype )\n for idx, img in enumerate(images):\n h = (idx//col) * H\n w = (idx% col) * W\n pack[:, h:h+H, w:w+W] = img\n return pack\n\n\ndef denormalize(tensor, mean, std):\n _mean = [ -m / s for m, s in zip(mean, std) ]\n _std = [ 1/s for s in std ]\n\n _mean = torch.as_tensor(_mean, dtype=tensor.dtype, device=tensor.device)\n _std = torch.as_tensor(_std, dtype=tensor.dtype, device=tensor.device)\n tensor.sub_(_mean[None, :, None, None]).div_(_std[None, :, None, None])\n return tensor\n\n #torchvision.transforms.functional.normalize\n #return normalize( tensor, _mean, _std ) #torchvision.transforms.functional.normalize(tensor, _mean, _std)\n \nclass Logger(object):\n \"\"\"\n Write console output to external text file.\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.\n \"\"\"\n def __init__(self, fpath=None):\n self.console = sys.stdout\n self.file = None\n if fpath is not None:\n os.makedirs(os.path.dirname(fpath), exist_ok=True)\n self.file = open(fpath, 'w')\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args):\n self.close()\n\n def write(self, msg):\n self.console.write(msg)\n if self.file is not None:\n self.file.write(msg)\n\n def flush(self):\n self.console.flush()\n if self.file is not None:\n self.file.flush()\n os.fsync(self.file.fileno())\n\n def close(self):\n self.console.close()\n if self.file is not None:\n self.file.close()\n\n\n \n\n"
},
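`pack_images` above tiles an N×C×H×W batch into a single image grid and `denormalize` undoes a channel-wise Normalize in place; a small shape-check sketch (the mean/std values are arbitrary examples, not values from this repo):

import torch
from utils.misc import pack_images, denormalize  # path as in this repo

imgs = torch.rand(9, 3, 32, 32)
# Invert a torchvision-style Normalize; mean/std here are example values.
imgs = denormalize(imgs, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
grid = pack_images(imgs.numpy())  # 9 images -> a 3x3 tile grid
assert grid.shape == (3, 96, 96)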
{
"alpha_fraction": 0.5646663904190063,
"alphanum_fraction": 0.5777115225791931,
"avg_line_length": 33.410255432128906,
"blob_id": "a0b86e23d4263a8020b0175579d60333bb4242b5",
"content_id": "77028e8e1294e6cb0879cc9a3182e432516da418",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2683,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 78,
"path": "/dataset/sunrgbd.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "import os\nimport torch.utils.data as data\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np\n\ndef colormap(N=256, normalized=False):\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\n\nclass SUNRGBD(data.Dataset):\n \"\"\"SUNRGBD dataset loader where the dataset is arranged as in https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid.\n \n **Parameters:**\n - **root_dir** (string): Root directory path.\n - **mode** (string): The type of dataset: 'train' for training set, 'val'. for validation set, and 'test' for test set.\n - **transform** (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. Default: None.\n - **label_transform** (callable, optional): A function/transform that takes in the target and transform it. Default: None.\n - **loader** (callable, optional): A function to load an image given its path. By default ``default_loader`` is used.\n \"\"\"\n\n # Default encoding for pixel value, class name, and class color\n cmap = colormap()\n def __init__(self,\n root,\n split='train',\n transform=None):\n self.root = root\n self.split = split\n self.transform = transform\n\n self.images = glob(os.path.join(self.root, 'SUNRGBD-%s_images'%self.split, '*.jpg'))\n self.labels = glob(os.path.join(self.root, '%s13labels'%self.split, '*.png'))\n\n self.images.sort()\n self.labels.sort()\n\n def __getitem__(self, idx):\n \"\"\"\n Args:\n - index (``int``): index of the item in the dataset\n Returns:\n A tuple of ``PIL.Image`` (image, label) where label is the ground-truth\n of the image.\n \"\"\"\n\n img, label = Image.open(self.images[idx]), Image.open(self.labels[idx])\n\n if self.transform is not None:\n img, label = self.transform(img, label)\n label = label-1 # ignore void 0->255\n return img, label\n\n def __len__(self):\n return len(self.images)\n\n @classmethod\n def decode_target(cls, mask):\n \"\"\"decode semantic mask to RGB image\"\"\"\n #mask[mask == 255] = 11\n return cls.cmap[mask.astype('uint8')+1]"
},
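The `colormap` helper above is the standard PASCAL-VOC palette generator: each round it peels off the three lowest bits of the class index and ORs them into progressively lower bit positions of R, G and B. A quick sanity check of the first entries, assuming the function is imported from the module above:

from dataset.sunrgbd import colormap  # path as in this repo

cmap = colormap()
print(cmap[0])   # [0 0 0]    class 0 -> black
print(cmap[1])   # [128 0 0]  bit 0 of the index sets the top bit of R
print(cmap[2])   # [0 128 0]  bit 1 sets the top bit of G
print(cmap[4])   # [0 0 128]  bit 2 sets the top bit of B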
{
"alpha_fraction": 0.5918112397193909,
"alphanum_fraction": 0.6185341477394104,
"avg_line_length": 35.14124298095703,
"blob_id": "4a03654d7c5314a4c38aa3e992ca57ea77961d17",
"content_id": "2ff94eb2baa7af79b03005dc834340213c9e0ad6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6399,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 177,
"path": "/network/segmentation/utils.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": " \nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\n\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass _SimpleSegmentationModel(nn.Module):\n def __init__(self, backbone, classifier, aux_classifier=None):\n super(_SimpleSegmentationModel, self).__init__()\n self.backbone = backbone\n self.classifier = classifier\n self.aux_classifier = aux_classifier\n\n def forward(self, x):\n input_shape = x.shape[-2:]\n features = self.backbone(x)\n\n x = features[\"out\"]\n #print(x.shape)\n x = self.classifier(x)\n x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)\n return x\n\n\nclass conv2DBatchNormRelu(nn.Module):\n def __init__(\n self,\n in_channels,\n n_filters,\n k_size,\n stride,\n padding,\n bias=True,\n dilation=1,\n is_batchnorm=True,\n dropout=0.0,\n ):\n super(conv2DBatchNormRelu, self).__init__()\n\n conv_mod = nn.Conv2d(\n int(in_channels),\n int(n_filters),\n kernel_size=k_size,\n padding=padding,\n stride=stride,\n bias=bias,\n dilation=dilation,\n )\n if is_batchnorm:\n self.cbr_unit = nn.Sequential(\n conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True)\n )\n else:\n self.cbr_unit = nn.Sequential(conv_mod, nn.ReLU(inplace=True))\n\n def forward(self, inputs):\n outputs = self.cbr_unit(inputs)\n return outputs\n \nclass segnetDown2(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown2, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\n\nclass segnetDown3(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown3, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\nclass segnetDown3(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown3, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\nclass segnetDown4(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetDown4, self).__init__()\n self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)\n self.conv4 = conv2DBatchNormRelu(out_size, out_size, 3, 
1, 1)\n self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n outputs = self.conv4(outputs)\n unpooled_shape = outputs.size()\n outputs, indices = self.maxpool_with_argmax(outputs)\n return outputs, indices, unpooled_shape\n\n\nclass segnetUp2(nn.Module):\n def __init__(self, in_size, out_size, dropout=0.0):\n super(segnetUp2, self).__init__()\n self.unpool = nn.MaxUnpool2d(2, 2)\n self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n \n def forward(self, inputs, indices, output_shape):\n outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)\n outputs = self.conv1(outputs)\n outputs = self.conv2(outputs)\n return outputs\n\n\nclass segnetUp3(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetUp3, self).__init__()\n self.unpool = nn.MaxUnpool2d(2, 2)\n self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n\n def forward(self, inputs, indices, output_shape):\n outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)\n outputs = self.conv1(outputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n return outputs\n\nclass segnetUp4(nn.Module):\n def __init__(self, in_size, out_size):\n super(segnetUp4, self).__init__()\n self.unpool = nn.MaxUnpool2d(2, 2)\n self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv2 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv3 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)\n self.conv4 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)\n\n def forward(self, inputs, indices, output_shape):\n outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape)\n outputs = self.conv1(outputs)\n outputs = self.conv2(outputs)\n outputs = self.conv3(outputs)\n outputs = self.conv4(outputs)\n return outputs"
},
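The down blocks above return the max-pool indices plus the pre-pool shape precisely so the matching up blocks can unpool back to the same spatial size; a quick shape round-trip, assuming the blocks are importable from the module above:

import torch
from network.segmentation.utils import segnetDown2, segnetUp2  # path as in this repo

down = segnetDown2(3, 64)
up = segnetUp2(64, 3)

x = torch.randn(1, 3, 64, 64)
y, indices, shape = down(x)   # y: (1, 64, 32, 32) after the 2x2 max-pool
out = up(y, indices, shape)   # MaxUnpool2d restores the saved pre-pool shape
assert out.shape == x.shape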
{
"alpha_fraction": 0.5160167217254639,
"alphanum_fraction": 0.5353760719299316,
"avg_line_length": 36.54450225830078,
"blob_id": "cc31680520386a582fd0d8db09b9718afc2ac6fa",
"content_id": "b0d7dbc77eb6bdd7b36be4dc0daa64fd052fb84d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7180,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 191,
"path": "/dataset/nyu.py",
"repo_name": "HaoranZhao1988/DDAD",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\n\nimport os\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom scipy.io import loadmat\nimport numpy as np\nimport glob\nfrom torchvision import transforms\nimport random\n\nimport matplotlib.pyplot as plt\n\n\ndef colormap(N=256, normalized=False):\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\nclass NYUv2(data.Dataset):\n \"\"\"NYUv2 depth dataset loader.\n \n **Parameters:**\n - **root** (string): Root directory path.\n - **split** (string, optional): 'train' for training set, and 'test' for test set. Default: 'train'.\n - **num_classes** (string, optional): The number of classes, must be 40 or 13. Default:13.\n - **transform** (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. Default: None.\n - **target_transforms** (callable, optional): A list of function/transform that takes in the target and transform it. Default: None.\n - **ds_type** (string, optional): To pick samples with labels or not. Default: 'labeled'.\n \"\"\"\n cmap = colormap()\n\n def __init__(self,\n root,\n split='train',\n num_classes=13,\n transform=None,\n ds_type='labeled'):\n\n assert(split in ('train', 'test'))\n assert(ds_type in ('labeled', 'unlabeled'))\n self.root = root\n self.split = split\n self.ds_type = ds_type\n self.transform = transform\n self.num_classes = num_classes\n self.train_idx = np.array([255, ] + list(range(num_classes)))\n\n if ds_type == 'labeled':\n split_mat = loadmat(os.path.join(\n self.root, 'nyuv2-meta-data', 'splits.mat'))\n\n idxs = split_mat[self.split+'Ndxs'].reshape(-1)\n\n self.images = [os.path.join(self.root, '480_640', 'IMAGE', '%04d.png' % (idx))\n for idx in idxs]\n if self.num_classes == 13:\n self.targets = [os.path.join(self.root, 'nyuv2-meta-data', '%s_labels_13' % self.split, 'new_nyu_class13_%04d.png' % idx)\n for idx in idxs]\n elif self.num_classes == 40:\n self.targets = [os.path.join(self.root, '480_640', 'SEGMENTATION', '%04d.png' % idx)\n for idx in idxs]\n else:\n raise ValueError(\n 'Invalid number of classes! Please use 13 or 40')\n else:\n self.images = [glob.glob(os.path.join(\n self.root, 'unlabeled_images/*.png'))]\n print(self.split, len(self.images))\n\n\n def __getitem__(self, idx):\n if self.ds_type == 'labeled':\n image = Image.open(self.images[idx])\n target = Image.open(self.targets[idx])\n\n if self.transform:\n image, target = self.transform(image, target)\n #print(target)\n target = self.train_idx[target]\n return image, target\n else:\n image = Image.open(self.images[idx])\n if self.transforms is not None:\n image = self.transforms(image)\n image = transforms.ToTensor()(image)\n return image, None\n\n def __len__(self):\n return len(self.images)\n\n @classmethod\n def decode_target(cls, target):\n target = (target+1).astype('uint8') # 255 -> 0, 0->1, 1->2\n return cls.cmap[target]\n\nclass NYUv2Depth(data.Dataset):\n \"\"\"NYUv2 depth dataset loader.\n \n **Parameters:**\n - **root** (string): Root directory path.\n - **split** (string, optional): 'train' for training set, and 'test' for test set. 
Default: 'train'.\n - **num_classes** (string, optional): The number of classes, must be 40 or 13. Default:13.\n - **transform** (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. Default: None.\n - **target_transforms** (callable, optional): A list of function/transform that takes in the target and transform it. Default: None.\n - **ds_type** (string, optional): To pick samples with labels or not. Default: 'labeled'.\n \"\"\"\n cmap = colormap()\n\n def __init__(self,\n root,\n split='train',\n num_classes=13,\n transform=None,\n #target_transforms=None,\n ds_type='labeled'):\n\n assert(split in ('train', 'test'))\n assert(ds_type in ('labeled', 'unlabeled'))\n\n self.root = root\n self.split = split\n self.ds_type = ds_type\n self.transform = transform\n\n self.num_classes = num_classes\n\n self.train_idx = np.array([255, ] + list(range(num_classes)))\n \n if ds_type == 'labeled':\n split_mat = loadmat(os.path.join(\n self.root, 'nyuv2-meta-data', 'splits.mat'))\n\n idxs = split_mat[self.split+'Ndxs'].reshape(-1)\n self.images = [os.path.join(self.root, '480_640', 'IMAGE', '%d.png' % (idx-1))\n for idx in idxs]\n if self.num_classes == 13:\n self.targets = [os.path.join(self.root, 'nyuv2-meta-data', '%s_labels_13' % self.split, 'new_nyu_class13_%04d.png' % idx)\n for idx in idxs]\n elif self.num_classes == 40:\n self.targets = [os.path.join(self.root, '480_640', 'SEGMENTATION', '%04d.png' % idx)\n for idx in idxs]\n else:\n raise ValueError(\n 'Invalid number of classes! Please use 13 or 40')\n self.depths = [os.path.join(\n self.root, 'FINAL_480_640', 'DEPTH', '%04d.png' % idx) for idx in idxs]\n else:\n self.images = [glob.glob(os.path.join(\n self.root, 'unlabeled_images/*.png'))]\n\n def __getitem__(self, idx):\n if self.ds_type == 'labeled':\n image = Image.open(self.images[idx])\n depth = Image.open(self.depths[idx])\n #print(np.array(depth,dtype='float').max())\n if self.transform:\n image, depth = self.transform(image, depth)\n return image, depth / 1000\n else:\n image = Image.open(self.images[idx])\n if self.transform is not None:\n image = self.transform(image)\n #image = transforms.ToTensor()(image)\n return image, None\n\n def __len__(self):\n return len(self.images)\n\n @classmethod\n def decode_target(cls, target):\n cm = plt.get_cmap('jet')\n target = (target/7).clip(0,1)\n target = cm(target)[:,:,:,:3]\n return target\n \n"
}
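The `train_idx = np.array([255, ] + list(range(num_classes)))` lookup above is a compact relabeling trick: indexing it with the raw mask shifts every class down by one and sends the raw void label 0 to the ignore index 255. The trick in isolation:

import numpy as np

num_classes = 13
train_idx = np.array([255] + list(range(num_classes)))
raw = np.array([0, 1, 2, 13])  # raw NYUv2 labels, 0 = void
print(train_idx[raw])          # -> [255   0   1  12]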
] | 16 |
Willsr71/MulticraftAPI
|
https://github.com/Willsr71/MulticraftAPI
|
c8d17741879471e99b3b342902872ed57411e63a
|
d8b8e245e81fffd436131335c39f5afc8a02014f
|
1555c4f34aef373959e8655eed0d1c34cca0f43b
|
refs/heads/master
| 2020-12-24T21:00:52.692119 | 2016-05-26T01:23:41 | 2016-05-26T01:23:41 | 59,411,297 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6137946248054504,
"alphanum_fraction": 0.6144686341285706,
"avg_line_length": 35.785125732421875,
"blob_id": "7634d223e59f6ada16944d29c7df183b8ebb5145",
"content_id": "b882f20ee5c6695b4442f88205cfa3eea57338dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4451,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 121,
"path": "/servermanager.py",
"repo_name": "Willsr71/MulticraftAPI",
"src_encoding": "UTF-8",
"text": "import time\nimport util\nimport shutil\nimport threading\nimport multicraftapi\nfrom time import gmtime, strftime\n\nconfig = util.get_json_file(\"hidden_config.json\")\nserver_data = util.get_json_file(\"server_data.json\")\napi = multicraftapi.MulticraftAPI(config[\"multicraftapi\"][\"location\"], config[\"multicraftapi\"][\"user\"], config[\"multicraftapi\"][\"key\"], config[\"debug\"][\"show_api_request_info\"])\n\nservers = {}\nactive_backups = {}\n\n\nclass ServerBackupThread(threading.Thread):\n def __init__(self, server):\n threading.Thread.__init__(self)\n self.server = server\n\n def run(self):\n start_time = round(time.time())\n\n server_name = self.server[\"name\"]\n server_name = server_name.replace(\" \", \"_\")\n server_name = server_name.lower()\n\n server_location = config[\"server_location\"] + self.server[\"dir\"]\n\n temp_location = strftime(config[\"temp_location\"], gmtime(start_time))\n temp_location = temp_location.replace(\"{SERVER_NAME}\", server_name)\n\n backup_location = strftime(config[\"backup_location\"], gmtime(start_time))\n backup_location = backup_location.replace(\"{SERVER_NAME}\", server_name)\n\n print(server_location, \" => \", temp_location)\n\n string = strftime(\"Backing up \" + self.server[\"name\"] + \". Last backup took %M minutes and %S seconds.\", gmtime(server_data[\"backup_times\"][self.server[\"id\"]]))\n print(string)\n api.send_all_console_command(\"say \" + string)\n\n start_time = time.time()\n\n try:\n util.copy_directory(server_location, temp_location)\n except PermissionError:\n string = \"Backup for server \" + self.server[\"name\"] + \" failed. Permission Error.\"\n print(string)\n api.send_all_console_command(\"say \" + string)\n\n finish_time = time.time() - start_time\n\n string = strftime(\"Finished backing up \" + self.server[\"name\"] + \". 
Backup took %M minutes and %S seconds.\", gmtime(finish_time))\n print(string)\n api.send_all_console_command(\"say \" + string)\n\n api.start_server(self.server[\"id\"])\n server_data[\"restarts\"][self.server[\"id\"]] = time.time()\n server_data[\"backup_times\"][self.server[\"id\"]] = finish_time\n\n util.set_json_file(\"server_data.json\", server_data)\n\n print(temp_location, \" => \", backup_location)\n\n util.zip_directory(temp_location, backup_location, config[\"debug\"][\"show_folders_in_backup_progress\"], config[\"debug\"][\"show_files_in_backup_progress\"])\n\n print(\"Finished zipping \" + self.server[\"name\"] + \".\")\n\n del active_backups[self.server[\"id\"]]\n\n\ndef get_servers():\n global servers\n\n server_list = api.list_servers()[\"data\"][\"Servers\"]\n for server in server_list:\n servers[server] = api.get_server(server)[\"data\"][\"Server\"]\n if server not in server_data[\"restarts\"]:\n server_data[\"restarts\"][server] = 0\n if server not in server_data[\"backup_times\"]:\n server_data[\"backup_times\"][server] = 0\n\n util.set_json_file(\"server_data.json\", server_data)\n\n\ndef check_servers():\n if config[\"debug\"][\"show_server_status_info\"]:\n print(\"==================================================\")\n for server in servers:\n if round(time.time() - server_data[\"restarts\"][server]) < config[\"ignore_after_start_delay\"]:\n if config[\"debug\"][\"show_server_status_info\"]:\n print(\"Skipping server \" + servers[server][\"name\"])\n continue\n\n status = api.get_server_status(server)[\"data\"][\"status\"]\n if config[\"debug\"][\"show_server_status_info\"]:\n print(status + (\" \" if status == \"online\" else \" \") + servers[server][\"name\"])\n\n if status == \"offline\" and server not in active_backups:\n server_to_pass = servers[server]\n server_to_pass[\"id\"] = server\n\n thread = ServerBackupThread(server_to_pass)\n thread.start()\n active_backups[server] = thread\n\n\nprint(\"Starting initial check...\")\n\nget_servers()\ncheck_servers()\n\nprint(\"Startup done. Polling every \" + str(config[\"polling_interval\"]) + \" seconds.\")\ntime.sleep(config[\"polling_interval\"] - (time.time() % config[\"polling_interval\"]))\nwhile True:\n t = time.time()\n\n if round(t) % config[\"polling_interval\"] == 0:\n check_servers()\n get_servers()\n time.sleep(config[\"polling_interval\"] - (time.time() % config[\"polling_interval\"]))\n"
},
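The main loop above aligns its wake-ups to wall-clock multiples of the polling interval with `sleep(interval - time.time() % interval)`. The pattern in isolation, with a placeholder `poll` instead of the script's check/get calls (three ticks here for the demo; the real script loops forever):

import time

def poll():
    # Placeholder for check_servers()/get_servers() in the script above.
    print("tick", round(time.time()))

interval = 5
for _ in range(3):
    # Sleep until the next multiple of `interval` seconds, so polls land at
    # :00, :05, :10, ... regardless of how long poll() itself takes.
    time.sleep(interval - (time.time() % interval))
    poll()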
{
"alpha_fraction": 0.6103542447090149,
"alphanum_fraction": 0.6246594190597534,
"avg_line_length": 22.677419662475586,
"blob_id": "3b15e06a23b38d6441426b1a824b0dd719c7e0d6",
"content_id": "30d2d96541d9381106305424f927f45f4819240d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 62,
"path": "/util.py",
"repo_name": "Willsr71/MulticraftAPI",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport json\nimport shutil\nimport zipfile\n\n\ndef get_json_file(file_name):\n try:\n return json.loads(open(file_name).read())\n except FileNotFoundError:\n print(\"File \\\"\" + file_name + \"\\\" does not exist\")\n sys.exit(1)\n\n\ndef set_json_file(file_name, json_arr, indents=True):\n if indents:\n indents = 2\n else:\n indents = None\n\n return open(file_name, 'w').write(json.dumps(json_arr, indent=indents))\n\n\ndef print_line(w):\n sys.stdout.write(w)\n sys.stdout.flush()\n\n\ndef copy_directory(source, dest):\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n shutil.copytree(source, dest)\n\n\ndef move_file(source, dest):\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n shutil.move(source, dest)\n\n\ndef zip_directory(directory, zip_location, verbose=False, very_verbose=False):\n if not os.path.exists(os.path.dirname(zip_location)):\n os.makedirs(os.path.dirname(zip_location))\n zip_file = zipfile.ZipFile(zip_location, \"w\", zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(directory):\n if verbose:\n print(root)\n\n for file in files:\n if very_verbose:\n print(file)\n\n zip_file.write(os.path.join(root, file))\n zip_file.close()\n\n\nclass colors:\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n END = '\\033[0m'\n"
},
{
"alpha_fraction": 0.6450794339179993,
"alphanum_fraction": 0.6469447016716003,
"avg_line_length": 38.53687286376953,
"blob_id": "640f191b99864a25665d39bf6d4cdecf1cba6001",
"content_id": "189b3423286ae943c28a22ad637dcddabb4213cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13403,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 339,
"path": "/multicraftapi.py",
"repo_name": "Willsr71/MulticraftAPI",
"src_encoding": "UTF-8",
"text": "import requests\nimport hashlib\nimport hmac\nimport json\nfrom util import print_line, colors\n\n\nclass MulticraftAPI:\n multicraft_location = \"\"\n multicraft_user = \"\"\n multicraft_key = \"\"\n debug = False\n\n def __init__(self, location, user, key, debug=False):\n self.multicraft_location = location\n self.multicraft_user = user\n self.multicraft_key = key\n self.debug = debug\n\n def print_debug(self, w):\n if self.debug:\n print_line(w)\n\n def send_request(self, operation, content=None):\n params = {}\n if content is not None:\n params = content\n\n params[\"_MulticraftAPIMethod\"] = operation\n params[\"_MulticraftAPIUser\"] = self.multicraft_user\n\n keystr = \"\"\n\n for param in params:\n keystr += param + str(params[param])\n\n key = hmac.new(self.multicraft_key.encode('utf-8'), keystr.encode('utf-8'), hashlib.sha256).hexdigest()\n params[\"_MulticraftAPIKey\"] = key\n\n request = requests.post(self.multicraft_location, params)\n\n self.print_debug(colors.YELLOW + str(request.elapsed) + colors.END + \" / \")\n self.print_debug(colors.GREEN if request.status_code == 200 else colors.RED)\n self.print_debug(str(request.status_code) + \" \" + request.reason + colors.END)\n\n if not request.status_code == 200:\n self.print_debug(\"\\n\")\n return {}\n\n result = json.loads(request.text)\n\n self.print_debug(\" / \")\n self.print_debug(colors.GREEN + \"success\" if result[\"success\"] else colors.RED + \"error\")\n if not result[\"success\"]:\n self.print_debug(\": \")\n\n error_string = \"\"\n for error in result[\"errors\"]:\n error_string += error + \", \"\n\n self.print_debug(error_string[:-2])\n\n self.print_debug(colors.END + \"\\n\")\n\n return result\n\n # User functions\n\n def list_users(self):\n return self.send_request(\"listUsers\")\n\n def find_users(self, field, value):\n return self.send_request(\"findUsers\", {\"field\": field, \"value\": value})\n\n def get_user(self, user_id):\n return self.send_request(\"getUser\", {\"id\": user_id})\n\n def get_current_user(self):\n return self.send_request(\"getCurrentUser\")\n\n def update_user(self, user_id, field, value):\n return self.send_request(\"updateUser\", {\"id\": user_id, \"field\": field, \"value\": value})\n\n def create_user(self, name, email, password):\n return self.send_request(\"createUser\", {\"name\": name, \"email\": email, \"password\": password})\n\n def delete_user(self, user_id):\n return self.send_request(\"deleteUser\", {\"id\": user_id})\n\n def get_user_role(self, user_id, server_id):\n return self.send_request(\"getUserRole\", {\"user_id\": user_id, \"server_id\": server_id})\n\n def set_user_role(self, user_id, server_id, role):\n return self.send_request(\"setUserRole\", {\"user_id\": user_id, \"server_id\": server_id, \"role\": role})\n\n def get_user_ftp_access(self, user_id, server_id):\n return self.send_request(\"getUserFtpAccess\", {\"user_id\": user_id, \"server_id\": server_id})\n\n def set_user_fep_access(self, user_id, server_id, mode):\n return self.send_request(\"setUserFtpAccess\", {\"user_id\": user_id, \"server_id\": server_id, \"mode\": mode})\n\n def get_user_id(self, name):\n return self.send_request(\"getUserId\", {\"name\": name})\n\n def validate_user(self, name, password):\n return self.send_request(\"validateUser\", {\"name\": name, \"password\": password})\n\n def generate_user_api_key(self, user_id):\n return self.send_request(\"generateUserApiKey\", {\"user_id\": user_id})\n\n def get_user_api_key(self, user_id):\n return 
self.send_request(\"getUserApiKey\", {\"user_id\": user_id})\n\n def remove_user_api_key(self, user_id):\n return self.send_request(\"removeUserApiKey\", {\"user_id\": user_id})\n\n # Player functions\n\n def list_players(self, server_id):\n return self.send_request(\"listPlayers\", {\"server_id\": server_id})\n\n def find_players(self, server_id, field, value):\n return self.send_request(\"findPlayers\", {\"server_id\": server_id, \"field\": field, \"value\": value})\n\n def get_player(self, player_id):\n return self.send_request(\"getPlayer\", {\"id\": player_id})\n\n def update_player(self, player_id, field, value):\n return self.send_request(\"updatePlayer\", {\"id\": player_id, \"field\": field, \"value\": value})\n\n def create_player(self, server_id, name):\n return self.send_request(\"createPlayer\", {\"server_id\": server_id, \"name\": name})\n\n def delete_player(self, player_id):\n return self.send_request(\"deletePlayer\", {\"id\": player_id})\n\n def assign_player_to_user(self, player_id, user_id):\n return self.send_request(\"assignPlayerToUser\", {\"player_id\": player_id, \"user_id\": user_id})\n\n # Command functions\n\n def list_commands(self, server_id):\n return self.send_request(\"listCommands\", {\"server_id\": server_id})\n\n def find_commands(self, server_id, field, value):\n return self.send_request(\"findCommands\", {\"server_id\": server_id, \"field\": field, \"value\": value})\n\n def get_command(self, command_id):\n return self.send_request(\"getCommand\", {\"id\": command_id})\n\n def update_command(self, command_id, field, value):\n return self.send_request(\"updateCommand\", {\"id\": command_id, \"field\": field, \"value\": value})\n\n def create_command(self, server_id, name, role, chat, response, run):\n return self.send_request(\"createCommand\", {\"server_id\": server_id, \"name\": name, \"role\": role, \"chat\": chat, \"response\": response, \"run\": run})\n\n def delete_command(self, command_id):\n return self.send_request(\"deleteCommand\", {\"id\": command_id})\n\n # Server functions\n\n def list_servers(self):\n return self.send_request(\"listServers\")\n\n def find_servers(self, field, value):\n return self.send_request(\"findServers\", {\"field\": field, \"value\": value})\n\n def list_servers_by_connection(self, connection_id):\n return self.send_request(\"listServersByConnection\", {\"connection_id\", connection_id})\n\n def list_servers_by_owner(self, user_id):\n return self.send_request(\"listServersByOwner\", {\"user_id\": user_id})\n\n def get_server(self, server_id):\n return self.send_request(\"getServer\", {\"id\": server_id})\n\n def update_server(self, server_id, field, value):\n return self.send_request(\"updateServer\", {\"id\": server_id, \"field\": field, \"value\": value})\n\n def create_server_on(self, daemon_id=0, no_commands=0, no_setup_script=0):\n return self.send_request(\"createServerOn\", {\"daemon_id\": daemon_id, \"no_commands\": no_commands, \"no_setup_script\": no_setup_script})\n\n def create_server(self, name=\"\", port=0, players=0, no_setup_script=0):\n return self.send_request(\"createServer\", {\"name\": name, \"port\": port, \"players\": players, \"no_setup_script\": no_setup_script})\n\n def suspend_server(self, server_id, stop=1):\n return self.send_request(\"suspendServer\", {\"id\": server_id, \"stop\": stop})\n\n def resume_server(self, server_id, start=1):\n return self.send_request(\"resumeServer\", {\"id\": server_id, \"start\": start})\n\n def delete_server(self, server_id, delete_dir=\"no\", 
delete_user=\"no\"):\n return self.send_request(\"deleteServer\", {\"id\": server_id, \"delete_dir\": delete_dir, \"delete_user\": delete_user})\n\n def get_server_status(self, server_id, player_list=0):\n return self.send_request(\"getServerStatus\", {\"id\": server_id, \"player_list\": player_list})\n\n def get_server_owner(self, server_id):\n return self.send_request(\"getServerOwner\", {\"server_id\": server_id})\n\n def set_server_owner(self, server_id, user_id):\n return self.send_request(\"setServerOwner\", {\"server_id\": server_id, \"user_id\": user_id})\n\n def get_server_config(self, server_id):\n return self.send_request(\"getServerConfig\", {\"id\": server_id})\n\n def update_server_config(self, server_id, field, value):\n return self.send_request(\"updateServerConfig\", {\"server_id\": server_id, \"field\": field, \"value\": value})\n\n def start_server_backup(self, server_id):\n return self.send_request(\"startServerBackup\", {\"id\": server_id})\n\n def get_server_backup_status(self, server_id):\n return self.send_request(\"getServerBackupStatus\", {\"id\": server_id})\n\n def start_server(self, server_id):\n return self.send_request(\"startServer\", {\"id\": server_id})\n\n def stop_server(self, server_id):\n return self.send_request(\"stopServer\", {\"id\": server_id})\n\n def restart_server(self, server_id):\n return self.send_request(\"restartServer\", {\"id\": server_id})\n\n def kill_server(self, server_id):\n return self.send_request(\"killServer\", {\"id\": server_id})\n\n def start_all_servers(self):\n return self.send_request(\"startAllServers\")\n\n def stop_all_servers(self):\n return self.send_request(\"stopAllServers\")\n\n def restart_all_servers(self):\n return self.send_request(\"restartAllServers\")\n\n def kill_all_servers(self):\n return self.send_request(\"killAllServers\")\n\n def send_console_command(self, server_id, command):\n return self.send_request(\"sendConsoleCommand\", {\"server_id\": server_id, \"command\": command})\n\n def send_all_console_command(self, command):\n return self.send_request(\"sendAllConsoleCommand\", {\"command\": command})\n\n def run_command(self, server_id, command_id, run_for=0):\n return self.send_request(\"runCommand\", {\"server_id\": server_id, \"command_id\": command_id, \"run_for\": run_for})\n\n def get_server_log(self, server_id):\n return self.send_request(\"getServerLog\", {\"id\": server_id})\n\n def clear_server_log(self, server_id):\n return self.send_request(\"clearServerLog\", {\"id\": server_id})\n\n def get_server_chat(self, server_id):\n return self.send_request(\"getServerChat\", {\"id\": server_id})\n\n def clear_server_chat(self, server_id):\n return self.send_request(\"clearServerChat\", {\"id\": server_id})\n\n def send_server_control(self, server_id, command):\n return self.send_request(\"sendServerControl\", {\"id\": server_id, \"command\": command})\n\n def get_server_resources(self, server_id):\n return self.send_request(\"getServerResources\", {\"id\": server_id})\n\n def move_server(self, server_id, daemon_id):\n return self.send_request(\"moveServer\", {\"server_id\": server_id, \"daemon_id\": daemon_id})\n\n # Daemon functions\n\n def list_connections(self):\n return self.send_request(\"listConnections\")\n\n def find_connections(self, field, value):\n return self.send_request(\"findConnections\", {\"field\": field, \"value\": value})\n\n def get_connection(self, connection_id):\n return self.send_request(\"getConnection\", {\"id\": connection_id})\n\n def remove_connection(self, 
connection_id):\n return self.send_request(\"removeConnection\", {\"id\", connection_id})\n\n def get_connection_status(self, connection_id):\n return self.send_request(\"getConnectionStatus\", {\"id\": connection_id})\n\n def get_connection_memory(self, connection_id, include_suspended=0):\n return self.send_request(\"getConnectionMemory\", {\"id\": connection_id, \"include_suspended\": include_suspended})\n\n def get_statistics(self, daemon_id=0, include_suspended=0):\n return self.send_request(\"getStatistics\", {\"daemon_id\": daemon_id, \"include_suspended\": include_suspended})\n\n # Settings functions\n\n def list_settings(self):\n return self.send_request(\"listSettings\")\n\n def get_setting(self, key):\n return self.send_request(\"getSetting\", {\"key\": key})\n\n def set_setting(self, key, value):\n return self.send_request(\"setSetting\", {\"key\": key, \"value\": value})\n\n def delete_setting(self, key):\n return self.send_request(\"deleteSetting\", {\"key\": key})\n\n # Schedule functions\n\n def list_schedules(self, server_id):\n return self.send_request(\"listSchedules\", {\"server_id\": server_id})\n\n def find_schedules(self, server_id, field, value):\n return self.send_request(\"findSchedules\", {\"server_id\": server_id, \"field\": field, \"value\": value})\n\n def get_schedule(self, schedule_id):\n return self.send_request(\"getSchedule\", {\"id\": schedule_id})\n\n def update_schedule(self, schedule_id, field, value):\n return self.send_request(\"updateSchedule\", {\"id\": schedule_id, \"field\": field, \"value\": value})\n\n def create_schedule(self, server_id, name, ts, interval, cmd, status, for_):\n return self.send_request(\"createSchedule\", {\"server_id\": server_id, \"name\": name, \"ts\": ts, \"interval\": interval, \"cmd\": cmd, \"status\": status, \"for\": for_})\n\n def delete_schedule(self, server_id):\n return self.send_request(\"deleteSchedule\", {\"id\": server_id})\n\n # Database functions\n\n def get_database_info(self, server_id):\n return self.send_request(\"getDatabaseInfo\", {\"server_id\": server_id})\n\n def create_database(self, server_id):\n return self.send_request(\"createDatabase\", {\"server_id\": server_id})\n\n def change_database_password(self, server_id):\n return self.send_request(\"changeDatabasePassword\", {\"server_id\": server_id})\n\n def delete_database(self, server_id):\n return self.send_request(\"deleteDatabase\", {\"server_id\": server_id})\n"
}
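`send_request` above authenticates each call by concatenating every `key + str(value)` pair in insertion order and signing the string with HMAC-SHA256 under the panel API key. The signature step on its own, with dummy values:

import hashlib
import hmac

api_key = "my-secret-key"  # dummy value, not a real key
params = {
    "_MulticraftAPIMethod": "listServers",
    "_MulticraftAPIUser": "admin",
}
# Same key+value concatenation that send_request builds before posting.
keystr = "".join(k + str(v) for k, v in params.items())
params["_MulticraftAPIKey"] = hmac.new(
    api_key.encode("utf-8"), keystr.encode("utf-8"), hashlib.sha256
).hexdigest()
# `params` is now the signed form body POSTed to the panel's API endpoint.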
] | 3 |
marcoceppi/findon-cl
|
https://github.com/marcoceppi/findon-cl
|
60f0ab21d3d5e6b25bc2a70cbba59dab4eac435b
|
215e6b3c872c9958dda5e29833ba878ad2c1e28b
|
fce60d8a53a899bd045ec62f490ac5480965b240
|
refs/heads/master
| 2022-07-31T16:45:56.855066 | 2019-11-22T10:54:03 | 2019-11-22T10:57:11 | 223,384,590 | 0 | 0 | null | 2019-11-22T10:55:03 | 2019-11-22T10:57:28 | 2022-07-06T20:23:56 |
Python
|
[
{
"alpha_fraction": 0.6818450689315796,
"alphanum_fraction": 0.6818450689315796,
"avg_line_length": 31.5,
"blob_id": "a9877d73cc2d741bd91f0b7adf487d065e10fd6b",
"content_id": "e0c44534c95e1c52a1082a7ad37c85c2615940b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1691,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 52,
"path": "/tests/test_scrapers_craigslist.py",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "\nimport pytest\nimport asynctest\n\nfrom unittest.mock import patch\n\nfrom findoncl.scrapers.craigslist import Craigslist\n\n\ndef test_Craigslist_build_url():\n c = Craigslist(regions=[], categories=[])\n assert 'https://my-region.craigslist.org/search/scategories' == c.build_url('my-region', 'scategories')\n\n\ndef test_Craigslist():\n c = Craigslist(regions=[], categories='cta')\n assert [] == c.regions\n assert ['cta'] == c.categories\n\n c = Craigslist(regions='washdc', categories=['cta'])\n assert ['washdc'] == c.regions\n assert ['cta'] == c.categories\n\n c = Craigslist(regions='washdc', categories='cta')\n assert ['washdc'] == c.regions\n assert ['cta'] == c.categories\n\n\ndef test_Craigslist_bad_values():\n with pytest.raises(TypeError):\n Craigslist(regions=[], categories=None)\n\n with pytest.raises(TypeError):\n Craigslist(regions=None, categories=[])\n\n\[email protected]\n@patch('findoncl.scrapers.craigslist.ClientSession')\nasync def test_Craigslist_close(mock_client_session):\n mock_client_session.return_value.close = asynctest.CoroutineMock()\n c = Craigslist(regions=[], categories=[])\n await c.close()\n mock_client_session.return_value.close.assert_called()\n\n\[email protected]\nasync def test_Craigslist_run():\n with patch.object(Craigslist, 'search_site', new_class=asynctest.CoroutineMock) as mock_search_site:\n mock_search_site.return_value = ['test', 'success', 'a cl record']\n c = Craigslist(regions=['test'], categories=['electronics'])\n records = await c.run()\n mock_search_site.assert_called_with('test', 'electronics')\n assert ['test', 'success', 'a cl record'] == records\n"
},
{
"alpha_fraction": 0.6202531456947327,
"alphanum_fraction": 0.655063271522522,
"avg_line_length": 18.15151596069336,
"blob_id": "9bd6a12f0c9ae75c919e44b170471292aa8de4e8",
"content_id": "91d8db010287e20d654e086abffd18abe250230a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 33,
"path": "/tox.ini",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist=py3\nskipsdist=True\nminversion=2.9.0\nskip_missing_interpreters=True\n\n[testenv]\ndescription=\n run tests with pytest under {basepython} - the posargs specify the tests to run\n\ndeps=\n -r{toxinidir}/requirements.txt\n asynctest\n pytest-asyncio\n pytest<4.0.0\n pytest-cov>=2.5.1\n pytest-html>=1.14.2\n pytest-mock>=1.6.0\n pytest-profiling>=1.2.6\ncommands=\n pytest -s \\\n --cov-report term \\\n --cov=findoncl \\\n -p no:warnings \\\n {posargs}\n\n[testenv:lint]\ndeps=\n -r{toxinidir}/requirements.txt\n flake8\n\ncommands=\n flake8 --show-source --statistics findoncl tests\n"
},
{
"alpha_fraction": 0.4897959232330322,
"alphanum_fraction": 0.6938775777816772,
"avg_line_length": 15.333333015441895,
"blob_id": "202f9277dbe4b10e5d913a730c014f2af6a2b39e",
"content_id": "dcf97db2f4016d6249a9c650e669ba37b561e4b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "aiohttp==3.6.2\nbeautifulsoup4==4.8.1\nlxml==4.4.1\n"
},
{
"alpha_fraction": 0.5822339653968811,
"alphanum_fraction": 0.5866314768791199,
"avg_line_length": 22.6875,
"blob_id": "9fe63d0d5733663ca262fae4bf975573f90fdf9d",
"content_id": "385f1f3a974b6f72508a0425caeefef7e8c29caa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1137,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 48,
"path": "/setup.py",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport re\n\nfrom pathlib import Path\nfrom typing import List\n\nfrom codecs import open\nfrom setuptools import find_packages, setup\n\n\nhere = Path(__file__).parent\n\n\n# Load the package's __init__.py file as a dictionary.\npkg = {}\nwith open(here / 'findoncl' / '__init__.py', 'r', 'utf-8') as f:\n pkg = {k: v for k, v in re.findall(r\"^(__\\w+__) = \\'(.+)\\'\", f.read(), re.M)}\n\n# Load the README\nreadme = ''\nif os.path.exists(here / 'README.md'):\n with open(here / 'README.md', 'r', 'utf-8') as f:\n readme = f.read()\n\nsetup(\n name=pkg['__title__'],\n version=pkg['__version__'],\n description=pkg['__description__'],\n license=pkg['__license__'],\n long_description=readme,\n long_description_content_type='text/markdown',\n url=pkg['__url__'],\n author=pkg['__author__'],\n author_email=pkg['__author_email__'],\n packages=find_packages(),\n package_data={'': ['LICENSE']},\n python_requires='>=3.6',\n install_requires=[\n\n ],\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n ],\n zip_safe=False,\n)\n"
},
{
"alpha_fraction": 0.5780590772628784,
"alphanum_fraction": 0.5991561412811279,
"avg_line_length": 32.71428680419922,
"blob_id": "334fbef9b9784d0286a1d524da13a1c35398f83a",
"content_id": "29ff61db3c4d477cab9329de4f2f476ac910d92d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 7,
"path": "/findoncl/__init__.py",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "\n__title__ = 'findon-cl'\n__version__ = '0.0.1'\n__description__ = 'Find things on CL!'\n__author__ = 'Marco Ceppi'\n__author_email__ = '[email protected]'\n__url__ = 'https://github.com/marcoceppi/findon-cl'\n__license__ = 'GNU General Public License v3.0'\n"
},
{
"alpha_fraction": 0.556620717048645,
"alphanum_fraction": 0.5572198629379272,
"avg_line_length": 29.898147583007812,
"blob_id": "5f9d6a462a383e3396c9b1163c6297523882dba6",
"content_id": "973e4749eed133d26bd81126fbe79ba6d9a34540",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3338,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 108,
"path": "/findoncl/scrapers/craigslist.py",
"repo_name": "marcoceppi/findon-cl",
"src_encoding": "UTF-8",
"text": "\nimport asyncio\nimport collections\n\nfrom aiohttp import ClientSession\nfrom bs4 import BeautifulSoup\n\n\nclass CraigslistRecord(collections.UserDict):\n @classmethod\n def from_dom(cls, *, dom=None, region=None, category=None):\n data = {\n 'id': None,\n 'title': dom.find(id='titletextonly').string,\n 'region': region,\n 'category': category,\n 'price': None,\n 'url': dom.find(rel='canonical').get('href'),\n 'description': dom.find(id='postingbody').get_text(),\n 'meta': {},\n 'created': dom.find('time').get('datetime'),\n }\n\n price = dom.find(class_='price')\n data['price'] = price.string if price else None\n\n for postinfo in dom.find_all(class_='postinginfo'):\n postinfo_text = postinfo.string\n\n if not postinfo_text:\n continue\n\n if 'post id:' in postinfo_text:\n # todo: cast to int?\n data['id'] = postinfo_text.replace('post id: ', '')\n\n for attrgroup in dom.find_all(class_='attrgroup'):\n for attr in attrgroup.find_all('span'):\n attr_data = attr.get_text()\n\n if not attr_data:\n continue\n\n if ':' in attr_data:\n key, val = attr_data.split(': ', 1)\n else:\n key = '_title'\n val = attr_data\n\n data['meta'][key] = val\n\n return cls(**data)\n\n\nclass Craigslist():\n def __init__(self, regions, categories):\n\n if isinstance(regions, str):\n regions = [regions]\n\n if isinstance(categories, str):\n categories = [categories]\n\n if not isinstance(regions, collections.Sequence):\n raise TypeError('regions must be a list type')\n\n if not isinstance(categories, collections.Sequence):\n raise TypeError('cagetories must be a list type')\n\n self.regions = regions\n self.categories = categories\n self.session = ClientSession()\n\n async def run(self):\n sites = []\n for region in self.regions:\n for category in self.categories:\n sites.append(self.search_site(region, category))\n\n results = await asyncio.gather(*sites)\n return [item for result in results for item in result]\n\n def build_url(self, region, category):\n return f'https://{region}.craigslist.org/search/{category}'\n\n async def search_site(self, region, category):\n url = self.build_url(region, category)\n\n site = await self.fetch(url)\n advert_links = self.parse_search(site)\n data = await asyncio.gather(*[self.fetch(link) for link in advert_links])\n return [CraigslistRecord.from_dom(dom=dom, region=region, category=category) for dom in data]\n\n async def close(self):\n await self.session.close()\n\n async def fetch(self, url):\n async with self.session.get(url) as response:\n text = await response.text()\n return BeautifulSoup(text, 'lxml')\n\n def parse(self, dom):\n links = []\n for link in dom.find_all('a'):\n if 'hdrlnk' not in link.get('class', []):\n continue\n links.append(link.get('href'))\n\n return links\n"
}
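A minimal driver for the scraper above, assuming the package is importable as in the tests; the region and category strings are just example values, and `close()` must be awaited before the loop exits because the `ClientSession` is created in `__init__`:

import asyncio
from findoncl.scrapers.craigslist import Craigslist  # path as in this repo

async def main():
    cl = Craigslist(regions=['washdc'], categories=['sss'])  # example values
    try:
        for record in (await cl.run())[:5]:
            print(record['title'], record['price'], record['url'])
    finally:
        await cl.close()  # release the aiohttp ClientSession

asyncio.run(main())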
] | 6 |
Nikita-3013/NWS
|
https://github.com/Nikita-3013/NWS
|
2cf46ff284f1cd7114c9c51c27e0f71de2d4c79e
|
82a7daa4010a5c72ef80723931476719fa143cec
|
f0fb9e18488bef3b25d8281745367604afb685ba
|
refs/heads/main
| 2023-08-22T04:52:24.314555 | 2021-10-12T16:15:52 | 2021-10-12T16:15:52 | 410,022,754 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6284403800964355,
"alphanum_fraction": 0.6353210806846619,
"avg_line_length": 24.705883026123047,
"blob_id": "f840e44b14ab337464f5dd659022be08481f1592",
"content_id": "b8a2eb3497e582e74ad66c4c400efaad8bbe18fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/tutorial/sampleapp/models.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from django.db import models\nimport json\n\n# Create your models here.\nclass City():\n def __init__(self,name,latitude=0,longitude=0,temperature=0):\n self.name = name\n self.latitude = float(latitude)\n self.longitude = float(longitude)\n self.temperature = float(temperature)\n\n\nclass GridPoint():\n def __init__(self,Id,GridX,GridY):\n self.Id = Id\n self.GridX = GridX\n self.GridY = GridY"
},
{
"alpha_fraction": 0.6431924700737,
"alphanum_fraction": 0.6995305418968201,
"avg_line_length": 35.69565200805664,
"blob_id": "d1b318ea4a19741a4661a76ecbda67bf848f585e",
"content_id": "da57177449a3da2631d72904f103d7f4e2841695",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 23,
"path": "/tutorial/sampleapp/tests/test_NWSProvider.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from unittest import TestCase\nfrom sampleapp.NWSProvider.NWSDataProvider import *\nfrom sampleapp.models import City\n\nclass NWSDataProviderTestCase(TestCase):\n def test_get_valid_gridpoint(self):\n gridpoint = getGridPoint(40.6943,-73.9249)\n self.assertEqual(gridpoint.Id,'OKX')\n \n def test_get_invalid_gridpoint(self):\n gridpoint = getGridPoint(40.6943,40.6943)\n self.assertIsNone(gridpoint)\n\n def test_get_forecast(self):\n cityobj = City('New York',40.6943,-73.9249)\n resultcity = getforecastByCity(cityobj)\n self.assertIsInstance(resultcity,City)\n self.assertIsNotNone(resultcity.temperature)\n\n def test_get_invalidObj_forecast(self):\n cityobj = City('New York',40.6943,40.6943)\n resultcity = getforecastByCity(cityobj)\n self.assertIsNone(resultcity)\n "
},
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.78125,
"avg_line_length": 15,
"blob_id": "114fb79c3566ae96d70aceacf4b2801f86708c6a",
"content_id": "a3eb8f2faf558f8278c2a49214b71e8ed11036a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "# NWS\n National Weather Service\n"
},
{
"alpha_fraction": 0.6721014380455017,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 31.52941131591797,
"blob_id": "5641dc147954552ffab73e69475007f2252f57b8",
"content_id": "515bd36e339ade228337560804167856f0e89ad7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/tutorial/sampleapp/tests/test_citycrud.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from unittest import TestCase\nfrom sampleapp.database.citiesCrud import *\n\nclass citiescrudTestCase(TestCase):\n def test_getCities(self):\n citylist = getCities()\n self.assertGreater(len(citylist) , 0)\n\n def test_getByCityName(self):\n obj = getByCityName('New York')\n self.assertNotEqual(obj.latitude ,0)\n self.assertNotEqual(obj.longitude,0)\n \n def test_invalid_getByCityName(self):\n obj = getByCityName('New Yor')\n self.assertEqual(obj.latitude ,0)\n self.assertEqual(obj.longitude,0)"
},
{
"alpha_fraction": 0.6421621441841125,
"alphanum_fraction": 0.6432432532310486,
"avg_line_length": 35.84000015258789,
"blob_id": "d0ccc6738126a0bce717af434a320524de050d53",
"content_id": "f568c08070ef8ef0b3e9dc06d7ce9301e78321ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 25,
"path": "/tutorial/sampleapp/NWSProvider/NWSDataProvider.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "import requests\nfrom sampleapp.models import *\n\ndef getforecastByCity(cityobj):\n try:\n gridpoint = getGridPoint(cityobj.latitude,cityobj.longitude)\n url='https://api.weather.gov/gridpoints/'+ str(gridpoint.Id)+'/'+str(gridpoint.GridX)+','+ str(gridpoint.GridY)+'/forecast'\n response = requests.get(url)\n jsondata = response.json()\n temp = jsondata['properties']['periods'][0]['temperature']\n city = City(cityobj.name,cityobj.latitude,cityobj.longitude,temp)\n return city\n except:\n return None\n\n\ndef getGridPoint(lat,lng):\n try:\n url='https://api.weather.gov/points/'+ str(lat) +','+ str(lng)\n response = requests.get(url)\n jsondata = response.json()\n gridpoint = GridPoint(jsondata['properties']['gridId'],jsondata['properties']['gridX'],jsondata['properties']['gridY'])\n return gridpoint\n except:\n return None\n "
},
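For readers unfamiliar with the National Weather Service API that NWSDataProvider.py wraps, here is a minimal standalone sketch of the same two-step flow the module performs: resolve a lat/lng to a forecast-office grid cell, then fetch that cell's forecast. The coordinates are illustrative (New York, as used in the tests) and error handling is omitted for brevity:

    import requests

    lat, lng = 40.6943, -73.9249  # example coordinates

    # step 1: /points/{lat},{lng} returns the forecast office and grid cell
    props = requests.get(f'https://api.weather.gov/points/{lat},{lng}').json()['properties']
    office, gx, gy = props['gridId'], props['gridX'], props['gridY']

    # step 2: /gridpoints/{office}/{x},{y}/forecast returns forecast periods
    url = f'https://api.weather.gov/gridpoints/{office}/{gx},{gy}/forecast'
    periods = requests.get(url).json()['properties']['periods']
    print(periods[0]['temperature'])  # temperature of the first forecast period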
{
"alpha_fraction": 0.4959128201007843,
"alphanum_fraction": 0.6975476741790771,
"avg_line_length": 15.681818008422852,
"blob_id": "cf8759e5d54d63d9c54ceea9aa2509c72005f2db",
"content_id": "28f5835aec9815394cc625cab510b49e5df9305e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 22,
"path": "/tutorial/requirements.txt",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "asgiref==3.4.1\ncertifi==2021.5.30\ncharset-normalizer==2.0.6\ncoverage==5.5\nDjango==3.2.7\ndjango-geojson==3.2.0\ndjango-leaflet==0.28.1\ndjango-mssql==1.8\ndjango-utils-six==2.0\ndocopt==0.6.2\nidna==3.2\njsonfield==3.1.0\nmssql-django==1.0\nPillow==8.3.2\npipreqs==0.4.10\npsycopg2==2.9.1\npyodbc==4.0.32\npytz==2021.1\nrequests==2.26.0\nsqlparse==0.4.2\nurllib3==1.26.7\nyarg==0.1.9\n"
},
{
"alpha_fraction": 0.6882352828979492,
"alphanum_fraction": 0.6882352828979492,
"avg_line_length": 31.653846740722656,
"blob_id": "90c470af531fb6633f2c7918424bf2f04e05fa38",
"content_id": "e752dc919f7e132b63742ec53d338d1a08c206be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 850,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 26,
"path": "/tutorial/sampleapp/views.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom sampleapp.models import City\nfrom sampleapp.database.citiesCrud import *\nfrom sampleapp.NWSProvider.NWSDataProvider import *\nimport json\n# Create your views here.\ndef welcome(request):\n return render(request, 'website/welcome.html',{'cities': getCities()})\n\ndef getforecast(request):\n jsondata = {}\n try:\n cityName = request.GET.get('cityName').strip()\n # get city object db by city name\n Cityobj = getByCityName(cityName)\n \n data = getforecastByCity(Cityobj)\n tempObj ={'cityName':data.name,'lat':data.latitude,'long':data.longitude,'temp':data.temperature} \n \n \n jsondata = json.dumps(tempObj)\n except:\n print(\"error\")\n\n return HttpResponse(jsondata, content_type='application/json')\n\n"
},
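The getforecast view above reads a cityName query parameter, so it needs a matching URL route; the project's routing files are not included in this listing. A hypothetical minimal wiring, assuming standard Django 3.x conventions:

    # sampleapp/urls.py (hypothetical -- the actual routing file is not shown here)
    from django.urls import path
    from sampleapp import views

    urlpatterns = [
        path('', views.welcome),
        path('getforecast', views.getforecast),  # called as /getforecast?cityName=New%20York
    ]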
{
"alpha_fraction": 0.6864111423492432,
"alphanum_fraction": 0.6916376352310181,
"avg_line_length": 34.8125,
"blob_id": "551cae52647c574ef736987b570c8c8fd71645b0",
"content_id": "8865e7cd5404ace18f55c3738c15c795d7a1ca1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 16,
"path": "/tutorial/sampleapp/tests/test_views.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from unittest import TestCase\nfrom sampleapp.views import *\nfrom django.test.client import RequestFactory\n\nclass viewsTestCase(TestCase):\n def setUp(self):\n # Every test needs access to the request factory.\n self.factory = RequestFactory()\n\n def test_getforecast(self):\n # Create an instance of a GET request.\n request = self.factory.get('getforecast',{'cityName':'New York'})\n\n # Test my_view() as if it were deployed at /customer/details\n response = getforecast(request)\n self.assertEqual(response.status_code, 200)\n\n"
},
{
"alpha_fraction": 0.5909090638160706,
"alphanum_fraction": 0.6699604988098145,
"avg_line_length": 32.733333587646484,
"blob_id": "416af130599de4985092f498f5d4fb7c86c3cfa5",
"content_id": "734734cd29253fc0c47b7fbfd00c25b6a2eef1e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 15,
"path": "/tutorial/sampleapp/tests/test_models.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from unittest import TestCase\nfrom sampleapp.models import City,GridPoint\n\nclass testmodelsTestCase(TestCase):\n def test_grid(self):\n obj = GridPoint('12',1234,123)\n self.assertEqual(obj.Id,'12')\n self.assertEqual(obj.GridX,1234)\n self.assertEqual(obj.GridY,123)\n\n def test_city(self):\n obj = City('New York',40.1234,72.124)\n self.assertEqual(obj.name,'New York')\n self.assertEqual(obj.latitude,40.1234)\n self.assertEqual(obj.longitude,72.124)\n"
},
{
"alpha_fraction": 0.5955497622489929,
"alphanum_fraction": 0.5994764566421509,
"avg_line_length": 25.310344696044922,
"blob_id": "95d57e76f4ae588df58e528c27ff4200094aedb7",
"content_id": "46e97046e9dd07c591178a96fa40e724d745541d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 29,
"path": "/tutorial/sampleapp/database/citiesCrud.py",
"repo_name": "Nikita-3013/NWS",
"src_encoding": "UTF-8",
"text": "from django.db import connection\nfrom sampleapp.models import *\n\ndef getCities():\n cursor = connection.cursor()\n cityList=[]\n try:\n cursor.execute('SELECT * FROM public.\"cities\"')\n \n result_set = cursor.fetchall()\n for row in result_set:\n cityList.append(City(row[0]))\n finally:\n cursor.close()\n return cityList\n\ndef getByCityName(cityName):\n cursor = connection.cursor()\n CityObj=City(cityName)\n try:\n cursor.execute('SELECT * FROM public.\"cities\" WHERE public.\"cities\".\"city\"=%s',[cityName])\n \n result_set = cursor.fetchall()\n \n for row in result_set:\n CityObj=City(cityName,row[1],row[2])\n finally:\n cursor.close()\n return CityObj\n\n"
}
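One sketch of a tidier variant of the module above: Django cursors support the context-manager protocol, so the try/finally pairs can be written as with-blocks. This is a style alternative under the same schema assumptions, not the repo's code:

    from django.db import connection
    from sampleapp.models import City

    def get_cities():
        with connection.cursor() as cursor:  # cursor is closed automatically on exit
            cursor.execute('SELECT * FROM public."cities"')
            return [City(row[0]) for row in cursor.fetchall()]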
] | 10 |
MehrdadMoghimi/Files
|
https://github.com/MehrdadMoghimi/Files
|
3c7d977d88b47088c8957fddccaa62f4d322eef8
|
8b253718de6b1340ef71726e9e9297e69e8398cc
|
9a5faf0642103559f2434ab258b37796a910c2f2
|
refs/heads/master
| 2023-08-13T13:20:45.823033 | 2021-10-10T19:40:22 | 2021-10-10T19:40:22 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5970303416252136,
"alphanum_fraction": 0.6132343411445618,
"avg_line_length": 41.63380432128906,
"blob_id": "962396869f4022ed59c2ae65ecb7ec5273d44f9c",
"content_id": "b0b5c822273809416643e28384b938711a6304e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15490,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 355,
"path": "/Sener.py",
"repo_name": "MehrdadMoghimi/Files",
"src_encoding": "UTF-8",
"text": "from scipy.ndimage.measurements import label\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nfrom arch import arch_model\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sys\r\nimport time\r\n\r\n\r\ndef safe_space_PM(returns, VARs):\r\n PM = sum((returns < 0) * (returns > VARs) * (returns - VARs))\r\n return PM\r\n\r\n\r\ndef violation_space_PM(returns, VARs):\r\n e = VARs - returns\r\n violations = e > 0\r\n labels = label(violations)\r\n num_of_clusters = labels[1]\r\n clusters = [e[labels[0] == i] for i in range(1, num_of_clusters + 1)]\r\n cluster_centers = [np.mean(np.where(labels[0] == i)[0]) for i in range(1, num_of_clusters + 1)]\r\n cluster_quantity = [np.prod(1 + c) for c in clusters]\r\n PM = 0\r\n for i in range(num_of_clusters):\r\n for j in range(i + 1, num_of_clusters):\r\n PM = PM + (cluster_quantity[i] * cluster_quantity[j] - 1) / (cluster_centers[j] - cluster_centers[i])\r\n return PM\r\n\r\n\r\ndef penalization_measure(returns, VARs, alpha):\r\n PM = ((1 - alpha / 100.0) * violation_space_PM(returns, VARs) + (alpha / 100.0) * safe_space_PM(returns,\r\n VARs)) / np.sum(\r\n returns < 0)\r\n return PM\r\n\r\n\r\ndef HELP(returns):\r\n arch = arch_model(returns, vol='Garch', p=1, o=0, q=1, dist='skewt')\r\n arch_fit = arch.fit(disp='off', last_obs='2017-12-31')\r\n arch_forecast = arch_fit.forecast(start='2018-1-1')\r\n cond_mean = arch_forecast.mean['2018':]\r\n cond_var = arch_forecast.variance['2018':]\r\n q = arch.distribution.ppf([0.01, 0.05], parameters=arch_fit.params[-2:])\r\n print(q)\r\n value_at_risk = -cond_mean.values - np.sqrt(cond_var).values * q[None, :]\r\n value_at_risk = pd.DataFrame(value_at_risk, columns=['1%', '5%'], index=cond_var.index)\r\n return value_at_risk\r\n\r\n\r\ndef forecast_std(returns, weights, window_length, test_len, volatility_model='GARCH', dist='Normal'):\r\n # volatility_model = {GARCH, EGARCH, GJR-GARCH}\r\n # dist = {Normal, t, skewt, ged}\r\n portfolio_returns = returns.dot(weights)\r\n portfolio_returns = portfolio_returns * 100.0\r\n forecast_std_arch = np.zeros(test_len)\r\n forecast_mean_arch = np.zeros(test_len)\r\n print('\\n' + volatility_model + ':')\r\n if volatility_model == 'GJR-GARCH':\r\n for j in range(0, test_len):\r\n loc = returns.shape[0] - test_len + j\r\n progressBar(j, test_len, bar_length=20)\r\n window = portfolio_returns.iloc[loc - window_length:loc]\r\n arch = arch_model(window, p=1, o=1, q=1)\r\n arch_fit = arch.fit(disp=\"off\")\r\n arch_forecast = arch_fit.forecast(horizon=1)\r\n forecast_mean_arch[j] = arch_forecast.mean.iloc[-1, 0]\r\n forecast_std_arch[j] = np.sqrt(arch_forecast.variance.iloc[-1, 0])\r\n else:\r\n for j in range(0, test_len):\r\n loc = returns.shape[0] - test_len + j\r\n progressBar(j, test_len, bar_length=20)\r\n window = portfolio_returns.iloc[loc - window_length:loc]\r\n arch = arch_model(window, mean='AR', dist=dist, vol=volatility_model, p=1, q=1)\r\n arch_fit = arch.fit(disp=\"off\")\r\n arch_forecast = arch_fit.forecast(horizon=1)\r\n forecast_mean_arch[j] = arch_forecast.mean.iloc[-1, 0]\r\n forecast_std_arch[j] = np.sqrt(arch_forecast.variance.iloc[-1, 0])\r\n forecast_mean_arch = forecast_mean_arch / 100.0\r\n forecast_std_arch = forecast_std_arch / 100.0\r\n return forecast_mean_arch, forecast_std_arch\r\n\r\n\r\ndef forecast_mean(portfolio_returns, window_length):\r\n portfolio_returns = portfolio_returns * 100.0\r\n test_len = portfolio_returns.shape[0] - 
window_length\r\n forecast = np.zeros(test_len)\r\n print('\\nForecast mean ARIMA:')\r\n for j in range(0, test_len):\r\n progressBar(j, test_len, bar_length=20)\r\n window = portfolio_returns[j:j + window_length]\r\n arima = ARIMA(window, order=(1, 1, 1))\r\n arima_fit = arima.fit(disp=0)\r\n arima_output = arima_fit.forecast()\r\n forecast[j] = arima_output[0]\r\n forecast = forecast / 100.0\r\n return forecast\r\n\r\n\r\ndef calculate_Var_Covar_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n std_VaR = np.zeros(test_len)\r\n print('\\nVar Covar:')\r\n for j in range(0, test_len):\r\n loc = returns.shape[0] - test_len + j\r\n progressBar(j, test_len, bar_length=20)\r\n window = returns.iloc[loc - window_length:loc, :]\r\n cov_matrix = window.cov()\r\n avg_rets = window.mean()\r\n port_mean = avg_rets.dot(weights)\r\n port_stdev = np.sqrt(weights.T.dot(cov_matrix).dot(weights))\r\n std_VaR[j] = norm.ppf(alpha / 100.0, port_mean, port_stdev)\r\n print('\\nVar_Covar time: {}'.format(round(time.time() - t, 2)))\r\n return std_VaR\r\n\r\n\r\ndef calculate_RiskMetrics_VAR(returns, weights, test_len, alpha):\r\n t = time.time()\r\n lambda_risk_metric = 0.94\r\n portfolio_returns = returns.dot(weights)\r\n forecast_std_risk_metric = pd.Series(portfolio_returns.squeeze()).ewm(alpha=1 - lambda_risk_metric).std().shift(periods=1)\r\n forecast_std_test = forecast_std_risk_metric[-test_len:]\r\n risk_metric_VaR = norm.ppf(alpha / 100.0) * forecast_std_test\r\n print('\\nRiskMetrics time: {}'.format(round(time.time() - t, 2)))\r\n return risk_metric_VaR\r\n\r\n\r\ndef calculate_GARCH_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n forecast_mean_garch, forecast_std_garch = forecast_std(returns, weights, window_length, test_len, volatility_model='GARCH')\r\n garch_VaR = forecast_mean_garch + norm.ppf(alpha / 100.0) * forecast_std_garch\r\n print('\\nGARCH time: {}'.format(round(time.time() - t, 2)))\r\n return garch_VaR\r\n\r\n\r\ndef calculate_EGARCH_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n forecast_mean_egarch, forecast_std_egarch = forecast_std(returns, weights, window_length, test_len, volatility_model='EGARCH')\r\n egarch_VaR = forecast_mean_egarch + norm.ppf(alpha / 100.0) * forecast_std_egarch\r\n print('\\nEGARCH time: {}'.format(round(time.time() - t, 2)))\r\n return egarch_VaR\r\n\r\n\r\ndef calculate_GJR_GARCH_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n forecast_mean_tarch, forecast_std_tarch = forecast_std(returns, weights, window_length, test_len, volatility_model='GJR-GARCH')\r\n harch_VaR = forecast_mean_tarch + norm.ppf(alpha / 100.0) * forecast_std_tarch\r\n print('\\nGJR GARCH time: {}'.format(round(time.time() - t, 2)))\r\n return harch_VaR\r\n\r\n\r\ndef calculate_Historical_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n hist_VaR = np.zeros((test_len, 1))\r\n portfolio_returns = returns.dot(weights)\r\n print('\\nHistorical:')\r\n for j in range(test_len):\r\n progressBar(j, test_len, bar_length=20)\r\n window = portfolio_returns[j:j + window_length]\r\n hist_VaR[j] = np.percentile(window, alpha)\r\n print('\\nHistorical time: {}'.format(round(time.time() - t, 2)))\r\n return hist_VaR\r\n\r\n\r\ndef calculate_Filtered_Historical_VAR(returns, weights, window_length, test_len, alpha, forecast_mean, forecast_std):\r\n t = time.time()\r\n f_hist_VaR = np.zeros((test_len, 1))\r\n portfolio_returns = returns.dot(weights)\r\n 
print('\\nFiltered Historical:')\r\n for j in range(test_len):\r\n loc = returns.shape[0] - test_len + j\r\n progressBar(j, test_len, bar_length=20)\r\n window = portfolio_returns.iloc[loc - window_length:loc]\r\n MEAN = forecast_mean[j]\r\n STD = forecast_std[j]\r\n filtered_window = MEAN + (window - np.mean(window)) * (STD / np.std(window))\r\n f_hist_VaR[j] = np.percentile(filtered_window, alpha)\r\n print('\\nFiltered Historical time: {}'.format(round(time.time() - t, 2)))\r\n return f_hist_VaR\r\n\r\n\r\ndef calculate_MonteCarlo_VAR(returns, weights, window_length, test_len, alpha):\r\n t = time.time()\r\n n_samples = 10000\r\n mc_VaR = np.zeros(test_len)\r\n print('\\nMonte Carlo:')\r\n for j in range(0, test_len):\r\n loc = returns.shape[0] - test_len + j\r\n progressBar(j, test_len, bar_length=20)\r\n window = returns.iloc[loc - window_length:loc, :]\r\n log_return = np.random.multivariate_normal(window.mean() - (window.std() ** 2) / 2, window.cov(), size=n_samples)\r\n port_simulations = (np.exp(log_return) - 1).dot(weights)\r\n mc_VaR[j] = np.percentile(port_simulations, alpha)\r\n print('\\nMonte Carlo time: {}'.format(round(time.time() - t, 2)))\r\n return mc_VaR\r\n\r\n\r\ndef calculate_CAViaR_Sym_VAR():\r\n return 0\r\n\r\n\r\ndef calculate_CAViaR_Asym_VAR():\r\n return 0\r\n\r\n\r\ndef calculate_CAViaR_indirect_GARCH_VAR():\r\n return 0\r\n\r\n\r\ndef calculate_CAViaR_adaptive_VAR():\r\n return 0\r\n\r\n\r\ndef calculate_EVT_VAR():\r\n return 0\r\n\r\n\r\ndef calculate_var_models(returns, weights, window_length, test_len, alpha, forecast_mean, forecast_std):\r\n var_models = pd.DataFrame()\r\n # var_models['CAViaR_Sym'] = calculate_CAViaR_Sym_VAR(portfolio_returns, window_length, alpha)\r\n # var_models['CAViaR_Asym'] = calculate_CAViaR_Asym_VAR(portfolio_returns, window_length, alpha)\r\n # var_models['CAViaR_indirect_GARCH'] = calculate_CAViaR_indirect_GARCH_VAR(portfolio_returns, window_length, alpha)\r\n # var_models['CAViaR_adaptive'] = calculate_CAViaR_adaptive_VAR(portfolio_returns, window_length, alpha)\r\n var_models['RiskMetrics'] = calculate_RiskMetrics_VAR(returns, weights, test_len, alpha)\r\n var_models['Variance - Covariance'] = calculate_Var_Covar_VAR(returns, weights, round(window_length/10), test_len, alpha)\r\n var_models['Historical'] = calculate_Historical_VAR(returns, weights, round(window_length/4), test_len, alpha)\r\n var_models['Filtered Historical'] = calculate_Filtered_Historical_VAR(returns, weights, window_length, test_len, alpha, forecast_mean, forecast_std)\r\n var_models['Monte Carlo'] = calculate_MonteCarlo_VAR(returns, weights, round(window_length/10), test_len, alpha)\r\n var_models['GARCH'] = calculate_GARCH_VAR(returns, weights, window_length, test_len, alpha)\r\n var_models['E-GARCH'] = calculate_EGARCH_VAR(returns, weights, window_length, test_len, alpha)\r\n # var_models['GJR_GARCH'] = calculate_GJR_GARCH_VAR(portfolio_returns, window_length, alpha)\r\n # var_models['EVT'] = calculate_EVT_VAR(portfolio_returns, window_length, alpha)\r\n return var_models\r\n\r\n\r\ndef calculate_var_models_pm(test_returns, var_models, alpha):\r\n var_models_pm = pd.DataFrame(columns=['name', 'PM', 'ratio'])\r\n for column in var_models.columns:\r\n var_models_pm = var_models_pm.append(\r\n {'name': column, 'PM': penalization_measure(test_returns, var_models[column], alpha)}, ignore_index=True)\r\n var_models_pm['ratio'] = var_models_pm['PM'] / sum(var_models_pm['PM'])\r\n var_models_pm.set_index('name', inplace=True, drop=True)\r\n return 
var_models_pm\r\n\r\n\r\ndef plot_all(test_returns, var_models, ratio=1.6):\r\n for column in var_models.columns:\r\n plot(test_returns.values, var_models[column].values, file_name=column, ratio=ratio)\r\n\r\n\r\ndef predictive_ability_test(test_returns, var_models, alpha, loss_func, beta=0.001):\r\n var_models_error = var_models.subtract(test_returns, axis=0)\r\n # loss is a function of errors, it can be abs or power of 2\r\n if loss_func == 'mse':\r\n var_models_loss = np.power(var_models_error, 2)\r\n elif loss_func == 'abs':\r\n var_models_loss = np.abs(var_models_error)\r\n elif loss_func == 'sarma':\r\n var_models_loss = pd.DataFrame(index=var_models.index, columns=var_models.columns)\r\n for column in var_models.columns:\r\n var_model = var_models[column]\r\n sarma = []\r\n for i in range(len(var_model)):\r\n if test_returns[i] < var_model[i]:\r\n sarmaLoss = (test_returns[i] - var_model[i]) ** 2\r\n else:\r\n sarmaLoss = beta*np.abs(test_returns[i] - var_model[i])\r\n sarma.append(sarmaLoss)\r\n var_models_loss[column] = sarma\r\n elif loss_func == 'regulatory':\r\n var_models_loss = ((np.repeat(test_returns.values.reshape(-1, 1), var_models.shape[1], axis=1) < var_models) * 1 - alpha / 100) * var_models_error\r\n elif loss_func == 'quantile':\r\n var_models_loss = pd.DataFrame(index=var_models.index, columns=var_models.columns)\r\n for column in var_models.columns:\r\n var_model = var_models[column]\r\n QL = []\r\n for i in range(len(var_model) - 1):\r\n if test_returns[i] < var_model[i]:\r\n QuantileLoss = (test_returns[i] - var_model[i]) ** 2\r\n else:\r\n QuantileLoss = (test_returns[i + 1:].quantile(alpha / 100) - var_model[i]) ** 2\r\n QL.append(QuantileLoss)\r\n QL.append((test_returns[-1] - var_model[-1]) ** 2)\r\n var_models_loss[column] = QL\r\n else:\r\n return \"loss function must be one of quantile, mse, abs or regulatory\"\r\n kappa = var_models_loss.div(np.sum(var_models_loss, axis=1), axis=0)\r\n W = np.sum(kappa > (1 / var_models_loss.shape[1]))\r\n p = 0.5\r\n T = var_models.shape[0]\r\n W_hat = (W - p * T) / np.sqrt(p * (1 - p) * T)\r\n PAT = W_hat.to_frame(name='W_hat')\r\n PAT['p-value'] = [norm.cdf(x) for x in PAT['W_hat']]\r\n return PAT\r\n\r\n\r\ndef plot(returns, VARs, file_name=None, ratio=1.6):\r\n # Re-add the time series index\r\n r = pd.Series(returns.squeeze())\r\n q = pd.Series(VARs.squeeze())\r\n\r\n sns.set_context(\"paper\")\r\n sns.set_style(\"whitegrid\", {\"font.family\": \"serif\", \"font.serif\": \"Computer Modern Roman\", \"text.usetex\": True})\r\n\r\n fig, ax = plt.subplots(figsize=(10*ratio, 10))\r\n ax.spines[\"top\"].set_linewidth(2)\r\n ax.spines[\"top\"].set_color(\"black\")\r\n ax.spines[\"bottom\"].set_linewidth(2)\r\n ax.spines[\"bottom\"].set_color(\"black\")\r\n ax.spines[\"left\"].set_linewidth(2)\r\n ax.spines[\"left\"].set_color(\"black\")\r\n ax.spines[\"right\"].set_linewidth(2)\r\n ax.spines[\"right\"].set_color(\"black\")\r\n ax.tick_params(axis='x', labelsize=20)\r\n ax.tick_params(axis='y', labelsize=20) \r\n ax.set_xlabel('Days', fontsize=20)\r\n ax.set_ylabel('Returns', fontsize=20)\r\n ax.grid(False)\r\n \r\n # Hits\r\n if len(r[r <= q]) > 0:\r\n r[r <= q].plot(ax=ax, color=\"red\", marker=\"o\", ls=\"None\")\r\n for h in r[r <= q].index:\r\n plt.axvline(h, color=\"black\", alpha=0.4, linewidth=1, zorder=0)\r\n\r\n # Positive returns\r\n if len(r[q < r]) > 0:\r\n r[q < r].plot(ax=ax, color=\"green\", marker=\"o\", ls=\"None\")\r\n\r\n # Negative returns but no hit\r\n if len(r[(q <= r) & (r <= 0)]) > 0:\r\n r[(q <= 
r) & (r <= 0)].plot(ax=ax, color=\"orange\", marker=\"o\", ls=\"None\")\r\n\r\n # VaR\r\n q.plot(ax=ax, grid=False, color=\"black\", rot=0)\r\n \r\n plt.tight_layout()\r\n \r\n #sns.despine()\r\n if file_name is None:\r\n plt.show()\r\n else:\r\n plt.savefig(file_name + '.pdf', dpi=300)\r\n plt.close(\"all\")\r\n\r\n\r\ndef progressBar(value, end_value, bar_length=20):\r\n percent = float(value) / end_value\r\n arrow = '-' * int(round(percent * bar_length) - 1) + '>'\r\n spaces = ' ' * (bar_length - len(arrow))\r\n\r\n sys.stdout.write(\"\\rCompleted: [{0}] {1}%\".format(arrow + spaces, int(round(percent * 100))))\r\n sys.stdout.flush()\r\n"
},
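The GARCH-family estimators in Sener.py all follow the same rolling pattern: fit on a window, forecast one step ahead, then convert the conditional mean and standard deviation into a quantile. A minimal single-window sketch with the arch package isolates that core step; the returns are synthetic and the 5% level is chosen for illustration:

    import numpy as np
    from scipy.stats import norm
    from arch import arch_model

    np.random.seed(0)
    returns = np.random.normal(0, 1, 1000)      # synthetic daily returns, in percent

    fit = arch_model(returns, vol='GARCH', p=1, q=1).fit(disp='off')
    fc = fit.forecast(horizon=1)

    mean = fc.mean.iloc[-1, 0]                  # one-step-ahead conditional mean
    std = np.sqrt(fc.variance.iloc[-1, 0])      # one-step-ahead conditional std
    var_5 = mean + norm.ppf(0.05) * std         # parametric 5% Value-at-Risk
    print(var_5)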
{
"alpha_fraction": 0.4043442904949188,
"alphanum_fraction": 0.43175745010375977,
"avg_line_length": 36.954750061035156,
"blob_id": "1f4c81485e356ed36359ffcd16176f9fd0ed68dc",
"content_id": "f693ca7d103c50c124bd75b57c15d34c6a5d4633",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8609,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 221,
"path": "/VaRBacktest.py",
"repo_name": "MehrdadMoghimi/Files",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 10 13:18:10 2018\r\n\r\n@author: zhli6157\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\n\r\n# ==============================================================================\r\n# Kupiec Uncondition Coverage Backtesting, Proportion of Failures(POF)\r\n# Defined as UCoverage\r\n# UCoverage(Returns, Value at Risk, Confidence Level of VaR)\r\n# ==============================================================================\r\n\r\ndef UCoverage(Returns, VaR, ConfidenceLevel):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Number_of_Fail = len(Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]])\r\n N = Number_of_Fail\r\n T = len(Compare)\r\n t = (1 - N / T) ** (T - N) * (N / T) ** N\r\n c = ((ConfidenceLevel) ** (T - N)) * ((1 - ConfidenceLevel) ** N)\r\n Likelihood_Ratio = 2 * np.log(t) - 2 * np.log(c)\r\n return Likelihood_Ratio, 1-stats.chi2.cdf(Likelihood_Ratio, 1)\r\n\r\n\r\n# ==============================================================================\r\ndef FailRate(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Number_of_Fail = len(Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]])\r\n N = Number_of_Fail\r\n T = len(Compare)\r\n FailRate = N / T\r\n return FailRate\r\n\r\n\r\n# ==============================================================================\r\n# Christoffersen's Interval Forecast Tests, Conditional Coverage Backtesting\r\n# Defined as LRCCI\r\n# LRCCI(Returns, Value at Risk, Confidence Level of VaR)\r\n# ==============================================================================\r\ndef LRCCI(Returns, VaR):\r\n LRCC = pd.concat([Returns, VaR], axis=1)\r\n TF = LRCC.iloc[:, 0] > LRCC.iloc[:, 1]\r\n n00 = 0\r\n n10 = 0\r\n n01 = 0\r\n n11 = 0\r\n for i in range(len(TF) - 1):\r\n if TF[i] == True and TF[i + 1] == True:\r\n n00 = n00 + 1\r\n for m in range(len(TF) - 1):\r\n if TF[m] == False and TF[m + 1] == True:\r\n n10 = n10 + 1\r\n for q in range(len(TF) - 1):\r\n if TF[q] == True and TF[q + 1] == False:\r\n n01 = n01 + 1\r\n for f in range(len(TF) - 1):\r\n if TF[f] == False and TF[f + 1] == False:\r\n n11 = n11 + 1\r\n\r\n pi0 = n01 / (n00 + n01)\r\n pi1 = n11 / (n10 + n11)\r\n pi = (n01 + n11) / (n00 + n01 + n10 + n11)\r\n Numeritor = ((1 - pi) ** (n00 + n10)) * (pi ** (n01 + n11))\r\n Denominator = ((1 - pi0) ** (n00)) * (pi0 ** n01) * ((1 - pi1) ** (n10)) * (pi1 ** n11)\r\n LRCCI = -2 * np.log(Numeritor / Denominator)\r\n return LRCCI, 1-stats.chi2.cdf(LRCCI, 1)\r\n\r\n\r\n# ==============================================================================\r\n# Regulator's Loss Function Family\r\n# Mathmatical Reference: The role of the loss function in value-at-risk comparisons\r\n# The score for the complete sample is the sum of each individual point\r\n# ==============================================================================\r\n# Lopez's quadratic (RQL)\r\n# Defined as RQL\r\n# RQL(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef RQL(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = 1 + (Compare.iloc[:, 1] - Compare.iloc[:, 0]) ** 2\r\n RQL_mean = np.mean(quadratic)\r\n RQL_sum = np.sum(quadratic)\r\n return RQL_mean, RQL_sum\r\n\r\n\r\n# ==============================================================================\r\n# Linear (RL)\r\n# Defined as RL\r\n# RL(Returns, Value 
at Risk)\r\n# ==============================================================================\r\ndef RL(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = (Compare.iloc[:, 1] - Compare.iloc[:, 0])\r\n RL_mean = np.mean(quadratic)\r\n RL_sum = np.sum(quadratic)\r\n return RL_mean, RL_sum\r\n\r\n\r\n# ==============================================================================\r\n# Quadratic (RQ)\r\n# Defined as RQ\r\n# RQ(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef RQ(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = (Compare.iloc[:, 1] - Compare.iloc[:, 0])**2\r\n RQ_mean = np.mean(quadratic)\r\n RQ_sum = np.sum(quadratic)\r\n return RQ_mean, RQ_sum\r\n\r\n\r\n# ==============================================================================\r\n# Caporin_1 (RC_1)\r\n# Defined as RC_1\r\n# RC_1(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef RC_1(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = np.abs(1-np.abs(Compare.iloc[:, 0]/Compare.iloc[:, 1]))\r\n RC1_mean = np.mean(quadratic)\r\n RC1_sum = np.sum(quadratic)\r\n return RC1_mean, RC1_sum\r\n\r\n\r\n# ==============================================================================\r\n# Caporin_2 (RC_2)\r\n# Defined as RC_2\r\n# RC_2(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef RC_2(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = (np.abs(Compare.iloc[:, 0]) - np.abs(Compare.iloc[:, 1])) ** 2 / (np.abs(Compare.iloc[:, 1]))\r\n RC2_mean = np.mean(quadratic)\r\n RC2_sum = np.sum(quadratic)\r\n return RC2_mean, RC2_sum\r\n\r\n\r\n# ==============================================================================\r\n# Caporin_3 (RC_3)\r\n# Defined as RC_3\r\n# RC_3(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef RC_3(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n Compare = Compare[Compare.iloc[:, 0] < Compare.iloc[:, 1]]\r\n quadratic = (np.abs(Compare.iloc[:, 1] - Compare.iloc[:, 0]))\r\n RC3_mean = np.mean(quadratic)\r\n RC3_sum = np.sum(quadratic)\r\n return RC3_mean, RC3_sum\r\n\r\n\r\n# ==============================================================================\r\n# Firm's Loss Function Family\r\n# Mathmatical Reference: The role of the loss function in value-at-risk comparisons\r\n# ==============================================================================\r\n# ==============================================================================\r\n# Caporin_1 (FC_1)\r\n# Defined as FC_1\r\n# FC_1(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef FC_1(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n quadratic = np.abs(1 - np.abs(Compare.iloc[:, 0] / Compare.iloc[:, 1]))\r\n FC1_mean = np.mean(quadratic)\r\n FC1_sum = np.sum(quadratic)\r\n return FC1_mean, FC1_sum\r\n\r\n\r\n# ==============================================================================\r\n# Caporin_2 (FC_2)\r\n# Defined as FC_2\r\n# 
FC_2(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef FC_2(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n quadratic = (np.abs(Compare.iloc[:, 0]) - np.abs(Compare.iloc[:, 1]) ** 2) / np.abs(Compare.iloc[:, 1])\r\n FC2_mean = np.mean(quadratic)\r\n FC2_sum = np.sum(quadratic)\r\n return FC2_mean, FC2_sum\r\n\r\n\r\n# ==============================================================================\r\n# Caporin_3 (FC_3)\r\n# Defined as FC_3\r\n# FC_3(Returns, Value at Risk)\r\n# ==============================================================================\r\ndef FC_3(Returns, VaR):\r\n Compare = pd.concat([Returns, VaR], axis=1)\r\n quadratic = np.abs(Compare.iloc[:, 1] - Compare.iloc[:, 0])\r\n FC3_mean = np.mean(quadratic)\r\n FC3_sum = np.sum(quadratic)\r\n return FC3_mean, FC3_sum\r\n\r\n\r\n# ==============================================================================\r\n# Quantile Loss Function\r\n# Reference: The Use of GARCH Models in VaR Estimation.\r\n# Defined as QL\r\n# Ql(Returns, Value at Risk, Condidence Level of VaR)\r\n# ==============================================================================\r\ndef QL(Returns, VaR, ConfidenceLevel):\r\n QL = []\r\n for i in range(len(VaR)-1):\r\n if Returns[i] < VaR[i]:\r\n QuantileLoss = (Returns[i] - VaR[i]) ** 2\r\n else:\r\n QuantileLoss = (Returns[i + 1:].quantile(1 - ConfidenceLevel) - VaR[i]) ** 2\r\n QL.append(QuantileLoss)\r\n QL.append((Returns[-1] - VaR[-1]) ** 2)\r\n QL_Score = np.sum(QL)\r\n return QL_Score\r\n"
}
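These backtests all take aligned pandas Series of returns and VaR forecasts. A small sketch of calling the Kupiec proportion-of-failures test on synthetic data, assuming the file is importable as VaRBacktest and that a 95% VaR is passed with ConfidenceLevel=0.95; with a flat historical 5%-quantile VaR the observed failure rate matches the nominal rate, so the test should not reject:

    import numpy as np
    import pandas as pd
    from VaRBacktest import UCoverage, FailRate

    np.random.seed(1)
    returns = pd.Series(np.random.normal(0, 0.01, 500))
    var_95 = pd.Series(np.full(500, np.quantile(returns, 0.05)))  # flat historical VaR

    lr, p_value = UCoverage(returns, var_95, 0.95)
    print(FailRate(returns, var_95), lr, p_value)  # reject coverage if p_value < 0.05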
] | 2 |
Horbatenko/genetic_algorithm
|
https://github.com/Horbatenko/genetic_algorithm
|
7dbad17f269aeeb61b9fbe835833ffbf4dbfa9b7
|
3a32ffbb3b6aad2b35bf58292e2210e94280ac77
|
760d00d0768b403a3ee481635007cc980d983f33
|
refs/heads/master
| 2020-04-25T01:23:17.375607 | 2019-03-05T19:42:23 | 2019-03-05T19:42:23 | 172,406,815 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5842185020446777,
"alphanum_fraction": 0.5927541851997375,
"avg_line_length": 33.457515716552734,
"blob_id": "51d64c93d2b42861f21fa03854d09a460630617f",
"content_id": "0349f6dfc77cfba2eeb887ee0d516b9720509584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5272,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 153,
"path": "/app.py",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "import pygame\nimport random\nfrom sty import fg\nfrom functions import (\n generate_cities, generate_individuals,\n get_route_from_individual, get_cities_ids_from_individual,\n create_child, calculate_score\n)\nfrom settings import (\n SCREEN_SIZE, BACKGROUND_COLOR,\n FONT_STYLE, FONT_SIZE, FONT_COLOR,\n START_POINT_POS, START_POINT_COLOR, START_POINT_RADIUS\n)\n\n\npygame.font.init()\n\n\nclass App:\n def __init__(self, cities_num, population_size, next_round_passers_num, mutation_chance):\n if not cities_num >= 2:\n raise ValueError('Wrong value for cities_num (cities_num >= 2)')\n if not population_size > 2:\n raise ValueError('Wrong value for population_size (population_size > 2)')\n if not 0 < next_round_passers_num < population_size:\n raise ValueError('Wrong value for next_round_passers_num (0 < val < population_size)')\n if not 0 <= mutation_chance <= 1:\n raise ValueError('Wrong value for mutation_chance (0 < val < 1)')\n\n self.screen = pygame.display.set_mode(SCREEN_SIZE)\n self.screen.fill(BACKGROUND_COLOR)\n self.font = pygame.font.SysFont(FONT_STYLE, FONT_SIZE)\n pygame.display.set_caption('Genetic algo')\n\n self.cities = generate_cities(cities_num)\n self.generation = Generation(population_size, next_round_passers_num, mutation_chance, self.cities)\n self.routes_to_be_displayed = []\n\n def run(self):\n\n running = True\n\n while running:\n generation_winner = self.generation.get_generation_winners()[0]\n self.add_route( # display the best route from generation\n get_route_from_individual(generation_winner),\n {\n 'route': get_cities_ids_from_individual(generation_winner),\n 'score': generation_winner['score']\n }\n )\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n self.refresh_screen()\n\n self.generation.make_new_generation()\n\n def draw_cities(self):\n for city in self.cities:\n text_surface = self.font.render(\n str(city['id']), False, FONT_COLOR\n )\n self.screen.blit(text_surface, city['pos'])\n\n def draw_start_point(self):\n pygame.draw.circle(\n self.screen, START_POINT_COLOR, START_POINT_POS, START_POINT_RADIUS\n )\n\n def draw_routes(self):\n for route_obj in self.routes_to_be_displayed:\n pygame.draw.lines(\n self.screen, route_obj['color'], True,\n [START_POINT_POS] + route_obj['route'], 2\n )\n\n def add_route(self, route, log_info=None):\n # if there is no route like the given one\n if route not in [route_obj['route'] for route_obj in self.routes_to_be_displayed]:\n color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n route_obj = {\n 'route': route,\n 'color': color,\n }\n self.routes_to_be_displayed.append(route_obj)\n if log_info:\n log = fg(*color) + str(log_info) + fg.rs\n print(log)\n\n def refresh_screen(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.draw_start_point()\n self.draw_routes()\n self.draw_cities()\n\n pygame.display.flip()\n\n\nclass Generation:\n def __init__(self, population_size, next_round_passers_num, mutation_chance, cities):\n self.generation_number = 0\n self.mutation_chance = mutation_chance\n self.population_size = population_size\n self.individuals = generate_individuals(self.population_size, cities)\n self.next_round_passers_num = next_round_passers_num\n\n def calc_individuals_score(self):\n for individual in self.individuals:\n individual['score'] = calculate_score(\n get_route_from_individual(individual)\n )\n\n def get_generation_winners(self):\n self.calc_individuals_score()\n\n self.individuals = sorted(\n self.individuals, key=lambda 
i: i['score']\n )[:self.next_round_passers_num]\n\n return self.individuals\n\n def make_new_generation(self):\n if random.uniform(0, 1) <= self.mutation_chance:\n self.mutate()\n\n current_population_size = len(self.individuals)\n parents = self.individuals[:]\n\n while current_population_size != self.population_size:\n parent1, parent2 = random.choices(parents, k=2)\n self.individuals.append(\n create_child(parent1, parent2)\n )\n\n current_population_size += 1\n\n self.generation_number += 1\n\n def mutate(self):\n individual = random.choice(self.individuals)\n genome = individual['genome']\n gene1, gene2 = random.choices(genome, k=2)\n i1 = genome.index(gene1)\n i2 = genome.index(gene2)\n genome[i1], genome[i2] = genome[i2], genome[i1]\n\n def log_individuals_info(self):\n for individual in self.individuals:\n print(individual['genome'], individual['score'])\n print('------------------------')\n"
},
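The evolutionary loop in app.py is truncation selection with elitism: score every individual, keep the best next_round_passers_num, and refill the population by crossing random pairs of survivors. Stripped of the pygame plumbing, the pattern reduces to the sketch below; the fitness and breed callables are hypothetical stand-ins for the repo's score and crossover functions:

    import random

    def evolve(population, fitness, keep, size, breed):
        survivors = sorted(population, key=fitness)[:keep]   # lower score = better here
        children = [breed(*random.choices(survivors, k=2))
                    for _ in range(size - len(survivors))]
        return survivors + children

    pop = [[3, 1, 2], [1, 2, 3], [2, 3, 1]]
    print(evolve(pop, fitness=lambda g: g[0], keep=2, size=3,
                 breed=lambda a, b: a[:1] + b[1:]))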
{
"alpha_fraction": 0.4576271176338196,
"alphanum_fraction": 0.498305082321167,
"avg_line_length": 18.66666603088379,
"blob_id": "5e9d26ecb4fd8a9fc2ee1749a25bfe44d6f2ee03",
"content_id": "e121c7b9e44618abc2bcb0bc6c78c32ffaae913c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 15,
"path": "/main.py",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "from app import App\n\n\ndef main():\n app = App(\n cities_num=10, # this >= 2\n population_size=30, # this > 2\n next_round_passers_num=10, # 2 <= this < population_size\n mutation_chance=1 # 0 <= this <= 1\n )\n app.run()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4878048896789551,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 15.399999618530273,
"blob_id": "a887f09eb7cbd1992cc23a6b45e9860b208b3831",
"content_id": "7758a776df2259d24b4977a98c78851903ebcadf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "pygame==1.9.4\npython-dateutil==2.8.0\nsix==1.12.0\nsty==1.0.0b9\ntext-unidecode==1.2\n"
},
{
"alpha_fraction": 0.5693836808204651,
"alphanum_fraction": 0.5932405591011047,
"avg_line_length": 25.19791603088379,
"blob_id": "2d1cf41534eb5960b8cdca0ce1f44a7a9db56fc8",
"content_id": "142c80516cc501ba306cfd830ebbe4ed7e8ccf9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2515,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 96,
"path": "/functions.py",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "from random import randint, sample, choice\nfrom math import sqrt\nfrom settings import SCREEN_SIZE, START_POINT_POS, FONT_SIZE\n\n\ndef generate_cities(cities_num):\n return [\n {\n 'id': i,\n 'pos': ( # 'minus FONT_SIZE' - to print numbers not out of the screen\n randint(0, SCREEN_SIZE[0] - FONT_SIZE),\n randint(0, SCREEN_SIZE[1] - FONT_SIZE)\n )\n } for i in range(cities_num)\n ]\n\n\ndef generate_individuals(population_size, cities):\n cities_num = len(cities)\n return [\n {\n 'genome': sample(cities, cities_num),\n 'score': 0,\n } for i in range(population_size)\n ]\n\n\ndef calculate_score(points, distance=0, last_point=START_POINT_POS):\n current_point = points.pop(0)\n\n distance += get_distance_between_points(last_point, current_point)\n\n if not points:\n distance += get_distance_between_points(current_point, START_POINT_POS) # start -> points -> start\n return distance\n\n return calculate_score(points, distance, last_point=current_point)\n\n\ndef get_distance_between_points(p1, p2):\n return sqrt(\n (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2\n )\n\n\ndef get_route_from_individual(individual):\n return [city['pos'] for city in individual['genome']]\n\n\ndef get_cities_ids_from_individual(individual):\n return [city['id'] for city in individual['genome']]\n\n\ndef create_child(parent1, parent2):\n # https://www.youtube.com/watch?v=DJ-yBmEEkgA\n def build_cycles(p1, p2, start_index):\n cycle1 = {}\n cycle2 = {}\n\n while start_index not in cycle1:\n cycle1[start_index] = p1[start_index]\n\n p2_val = p2[start_index]\n\n cycle2[start_index] = p2_val\n\n start_index = p1.index(p2_val)\n\n return cycle1, cycle2\n\n parent_genome_1 = parent1['genome']\n parent_genome_2 = parent2['genome']\n\n cycles_set1 = []\n cycles_set2 = []\n\n for index, val in enumerate(parent_genome_1):\n if any([index in cycle for cycle in cycles_set1]): # element already in cycles\n continue\n\n cycle1, cycle2 = build_cycles(parent_genome_1, parent_genome_2, index)\n\n cycles_set1.append(cycle1)\n cycles_set2.append(cycle2)\n\n child = {\n 'score': 0,\n 'genome': [None] * len(parent_genome_1)\n }\n\n for c1, c2 in zip(cycles_set1, cycles_set2):\n c = choice([c1, c2])\n for key in c:\n child['genome'][key] = c[key]\n\n return child\n"
},
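create_child above implements cycle crossover (CX): decompose the two parent permutations into index cycles, then copy each whole cycle from one parent or the other, which guarantees the child is still a permutation. A hand-traceable sketch of the cycle-detection step on integer genomes (the repo's genomes are city dicts, but the cycle logic is identical):

    p1 = [1, 2, 3, 4, 5, 6, 7, 8]
    p2 = [8, 5, 2, 1, 3, 6, 4, 7]

    def cycles(p1, p2):
        seen, out = set(), []
        for start in range(len(p1)):
            if start in seen:
                continue
            cyc, i = {}, start
            while i not in cyc:
                cyc[i] = p1[i]
                seen.add(i)
                i = p1.index(p2[i])   # follow the value back to its index in p1
            out.append(cyc)
        return out

    # three cycles for this pair; a child copies each whole cycle from p1 or p2
    print(cycles(p1, p2))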
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 21,
"blob_id": "617e316e3ecc4599eaa8534cebbcf0f2ae5c5f2f",
"content_id": "6a5f8c090e7f9945d808d29801d404ec8d3489e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "# how to run\n1. pip install -r requirements.txt\n2. python main.py\n"
},
{
"alpha_fraction": 0.4840182662010193,
"alphanum_fraction": 0.6484017968177795,
"avg_line_length": 18.81818199157715,
"blob_id": "482c58fbc5a188ac1142ec84f365890a42690f85",
"content_id": "578cbb59fa4a4bc32ce06d29d3ee74de3452e2cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 11,
"path": "/settings.py",
"repo_name": "Horbatenko/genetic_algorithm",
"src_encoding": "UTF-8",
"text": "\nBACKGROUND_COLOR = (0, 0, 0)\n\nSCREEN_SIZE = (500, 500)\n\nFONT_STYLE = 'Comic Sans MS'\nFONT_SIZE = 35\nFONT_COLOR = (200, 200, 200)\n\nSTART_POINT_POS = (50, 450)\nSTART_POINT_COLOR = (119, 178, 194)\nSTART_POINT_RADIUS = 15\n"
}
] | 6 |
hilalcinel/Machine-Learning-Breast-Cancer-Analysis
|
https://github.com/hilalcinel/Machine-Learning-Breast-Cancer-Analysis
|
5e0b6cb120a080d10007861b1c5df6569a688f03
|
3dc45ba685da6aa86f8e1584fab6da9c4532a8bb
|
18deb35eab6a447f961fdc123deac9c08e688d66
|
refs/heads/main
| 2023-07-15T15:32:06.369171 | 2021-08-26T08:51:15 | 2021-08-26T08:51:15 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6399403214454651,
"alphanum_fraction": 0.6521503329277039,
"avg_line_length": 30.224576950073242,
"blob_id": "6eb828dc2ccff5e0d8d38749db451e97ba92e480",
"content_id": "5307eb90927396992b5d55173468a586598f62d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7371,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 236,
"path": "/BreastCancer.py",
"repo_name": "hilalcinel/Machine-Learning-Breast-Cancer-Analysis",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis, LocalOutlierFactor\nfrom sklearn.decomposition import PCA \n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndata = pd.read_csv(\"data.csv\")\ndata.drop(['Unnamed: 32','id'], inplace=True, axis=1)\n\ndata=data.rename(columns = {\"diagnosis\":\"target\"})\n\nsns.countplot(data[\"target\"])\nprint(data.target.value_counts())\n\ndata[\"target\"] = [1 if i.strip() == \"M\" else 0 for i in data.target]\n\nprint(len(data))\nprint(data.head())\nprint(\"Data Shape\", data.shape)\n\ndata.info()\ndescribe = data.describe()\n\"\"\"\nmissing value: none\n\"\"\"\n#%% \ncorr_martix = data.corr()\ncorr_matrix2 = data.corr()[\"target\"]\nsns.clustermap(corr_martix, annot=True, fmt= \".2f\")\nplt.title(\"Correlation Between Features\")\nplt.show()\n\nthreshold = 0.5\nfiltre = np.abs(corr_martix[\"target\"]) > threshold\ncorr_features = corr_martix.columns[filtre].tolist()\nsns.clustermap(data[corr_features].corr(), annot = True, fmt = \".2f\")\nplt.title(\"Correlation Between Features with Corr Threshold 0.75\")\n\ndata_melted = pd.melt(data, id_vars=\"target\", \n var_name = \"features\", \n value_name = \"value\")\nplt.figure()\nsns.boxplot(x = \"features\", y = \"value\", hue=\"target\", data = data_melted)\nplt.xticks(rotation = 90)\nplt.show()\n \nsns.pairplot(data[corr_features], diag_kind = \"kde\", markers = \"+\", hue=\"target\")\nplt.show()\n\n #%% outlier\ny = data.target\nx = data.drop([\"target\"], axis = 1)\ncolumns = x.columns.tolist()\n\nclf = LocalOutlierFactor()\nY_pred = clf.fit_predict(x)\nX_score = clf.negative_outlier_factor_\n\noutlier_score = pd.DataFrame()\noutlier_score[\"score\"] = X_score\n#threshold\n\nthreshold = -2.5\nfiltre = outlier_score[\"score\"] < threshold\noutlier_index = outlier_score[filtre].index.tolist()\n\nplt.figure()\nplt.scatter(x.iloc[outlier_index,0], x.iloc[outlier_index,1], color = \"blue\", s= 50, label=\"Outliers\")\nplt.scatter(x.iloc[:,0], x.iloc[:,1], color = \"k\", s = 3, label=\"Data Points\")\n\nradius = (X_score.max() - X_score)/(X_score.max() - X_score.min())\noutlier_score[\"radius\"] = radius\nplt.scatter(x.iloc[:,0], x.iloc[:,1], s = 1000*radius, edgecolors = \"r\", facecolors = \"none\", label = \"Outlier Scores\")\nplt.legend()\nplt.show()\n\nx = x.drop(outlier_index)\ny = y.drop(outlier_index).values\n\n#%% Train test split\ntest_size = 0.3\nX_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size = test_size, random_state= 42)\n\n#%% \nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\nX_train_df = pd.DataFrame(X_train, columns = columns)\nX_train_df_describe = X_train_df.describe()\nX_train_df[\"target\"] = Y_train \n\ndata_melted = pd.melt(X_train_df, id_vars=\"target\", \n var_name = \"features\", \n value_name = \"value\")\n\nplt.figure()\nsns.boxplot(x = \"features\", y = \"value\", hue=\"target\", data = data_melted)\nplt.xticks(rotation = 90)\nplt.show()\n\nsns.pairplot(X_train_df[corr_features], diag_kind = \"kde\", markers = \"+\", hue=\"target\")\nplt.show()\n\n#%% \n\nknn = KNeighborsClassifier(n_neighbors=2)\nknn.fit(X_train, Y_train)\ny_pred = 
knn.predict(X_test)\ncm = confusion_matrix(Y_test, y_pred)\nacc = accuracy_score(Y_test, y_pred) \nscore = knn.score(X_test, Y_test)\nprint(\"Score: \", score)\nprint(\"CM: \", cm)\nprint(\"Basic KNN Acc: \", acc)\n\n#%% \ndef KNN_Best_Params(X_train, X_test, Y_train, Y_test):\n \n k_range = list(range(1,31))\n weight_options = [\"uniform\", \"distance\"]\n print()\n param_grid = dict(n_neighbors = k_range, weights = weight_options)\n \n knn = KNeighborsClassifier()\n grid = GridSearchCV(knn, param_grid, cv = 10, scoring=\"accuracy\")\n grid.fit(X_train,Y_train)\n \n print(\"Best training score: {} with parameters: {}\".format(grid.best_score_, grid.best_params_))\n print()\n \n knn = KNeighborsClassifier(**grid.best_params_)\n knn.fit(X_train,Y_train)\n \n y_pred_test = knn.predict(X_test)\n y_pred_train = knn.predict(X_train)\n \n cm_test = confusion_matrix(Y_test, y_pred_test)\n cm_train = confusion_matrix(Y_train, y_pred_train)\n \n acc_test = accuracy_score(Y_test, y_pred_test)\n acc_train = accuracy_score(Y_train, y_pred_train)\n print(\"Test Score: {}, Train Score: {}\".format(acc_test, acc_train))\n print()\n print(\"CM Test: \",cm_test)\n print(\"CM Train: \",cm_train)\n \n return grid\n\ngrid = KNN_Best_Params(X_train, X_test, Y_train, Y_test)\n\n#%% \n\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(x)\n\npca = PCA(n_components=2)\npca.fit(X_scaled)\nX_reduced_pca = pca.transform(X_scaled)\npca_data = pd.DataFrame(X_reduced_pca, columns = [\"p1\",\"p2\"])\npca_data[\"target\"] = y\nsns.scatterplot(x = \"p1\", y = \"p2\", hue = \"target\", data = pca_data)\nplt.title(\"PCA: p1 vs p2\")\n\nX_train_pca, X_test_pca, Y_train_pca, Y_test_pca = train_test_split(X_reduced_pca, y, test_size = test_size, random_state= 42)\ngrid_pca = KNN_Best_Params(X_train_pca, X_test_pca, Y_train_pca, Y_test_pca)\n\n#visualize\ncmap_light = ListedColormap(['orange','cornflowerblue'])\ncmap_bold = ListedColormap(['darkorange', 'darkblue'])\n\nh= .05\nX = X_reduced_pca\nx_min, x_max = X[:,0].min() - 1, X[:,0].min() + 1\ny_min, y_max = X[:,1].min() - 1, X[:,1].min() + 1\nxx,yy =np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\nZ = grid_pca.predict(np.c_[xx.ravel(), yy.ravel()])\n\nZ = Z.reshape(xx.shape)\nplt.figure()\nplt.pcolormesh(xx, yy, Z, cmap = cmap_light)\n\nplt.scatter(X[:,0], X[:,1], c=y, cmap=cmap_bold, \n edgecolor= 'k', s=20)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max()) \nplt.title(\"%i-Class classification (k = %i, weights = '%s')\"\n %(len(np.unique(y)), grid_pca.best_estimator_.n_neighbors, grid_pca.best_estimator_.weights)) \n\n#%% \nnca = NeighborhoodComponentsAnalysis(n_components= 2, random_state = 42)\nnca.fit(X_scaled, y)\nX_reduced_nca = nca.transform(X_scaled)\nnca_data = pd.DataFrame(X_reduced_nca, columns = [\"p1\",\"p2\"])\nnca_data[\"target\"] = y\nsns.scatterplot(x = \"p1\", y = \"p2\", hue = \"target\", data = nca_data)\nplt.title(\"NCA: p1 vs p2\")\n\n\nX_train_nca, X_test_nca, Y_train_nca, Y_test_nca = train_test_split(X_reduced_nca, y, test_size = test_size, random_state= 42)\ngrid_nca = KNN_Best_Params(X_train_nca, X_test_nca, Y_train_nca, Y_test_nca)\n\n#visualize\ncmap_light = ListedColormap(['orange','cornflowerblue'])\ncmap_bold = ListedColormap(['darkorange', 'darkblue'])\n\nh= .2\nX = X_reduced_nca\nx_min, x_max = X[:,0].min() - 1, X[:,0].min() + 1\ny_min, y_max = X[:,1].min() - 1, X[:,1].min() + 1\nxx,yy =np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\nZ = grid_nca.predict(np.c_[xx.ravel(), 
yy.ravel()])\n\nZ = Z.reshape(xx.shape)\nplt.figure()\nplt.pcolormesh(xx, yy, Z, cmap = cmap_light)\n\nplt.scatter(X[:,0], X[:,1], c=y, cmap=cmap_bold, \n edgecolor= 'k', s=20)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max()) \nplt.title(\"%i-Class classification (k = %i, weights = '%s')\"\n %(len(np.unique(y)), grid_nca.best_estimator_.n_neighbors, grid_nca.best_estimator_.weights)) \n\n"
}
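The outlier step in BreastCancer.py is worth isolating: LocalOutlierFactor in its default mode is fit_predict-only, and the scores in negative_outlier_factor_ are negative (more negative = more anomalous), which is why the script thresholds at -2.5. A minimal sketch on synthetic data with two planted outliers:

    import numpy as np
    from sklearn.neighbors import LocalOutlierFactor

    rng = np.random.RandomState(42)
    X = np.vstack([rng.normal(0, 1, (100, 2)),
                   [[8, 8], [9, -9]]])            # two planted outliers

    lof = LocalOutlierFactor(n_neighbors=20)      # 20 is the sklearn default
    labels = lof.fit_predict(X)                   # -1 = outlier, 1 = inlier
    scores = lof.negative_outlier_factor_

    # indices past the script's -2.5 threshold (likely the two planted points)
    print(np.where(scores < -2.5)[0])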
] | 1 |
ed-creator/PassTheBooking
|
https://github.com/ed-creator/PassTheBooking
|
45bea75f605cd981c58c852601ffd50c833b37d8
|
79044642a05ff10c61edb8cd917cef0bc640eb17
|
f14cbd75046adfa5bd7accbf78bd9361f5431841
|
refs/heads/master
| 2021-06-20T09:29:57.910715 | 2017-07-16T16:19:17 | 2017-07-16T16:19:17 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6503759622573853,
"alphanum_fraction": 0.6541353464126587,
"avg_line_length": 27,
"blob_id": "431fd887fc2c3932351c528ca7e3949ef6e9ab48",
"content_id": "233b47572ee09b04eafcd8415d75aa5e61db978d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 19,
"path": "/client/urls.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url\nfrom registration.backends.hmac.views import RegistrationView\n\nfrom .forms import MyCustomUserForm\n\nfrom . import views\n\napp_name = 'client'\nurlpatterns = [\n url(r'^register/$',\n RegistrationView.as_view(\n form_class=MyCustomUserForm\n ),\n name='registration_register',\n ),\n url(r'^accounts/', include('registration.backends.hmac.urls')),\n url(r'^(?P<client_id>[0-9]+)/$', views.detail, name='detail'),\n url(r'^$', views.index, name='index')\n]\n"
},
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 22,
"blob_id": "8bd2f7bb19688153c773b3e1b5fc4cbe2abd3a0b",
"content_id": "ceeedc07661448052435e0c5d8f4557fc65b4981",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 6,
"path": "/client/admin.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom .models import Client\n\n\nadmin.site.register(Client)\n"
},
{
"alpha_fraction": 0.6994219422340393,
"alphanum_fraction": 0.7167630195617676,
"avg_line_length": 31.4375,
"blob_id": "d756f8ba6488da47e6eecf8208b819ee0d581b5c",
"content_id": "749e0321d5b46eb5f1ef0184aaf7aecdedcf0b4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 16,
"path": "/client/views.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, render\nfrom django.http import Http404, HttpResponse\nfrom .models import Client\n\n\ndef index(request):\n current_user = request.user\n context = {\n 'current_user': current_user,\n }\n return render(request, 'client/index.html', context)\n\n# doesnt make sense this view should not exist\ndef detail(request, client_id):\n current_user = get_object_or_404(Client, pk=client_id)\n return render(request, 'client/detail.html', {'current_user': current_user})\n"
},
{
"alpha_fraction": 0.546012282371521,
"alphanum_fraction": 0.7300613522529602,
"avg_line_length": 17.11111068725586,
"blob_id": "aa3aa5910e6f82edb4587fe6076df87eac3214d6",
"content_id": "624ed043916a96e84c40c1b7f4690bdfaee95b7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "bintrees==2.0.7\nDjango==1.11.3\npytz==2017.2\nrequests==2.13.0\nsix==1.10.0\nWebTest==2.0.16\ndjango-webtest==1.7.7\ndjango-phonenumber-field==1.3.0\ndjango-registration\n"
},
{
"alpha_fraction": 0.7043010592460632,
"alphanum_fraction": 0.7043010592460632,
"avg_line_length": 19.66666603088379,
"blob_id": "72f21d81fb4374aab2345ca4aa4dee6e6d8ccd89",
"content_id": "70556fe81f60ed45e1569794e8b7923fc8819e2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 9,
"path": "/client/forms.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "from registration.forms import RegistrationForm\n\nfrom .models import Client\n\n\nclass MyCustomUserForm(RegistrationForm):\n class Meta:\n model = Client\n fields = '__all__'\n"
},
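Using fields = '__all__' on the registration form above exposes every Client field (including balance) on the public signup page. A more conservative variant, shown here as a suggestion rather than the repo's code, lists only what a visitor should be able to set; username and email come from AbstractUser, phone_number and title from Client:

    from registration.forms import RegistrationForm
    from .models import Client

    class MyCustomUserForm(RegistrationForm):
        class Meta:
            model = Client
            fields = ('username', 'email', 'phone_number', 'title')  # omit balance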
{
"alpha_fraction": 0.5243129134178162,
"alphanum_fraction": 0.6004228591918945,
"avg_line_length": 22.649999618530273,
"blob_id": "97dcd246efdc7b6990f7679853cd7c7f9da56e6e",
"content_id": "43b13ae960a4a41fa8fc51595526ce19b0d667b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 20,
"path": "/client/migrations/0004_auto_20170713_1459.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.3 on 2017-07-13 13:59\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('client', '0003_auto_20170713_1457'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='client',\n name='title',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5405982732772827,
"alphanum_fraction": 0.5940170884132385,
"avg_line_length": 22.399999618530273,
"blob_id": "b6df57ed5183c6ad91e1654aacd731159ac9111b",
"content_id": "346200f77810f3bd556de6ec2688259a753f6faa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 20,
"path": "/client/migrations/0003_auto_20170713_1457.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.3 on 2017-07-13 13:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('client', '0002_client_title'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='client',\n name='title',\n field=models.CharField(default='2', max_length=200, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.8014705777168274,
"avg_line_length": 39.79999923706055,
"blob_id": "c03e34aab0a7b0e9150781baaab16cad0a52a6ca",
"content_id": "16ef2d72b653f1c71d1b3173e87deceeb4c73de5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 408,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/client/models.py",
"repo_name": "ed-creator/PassTheBooking",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom django.utils import timezone\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Client(AbstractUser):\n phone_number = PhoneNumberField()\n title = models.CharField(max_length=200, null=True, blank=True)\n balance = models.FloatField(default=0)\n"
}
] | 8 |
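One closing note on this repo: a Client subclassing AbstractUser only takes effect if the project settings point at it, and the settings module is not in this dump. A minimal sketch of the one line it would need (hypothetical):

# settings.py (hypothetical; the dump does not include the project settings)
AUTH_USER_MODEL = 'client.Client'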
PandoLab/SmashMic
|
https://github.com/PandoLab/SmashMic
|
98ec60e340252c7b84387b6523a11711004ac63d
|
393ce1801e774e6183553f37778033ce7b0724b1
|
377ac173435c1588b8d939c361787da68b87b2f8
|
refs/heads/master
| 2021-05-06T12:59:23.254654 | 2019-01-28T16:30:54 | 2019-01-28T16:30:54 | 113,206,067 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47937867045402527,
"alphanum_fraction": 0.4925013482570648,
"avg_line_length": 21.91411018371582,
"blob_id": "ec73f27dc6190a1bd06d4a5ee6d7c0563129911e",
"content_id": "f25b07717adb61641b52663be2d80792c441c994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3734,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 163,
"path": "/Mods/Games/smash_melee.py",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "\"\"\"\nauthor: BrendanMoore42\ndate: Jan 12, 2019\n\nGamecube Controller Mod\n\nSuper Smash Bros. Melee Controller Add-On\n\"\"\"\n# to import controller and key classes\n# import time\n# from ..DirectKeys.directkeys import *\n\n# Super Smash Bros Melee\nclass AddOn():\n\n def __init__(self):\n self.buttons = {'a': ['jab', 'punch', 'slap', 'strike'],\n 'b': ['b', 'special', ],\n 'x': ['x', 'jump', 'colour'],\n 'y': self.buttons['x'],\n 'L': ['L', 'shield', ],\n 'R': self.buttons['L'],\n 'z': ['Z', 'light shield', 'grab'],\n 'up': ['up'],\n 'down': ['down', 'crouch'],\n 'left': ['left', 'walk', 'run'],\n 'right': ['right', 'walk', 'run'],\n 'd_up': ['d-pad up', 'taunt'],\n 'd_down': ['d-pad down'],\n 'd_left': ['d-pad left'],\n 'd_right': ['d-pad right'],\n 'c_up': ['see up', 'up smash'],\n 'c_down': ['see down', 'down smash', ],\n 'c_left': ['see left', 'left smash'],\n 'c_right': ['see right', 'right smash']}\n\n add_moves = {self.jab: self.buttons['a'], self.crouch: ['down'],\n 'shield': self.shield, 'grab': self.grab,\n 'wait': self.wait}\n\n\n #Each function is a macro for a specific move\n def wait(self, wait):\n time.sleep(wait)\n\n\n def jump(self):\n PressKey(T)\n time.sleep(0.1)\n ReleaseKey(T)\n\n\n def up(self):\n PressKey(UP)\n time.sleep(0.25)\n ReleaseKey(UP)\n\n\n def left(self):\n PressKey(LEFT)\n time.sleep(0.1)#To alter timing of presses\n ReleaseKey(LEFT)\n\n\n def right(self):\n PressKey(RIGHT)\n time.sleep(0.1)\n ReleaseKey(RIGHT)\n\n\n def crouch(self):\n PressKey(DOWN)\n time.sleep(0.1)\n ReleaseKey(DOWN)\n\n\n def jab(self):\n PressKey(A)\n time.sleep(0.05)\n ReleaseKey(A)\n\n\n def djab(self):\n PressKey(A)\n ReleaseKey(A)\n time.sleep(0.05)\n PressKey(A)\n ReleaseKey(A)\n\n\n def shield(self):\n PressKey(R)\n time.sleep(2)\n ReleaseKey(R)\n\n\n def grab(self):\n PressKey(Z)\n time.sleep(0.05)\n ReleaseKey(Z)\n\n\n def Rsmash(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\n def Lsmash(self):\n PressKey(LEFT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(LEFT)\n ReleaseKey(A)\n\n def laser(self):\n PressKey(B)\n time.sleep(0.05)\n ReleaseKey(B)\n\n def shdl(self):\n PressKey(Y)\n PressKey(B)\n ReleaseKey(B)\n PressKey(B)\n ReleaseKey(B)\n\n\n def shine(self):\n PressKey(DOWN)\n PressKey(B)\n time.sleep(0.05)\n ReleaseKey(DOWN)\n ReleaseKey(B)\n\n\n def wd_left(self):\n PressKey(X)\n time.sleep(0.05)\n PressKey(DOWN), PressKey(LEFT)\n PressKey(R)\n ReleaseKey(R), ReleaseKey(DOWN), ReleaseKey(LEFT)\n\n\n def wd_right(self):\n PressKey(X)\n time.sleep(0.05)\n PressKey(DOWN), PressKey(RIGHT)\n PressKey(R)\n ReleaseKey(R), ReleaseKey(DOWN), ReleaseKey(RIGHT)\n\n\n\n# moves = \"hey up smash then hold shield for 4 seconds\"\n# move = \"smash\"\n# direction = \"up\" # if not defined will default to last direction called\n# modifier = \"hold\"\n# mod_move = \"sheild\"\n# mod_time = 4\n#\n# player = AddOn(move=move, direction=direction, modifier=modifier, mod_move=mod_move, mod_time=mod_time) # for debugs"
},
{
"alpha_fraction": 0.5104166865348816,
"alphanum_fraction": 0.518750011920929,
"avg_line_length": 25.670940399169922,
"blob_id": "aed06e857e09a1f5b4876dbf866fd916eef00a62",
"content_id": "32046adcbd540ff4defc0968300b388b0f028892",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6240,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 234,
"path": "/Mods/Controllers/Gamecube.py",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "\"\"\"\nauthor: BrendanMoore42\ndate: Jan 12, 2019\n\nStandard moveset Mod- Gamecube Controller for Dolphin\n\n# add game here and in import statement\nGames Supported Currently:\nSuper Smash Bros. Melee\n\"\"\"\nimport time\n# from Mods.Controllers.DirectKeys.directkeys import *\nfrom Mods.Games.smash_melee import AddOn as ssbm\n\n# Add to button list to modify/add phrases\nbuttons = {'a': ['a'],\n 'b': ['b'],\n 'x': ['x'],\n 'y': ['y'],\n 'L': ['L'],\n 'R': ['R'],\n 'z': ['Z'],\n 'up': ['up'],\n 'down': ['down'],\n 'left': ['left'],\n 'right': ['up'],\n 'd_up': ['d-pad up'],\n 'd_down': ['d-pad down'],\n 'd_left': ['d-pad left'],\n 'd_right': ['d-pad up'],\n 'c_up': ['see up'],\n 'c_down': ['see down'],\n 'c_left': ['see left'],\n 'c_right': ['see up'],\n }\n\n# Add games\ngames = [ssbm, ]\n\nclass GC_Controller():\n def __init__(self, game, moves, execute=True):\n self.game = game\n self.moves = moves\n self.new_moves = moves.split(' ')\n self.execute = execute\n self.modifier = None\n self.mod_move = None\n self.mod_time = None\n\n # int value is where controller looks for number to convert\n # example: hold shield for 4 seconds\n self.modifiers = {'wait', 'hold', 'press', 'side', 'smash', 'tilt', 'tap', 'mash', 'half', 'trigger'}\n # self.available_moves = {self.a_press: buttons[\"a\"], self.b_press: buttons[\"b\"],\n # self.down_press: buttons['down'], self.up_press: buttons['up'],\n # self.left_press: buttons['left'], self.right_press: ['right'],\n # self.down_pad_press: buttons['d_down'], self.up_pad_press: buttons['d_up'],\n # self.left_pad_press: buttons['d_left'], self.right_pad_press: ['d_right'],\n # self.down_c_press: buttons['c_down'], self.up_c_press: buttons['c_up'],\n # self.left_c_press: buttons['c_left'], self.right_c_press: ['c_right'],\n # self.hold: buttons['hold']}\n if self.execute:\n #execute move\n [i() for i, x in self.available_moves.items() for move in self.new_moves if move in x]\n\n def check_modifiers(move, incoming):\n \"\"\"\n Assign values for modified moves\n :param move: Modifier move to set modifier terms\n :param incoming: Move list coming in from main function\n :return:\n \"\"\"\n modifier = move\n\n # get specified number\n try:\n # returns int value from text\n word_index = moves.index(word)\n # get value for modifier to check where number should be\n mod_value = [i for i, x in self.modifiers.items() if word in x]\n # search through the incoming moves to find number to convert\n modifier_index = moves[word_index + mod_value]\n # return the value to modify move\n modifier_out = num_to_int[modifier_index]\n except:\n # defaults to 1 if translation fails\n modifier_out = 1\n\n\n return modifier, mod_move, mod_time\n\n if self.execute:\n for move in self.new_moves:\n if move in self.modifiers:\n self.modifier, self.mod_move, self.mod_time = check_modifiers(move=move, incoming=self.moves)\n\n\n\n [i() for i, x in self.available_moves.items() if move in x]\n\n\n def execute_moves(self):\n\n if self.move in self.modifiers:\n\n [i() for i, x in self.available_moves.items() if self.move in x]\n\n\n\n #Each function is a macro for a specific move\n def wait(self, wait):\n time.sleep(wait)\n\n\n def a_press(self, ):\n\n PressKey(A)\n time.sleep(0.1)\n ReleaseKey(A)\n\n\n def up(self):\n PressKey(UP)\n time.sleep(0.25)\n ReleaseKey(UP)\n\n\n def left(self):\n PressKey(LEFT)\n time.sleep(0.1)#To alter timing of presses\n ReleaseKey(LEFT)\n\n\n def right(self):\n PressKey(RIGHT)\n time.sleep(0.1)\n ReleaseKey(RIGHT)\n\n\n def crouch(self):\n 
PressKey(DOWN)\n time.sleep(0.1)\n ReleaseKey(DOWN)\n\n\n def jab(self):\n PressKey(A)\n time.sleep(0.05)\n ReleaseKey(A)\n\n\n def djab(self):\n PressKey(A)\n ReleaseKey(A)\n time.sleep(0.05)\n PressKey(A)\n ReleaseKey(A)\n\n\n def shield(self):\n PressKey(R)\n time.sleep(2)\n ReleaseKey(R)\n\n\n def grab(self):\n PressKey(Z)\n time.sleep(0.05)\n ReleaseKey(Z)\n\n\n def Rsmash(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\n def Lsmash(self):\n PressKey(LEFT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(LEFT)\n ReleaseKey(A)\n\n def b_press(self):\n PressKey(B)\n time.sleep(0.05)\n ReleaseKey(B)\n\n def shdl(self):\n PressKey(Y)\n PressKey(B)\n ReleaseKey(B)\n PressKey(B)\n ReleaseKey(B)\n\n\n def shine(self):\n PressKey(DOWN)\n PressKey(B)\n time.sleep(0.05)\n ReleaseKey(DOWN)\n ReleaseKey(B)\n\n\n def wd_left(self):\n PressKey(X)\n time.sleep(0.05)\n PressKey(DOWN), PressKey(LEFT)\n PressKey(R)\n ReleaseKey(R), ReleaseKey(DOWN), ReleaseKey(LEFT)\n\n\n def wd_right(self):\n PressKey(X)\n time.sleep(0.05)\n PressKey(DOWN), PressKey(RIGHT)\n PressKey(R)\n ReleaseKey(R), ReleaseKey(DOWN), ReleaseKey(RIGHT)\n\n # def wombo_combo(self, *moves):\n # print('sup')\n #\n # wombo_combo()\n# #\n# moves = \"hey up smash then hold shield for 4 seconds\"\n# move = \"smash\"\n# direction = \"up\" # if not defined will default to last direction called\n# modifier = \"hold\"\n# mod_move = \"sheild\"\n# mod_time = 4\n#\n# player = GC_Controller(game=smash_melee, moves=moves, execute=False)"
},
{
"alpha_fraction": 0.788040280342102,
"alphanum_fraction": 0.7927767634391785,
"avg_line_length": 87.84210205078125,
"blob_id": "70bac995af970112a450c62fba8c61cedd0d29cb",
"content_id": "71e5e39f9f0e3b0f6423a5bcc8a7e464d98b12cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1689,
"license_type": "no_license",
"max_line_length": 518,
"num_lines": 19,
"path": "/README.md",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "# SmashCom \nSmashCom is a custom vocal controller that takes user speech and converts the instructions to pre-set button inputs. Inspired by Super Smash Bros. and games with a deep lexicon, SmashCom can correctly interpret and use modifiers in colloquialisms to string more complex actions together. \n\n### Modifications and Community\nSmashCom is currently designed and actionable for the Nintendo Gamecube, however, can be modified to work with any emulator by development of AddOn controller packs located in the Mods folder. If you want to try creating a NES, SNES, Playstation Dualshock or other controller scheme, or add game-specific language packs, please clone the repository and if it works we'll add it to the project. \n\n### Example: Smash Bros. Melee\nThe Smash Bros community has developed a wide list of unique (and borrowed from other fighters) move names that are used to describe gameplay. Terms like waveshine, flutterhush, or thunders describe both moves and combos - a series of buttons to be pressed in a particular order and timing. \n\nSmashCom will pick out words and then initaite moves/combos that it hears in your instructions, in the order it hears them. For example, \"Fox, run right, multishine and jump\", will input the sequence: analog stick right-->press down-B jump cancel x3-->press X to jump. It will only pay attention to the smash bros. specific instructions. SmashCom will apply modifiers like \"hold\", \"wait', or \"mash\" with the specified number of seconds or button presses to create a whole different kind of dynamic gaming experience. \n\n## Setup\n\nCurrent build: 1.0.6\nOS Supported: Windows 10. \nRequirements: \nPython 3.6+ \nSpeech Recognition\nKeyboard\n\n"
},
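The README above describes the core trick: scan a transcribed phrase for known move words and modifiers ('hold', 'wait', 'mash') plus a count. A minimal standalone sketch of that idea (a hypothetical simplification, not the repo's actual parser, which lives in Mods/Controllers/Controller.py below):

import re

MOVES = {'jump', 'shield', 'smash', 'laser'}
MODIFIERS = {'hold', 'wait', 'mash'}

def parse_phrase(phrase):
    """Yield (modifier, move, seconds) triples found in a spoken phrase."""
    words = phrase.lower().split()
    for i, word in enumerate(words):
        if word in MOVES:
            mod = words[i - 1] if i > 0 and words[i - 1] in MODIFIERS else None
            # look a few words ahead for 'for N seconds'
            m = re.search(r'for (\d+) seconds?', ' '.join(words[i:i + 4]))
            yield mod, word, int(m.group(1)) if m else None

print(list(parse_phrase('hold shield for 4 seconds then jump')))
# -> [('hold', 'shield', 4), (None, 'jump', None)]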
{
"alpha_fraction": 0.5055100321769714,
"alphanum_fraction": 0.5114231705665588,
"avg_line_length": 32.44044876098633,
"blob_id": "1f21bf402dfa37e6281eba0eda585a06ff6b08f0",
"content_id": "fde0a1e2922fc6ddad77a182dd74965bbb782519",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14882,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 445,
"path": "/Mods/Controllers/Controller.py",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "\"\"\"\nauthor: BrendanMoore42\ndate: Jan 12, 2019\n\nStandard button moveset\n\nCan be ported to custom consoles and game-specific packs\n\"\"\"\nimport re\nimport time\nfrom time import sleep\nimport itertools as it\nfrom threading import Timer\n# import multiprocessing as mp\n\n#need to figure out this input problem\nfrom pynput.keyboard import Key, Controller\nfrom pyautogui import press, keyDown, keyUp, typewrite, hotkey\n# from DirectKeys.directkeys import *\n# from Mods.Controllers.gamecube import GC_Controller\n\n\n# Approved mods: add here to add quick links to the controller\nmods = {'gc': {'GC_Controller': {'ssbm': 'Super Smash Bros. Melee', }},\n 'nes': {'NES': {'smb': 'Super Mario Bros.'}},\n 'pc': {'PC': {'rl': 'Rocket League', }},\n }\n\nclass MyController():\n def __init__(self, moves,):\n self.moves = moves.lower() # incoming moveset\n self.new_moves = moves.split(' ') # moves split into list\n self._move = None # primary action to be executed\n self._secondary_move = None # secondary action for combinations\n self._direction = 'Centre' # direction modifier, defaults to last called, else stick is centred\n self._modifier = None # special action that changes the execution of primary move\n self._mod_move = None # extra move if required\n self._mod_value = 0 # can represent number operations for seconds or multiple inputs as required\n self.execute = True # debug for testing\n print('Class initialized', self.moves)\n\n # Add to button/analog list to modify/add inputs\n self.buttons = {'button': ['a_press', 'b_press', 'x', 'y', 'l1', 'l2', 'r1' 'r2', 'z']}\n self.analog = {'analog': ['stick', 'dpad', 'cstick']}\n\n # Add custom macro/move sequence/function keys here to list of trigger phrases\n self.mod_phrases = {'dpad': ['d-pad', 'd pad', ],\n 'cstick': ['c stick', 'c-stick', 'see stick', 'cystic', ],\n 'a_press': ['a button', ],\n 'b_press': ['b button', 'bee button'],\n 'y': ['why button'],\n 'l1': ['l trigger'],\n 'r1': ['r trigger'],\n 'action_split': ['then', 'than', 'and then', 'and than'],\n 'test_trigram': ['ride the bull', ],\n 'test_QUADGRAM': ['enter the konami code']}\n\n # Add number values here that mess up in translation\n self.num_to_replace = {'1': ['one', 'once',],# 'half', 'quarter'],\n '2': ['two', 'twice', 'double', ], '3': ['three', 'thrice'],\n '4': 'four', '5': 'five', '6': 'six',\n '7': 'seven', '8': 'eight', '9': 'nine',\n '10': 'ten', '11': 'eleven'}\n\n # Translate incoming moves to respective keys and modifier values\n self._replace_numbers() # Replace numbers with string to integer value: ie, 'seven' to '7'\n self._replace_phrases() # Replace mod phrases in self.moves with key phrases\n\n # Example: hold up for four seconds\n self.modifiers = {'inputs': ['wait', 'hold', 'press', 'hit',],\n 'multiplier': ['times', 'once', 'twice', 'thrice', 'half', 'quarter'], # if half, make 1 and then half as a boolean in function call when hold is called\n 'pointer': ['side', 'smash', 'tilt', 'flick'],\n 'direction': ['up', 'down', 'left', 'right', 'centre', 'center'],\n 'other': ['tap', 'mash', 'half', 'combine',\n 'seconds', 'second', 'wiggle', ],\n 'action': ['run', 'go', 'walk'],\n 'buttons': self.buttons['button'],\n 'analog': self.analog['analog'],}\n\n # add custom functions here with a list of terms\n self.available_moves = {self.button_press: self.buttons['button'],\n self.analog_input: self.analog['analog'],\n 'modifiers': list(it.chain.from_iterable(self.modifiers.values()))}\n\n self._direct_buttons = {'a_press': 'a',\n 'b_press': 
'b'}\n\n # Execute moves\n if self.execute:\n print(f'Print test: {self.moves}')\n # Split moves on the actionable split phrases\n for move in self.moves.split('action_split'):\n self._execute_moves(move.lstrip())\n\n\n\n # # debug for stopping during tests\n # if self.execute:\n # # look for modifiers first\n # for move in self.new_moves:\n #\n #\n # try:\n # if move in list(it.chain.from_iterable(self.modifiers.values())):\n # print(move + '!')\n # self._set_modifiers(move=move)\n\n # for action, button in self.available_moves.items():\n # # move will execute the function, or branch further if modifier present\n # if move in button:\n # action()\n # self._set_modifiers(move=move)\n # if self._modifier:\n # self._execute_moves(move=self.moves, direction=self._direction, mod_move=self.mod_move, mod_time=self.mod_time)\n # iterate through each move and look for match in controller/modifier dictionaries\n # except:\n # print('not a modifier')\n #execute move\n #awesome one liner that won't work --> speech modifiers in the way\n # [i(direction=self._direction, mod_move=self.mod_move, mod_time=self.mod_time) for i, x in self.available_moves.items() for move in self.new_moves if move in x]\n else:\n print('First pass next')\n\n # turn all str int to int\n def _replace_numbers(self):\n \"\"\"Large numbers will typically be ready to convert to int, but numbers 0-10 sometimes\n translate as strings. Any alphanumeric values are converted to int strings.\"\"\"\n # Find and replace numbers\n for i, x in self.num_to_replace.items():\n for move in self.new_moves:\n loc = self.new_moves.index(move)\n if move in x:\n self.new_moves[loc] = i\n\n # Join new moves into moves list. Moves lists is still checked for modifiers as a whole string.\n self.moves = ' '.join(move for move in self.new_moves)\n\n\n def _replace_phrases(self):\n \"\"\"\n Takes the moves list and replaces phrases with key pair for move execution. 
Works on any length.\n \"\"\"\n\n for key, value in self.mod_phrases.items():\n pattern = re.compile(\"(%s)\" % \"|\".join(map(re.escape, value)))\n self.moves = re.sub(pattern, key, self.moves)\n self.new_moves = self.moves.split(' ')\n print(self.new_moves)\n\n\n def _set_direction(self, direction):\n self._direction = direction\n\n\n def _clear_modifiers(self):\n \"\"\"Reset modifiers if none present\"\"\"\n self._direction = 'Centre'\n self._modifier = None\n self._mod_move = None\n self._mod_value = 0\n\n\n def _set_modifiers(self, moves, verbose=False):\n \"\"\"\n Assign values for modified moves and directions.\n If no mods returns None for variables\n :param move: Modifier move to set modifier terms\n :param incoming: Move list coming in from main function\n :return:\n \"\"\"\n\n def convert_to_int(action):\n \"\"\"Converts string values to integers and adds it to the self.mod_value\"\"\"\n try:\n if float(action):\n self._mod_value = float(action)\n except:\n pass\n\n for mod, mods in self.available_moves.items():\n # clear mod_value for each pass\n self._clear_modifiers()\n for action in moves.split(' '):\n convert_to_int(action)\n if action in mods:\n try:\n print(action)\n if action in self.modifiers['buttons']:\n self._move = action\n if action in self.modifiers['analog']:\n self._move = action\n if action in self.modifiers['multiplier']:\n self._mod_move = action\n if action in self.modifiers['inputs']:\n self._mod_move = action\n if action in self.modifiers['pointer']:\n pass\n if action in self.modifiers['direction']:\n self._direction = action\n if action in self.modifiers['other']:\n if action == 'seconds':\n self._modifier = action\n if action in self.modifiers['action']:\n pass\n except:\n self._clear_modifiers()\n\n\n\n if verbose:\n print(f'\\nMove: {self._move}',\n f'Direction: {self._direction}',\n f'Modifier: {self._modifier}',\n f'Mod Move: {self._mod_move}',\n f'Mod Value: {self._mod_value}',\n )\n\n\n def _execute_moves(self, moves):\n \"\"\"Set modifiers and moves and times\"\"\"\n print('Phrase: ' + moves + '\\n')\n\n # Set move modifiers\n self._set_modifiers(moves, verbose=True)\n\n # Execute moves, that type error? 
we can ignore that\n try:\n # print(self._move)\n [i() for i, x in self.available_moves.items() if self._move in x]\n except:\n pass\n\n\n\n def button_press(self):\n \"\"\"Takes cleaned input phrase and performs button presses\"\"\"\n print(f'Button to press: {self._move}')\n self._move = self._direct_buttons[self._move]\n\n\n def press_key():\n\n if self._mod_move == 'times':\n buttons = self._move * int(self._mod_value)\n print('sending inputs')\n\n\n if self._mod_move == 'hold':\n\n def hold_press(button):\n # keyDown(self._direction)\n press(button)\n # keyUp(self._direction)\n\n # for debug/measuring fn accuracy\n start = time.time()\n\n timer = RepeatedTimer(0.01, hold_press, self._move)\n try:\n sleep(self._mod_value)\n finally:\n timer.stop()\n\n # for debug/measuring fn accuracy\n end = time.time()\n # for debug / measuring fn accuracy\n print(f'total time wasted: {end-start}')\n\n if self._mod_move == 'wait':\n time.sleep(self._mod_value)\n\n if not self._mod_value:\n self._mod_value = 0.25 # standard button press\n\n press_key()\n\n\n def analog_input(self):\n # print('Analog JAM :0')\n #\n # if self._mod_move == 'press':\n #\n # pass\n #\n # if self._mod_move == 'hold':\n #\n # def hold_press(button):\n # press(button)\n #\n # # for debug/measuring fn accuracy\n # start = time.time()\n #\n # timer = RepeatedTimer(0.01, hold_press, self._move)\n # try:\n # sleep(self._mod_value)\n # finally:\n # timer.stop()\n #\n # # for debug/measuring fn accuracy\n # end = time.time()\n # # for debug / measuring fn accuracy\n # print(f'total time wasted: {end - start}')\n\n pass\n\n\n def a_press(self, ):\n\n PressKey(A)\n time.sleep(0.1)\n ReleaseKey(A)\n\n\n def b_press(self, ):\n\n PressKey(A)\n time.sleep(0.1)\n ReleaseKey(A)\n\n\n def r_press(self):\n PressKey(R)\n time.sleep(2)\n ReleaseKey(R)\n\n\n def l_press(self):\n PressKey(L)\n time.sleep(2)\n ReleaseKey(L)\n\n\n def z_press(self):\n PressKey(Z)\n time.sleep(0.05)\n ReleaseKey(Z)\n\n\n def up(self):\n PressKey(UP)\n time.sleep(0.25)\n ReleaseKey(UP)\n\n\n def left(self):\n PressKey(LEFT)\n time.sleep(0.1)#To alter timing of presses self.modifier\n ReleaseKey(LEFT)\n\n\n def right(self):\n PressKey(RIGHT)\n time.sleep(0.1)\n ReleaseKey(RIGHT)\n\n\n def down(self):\n PressKey(DOWN)\n time.sleep(0.1)\n ReleaseKey(DOWN)\n\n\n def up_pad_press(self):\n PressKey(A)\n time.sleep(0.05)\n ReleaseKey(A)\n\n\n def down_pad_press(self):\n PressKey(A)\n ReleaseKey(A)\n time.sleep(0.05)\n PressKey(A)\n ReleaseKey(A)\n\n\n def c_up_press(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\n def c_down_press(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\n def c_left_press(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\n def c_right_press(self):\n PressKey(RIGHT)\n PressKey(A)\n time.sleep(0.25)\n ReleaseKey(RIGHT)\n ReleaseKey(A)\n\n\nclass RepeatedTimer(object):\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n#\n# moves = 
\"press stick left for four seconds then d-pad up twice then hold b button for six seconds and then then stick up right then like flick c stick down then ride the bull then enter the konami code\"\nmoves1 = \"press b button 2 times\"\n# move = \"stick\"\n# direction = \"left\" # if not defined will default to last direction called\n# modifier = \"press\"\n# mod_move = \"shield\"\n# mod_time = 10\n#\n# time.sleep(2)\nplayer = MyController(moves=moves1)\n\n# press('d', pause=20)\n\n"
},
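The RepeatedTimer at the bottom of the record above re-arms a threading.Timer from its own callback, which is what lets button_press fire press() every 10 ms while sleep() blocks the main thread. A minimal usage sketch (assumes the RepeatedTimer class as defined above; the printing callback is a hypothetical stand-in):

from time import sleep

def tick():
    print('pressed')  # stand-in for pyautogui.press(...)

timer = RepeatedTimer(0.01, tick)  # starts firing immediately, roughly every 10 ms
try:
    sleep(0.05)  # 'hold the button' for ~50 ms
finally:
    timer.stop()  # always cancel the pending threading.Timer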
{
"alpha_fraction": 0.4224834740161896,
"alphanum_fraction": 0.42358559370040894,
"avg_line_length": 31.783132553100586,
"blob_id": "9d16cc1ce8aaab7cc1785352cdfbf808127c22bf",
"content_id": "45106c765e1b7e9a2c13adafc09a672e85616f48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2722,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 83,
"path": "/Mods/Games/buttontest.py",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@brendanmoore42\n\"\"\"\nfrom directkeys import *\nimport time\nfrom moves import *\nimport speech_recognition as sr\nfrom directkeys import sp\n\n#extra modifiers for next update\n#one button options\n#options = [\"jump\", \"left\", \"right\", \n# \"crouch\", \"down\", \"up\",\n# \"a\", \"b\", \"grab\", \"shield\", \n# \"jab\", \"smash\"]\n\n#modifiers that influence timings\n#mods = [\"hold\", \"smash\", \"tilt\", \n# \"double\", \"once\", \"twice\", \n# \"triple\", \"quadruple\", \"wait\"]\n\n#instantiate Recognizer class\nr = sr.Recognizer()\n\nmoves = []\ngo = sp\n\n#opens the microphone to listen to all incoming speech\nwhile True:\n with sr.Microphone() as source:\n audio = []\n moves = []\n new_moves = []\n try:\n print(\"Show me your moves...:\")\n #microphone is listening\n audio = r.listen(source)\n moves.append(r.recognize_google(audio))\n #print what the recognizer hears\n print(moves)\n #splits the moves and checks for associations \n new_moves = [words for segments in moves for words in segments.split()]\n print(new_moves)\n for action in new_moves:\n try:\n if action == \"jump\":\n jump()\n elif action == \"laser\":\n laser()\n elif action == \"double-laser\":\n shdl()\n elif action == \"up\":\n up()\n elif action == \"left\":\n left()\n elif action == \"right\":\n right()\n elif action == \"down\" or action == \"crouch\":\n crouch()\n elif action == \"punch\" or action == \"Jab\":\n jab()\n elif action == \"double\" and action == \"Jab\":\n djab()\n elif action == \"Shield\":\n shield()\n elif action == \"grab\":\n grab()\n elif action == \"right smash\":\n Rsmash()\n elif action == \"left smash\":\n Lsmash()\n elif action == \"shine\":\n shine()\n elif action == \"wave\":\n wd_left()\n else:\n pass\n except sr.UnknownValueError:\n print(\"Could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n except:\n pass\n\n"
},
{
"alpha_fraction": 0.5402077436447144,
"alphanum_fraction": 0.5479030609130859,
"avg_line_length": 24.490196228027344,
"blob_id": "e2a25cbb15ae64f97ec68e2637bbeb4e934e1cc3",
"content_id": "4d8ed105b23969f6e67fa8749a5fe11b591b93bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2599,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 102,
"path": "/smashcom.py",
"repo_name": "PandoLab/SmashMic",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\"\"\"\nauthor: @brendanmoore42\ndate: Jan 11, 2019\n\nSmashComm: Control the game.\n\"\"\"\nimport sys\nimport time\nimport keyboard\nimport speech_recognition as sr\nfrom Mods.Controllers.Controller import MyController\n# from Mods.DirectKeys.directkeys import *\n\n#instantiate Recognizer class\nr = sr.Recognizer()\nversion = '1.0.6'\n\n\nclass SmashCom():\n \"\"\"\n To run SmashCom: Creates new SmashCom object, be sure to specify mods or games if any, or defaults to\n standard controller.\n \"\"\"\n\n def __init__(self): #, controller, game):\n # self.controller = controller\n # self.game = game\n self.lets_go()\n\n\n def lets_go(self):\n \"\"\"\n Press key to trigger microphone recursively after capture\n \"\"\"\n print('Press \"r\" to record.')\n while True:\n try:\n # Record audio\n if keyboard.is_pressed('r'):\n self.show_me_your_moves()\n break\n # Quit program\n if keyboard.is_pressed('q'):\n return False\n except:\n break\n self.lets_go()\n\n\n def show_me_your_moves(self):\n \"\"\"\n Opens microphone to take speech then send to controller for function\n \"\"\"\n with sr.Microphone() as source:\n audio = []\n moves = []\n new_moves = []\n try:\n print(\"Show me your moves! \")\n #microphone is listening\n audio = r.listen(source, timeout=20, phrase_time_limit=15)\n print('Translating...')\n moves.append(r.recognize_google(audio))\n print(moves)\n\n # run main fn\n print('sup')\n player = MyController(moves=moves)#, move=move, direction=direction, modifier=modifier, mod_move=mod_move, mod_time=mod_time)\n player.\n # execute_moves(moves=moves)\n except:\n pass\n\n# For Testing\n# main function\ndef main(args):\n \"\"\"\n args[1] = Game\n args[2] = Controller\n\n Run:\n $ python smashcom.py melee\n\n to specifiy controller, add as argument:\n $ python smashcom.py melee gc\n \"\"\"\n\n # create file names to use in functions\n game = args[1]\n controller = args[2]\n\n if controller.lower() in mods.keys():\n print(controller)\n print(controller, game)\n\n #debug_init = SmashCom(controller=controller, game=game)\n\nif __name__ == '__main__':\n # temp_args = ['smashcom.py', 'ssbm']\n # main(temp_args)\n SmashCom()"
}
] | 6 |
angelng1412/erlenmeyer
|
https://github.com/angelng1412/erlenmeyer
|
a55c2352369c3cc9ccf1e30b81ba5202d173c7f4
|
e5b863e03fdacf625ed6eaee2b72632447a449f7
|
22351649408a35e4bbd348e42eda931a9d11337b
|
refs/heads/master
| 2021-06-30T09:57:06.278724 | 2017-09-20T23:18:48 | 2017-09-20T23:18:48 | 104,254,542 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5890804529190063,
"alphanum_fraction": 0.5890804529190063,
"avg_line_length": 17,
"blob_id": "bc3050248996198241579e5bf6a59e2f79ae4ac0",
"content_id": "1de5c341a0a7deed09a06a0e3db43cbf844d4409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 19,
"path": "/app.py",
"repo_name": "angelng1412/erlenmeyer",
"src_encoding": "UTF-8",
"text": "from flask import Flask\n\nmy_app = Flask(__name__)\n\n@my_app.route('/')\ndef first():\n return \"This is the first page!\"\n\n@my_app.route('/second')\ndef second():\n return \"This is the second page!\"\n\n@my_app.route('/third')\ndef third():\n return \"This is the third page!\"\n\nif __name__ == '__main__':\n my_app.debug = True\n my_app.run()\n\n \n"
}
] | 1 |
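The three routes in the record above are easy to exercise without a browser via Flask's built-in test client. A minimal sketch (assumes app.py as shown; the test module name is hypothetical):

# test_app.py (hypothetical)
from app import my_app

client = my_app.test_client()
for path in ('/', '/second', '/third'):
    response = client.get(path)
    print(path, response.status_code, response.data.decode())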
MridulaBontha/HackCU-2020
|
https://github.com/MridulaBontha/HackCU-2020
|
93df72ce552db884948d651357bbe5c0f837aa05
|
8ff3b2c02436ad85a70c5715205e2d5d58f5c793
|
175a4b3485eca68851849fe7564327f4652f4c34
|
refs/heads/master
| 2021-01-14T01:31:27.226791 | 2020-02-23T17:46:17 | 2020-02-23T17:46:17 | 242,557,642 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5933333039283752,
"alphanum_fraction": 0.6213333606719971,
"avg_line_length": 25.55555534362793,
"blob_id": "3e8390fbd65c27d7ecb44968505a1877538d0375",
"content_id": "52babf99aaf21813067fc77f4580e3c7d674ac7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 27,
"path": "/anonymous_Webpage.py",
"repo_name": "MridulaBontha/HackCU-2020",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request\r\nfrom twitter_bot import *\r\n\r\napp = Flask(__name__)\r\nimport nltk\r\nnltk.download('vader_lexicon')\r\n\r\napi = create_api()\r\n\r\n\r\[email protected](\"/\")\r\ndef hello():\r\n return '<form action=\"/echo\" method=\"POST\"><input style=\"height:400px; width:600px\" name=\"text\" placeholder=\"Enter your content anonymously\"><br><br><input type=\"submit\" value=\"Submit\"></form>'\r\n\r\n\r\[email protected](\"/echo\", methods=['POST'])\r\ndef echo():\r\n with open(\"content.txt\",\"w\") as content_file:\r\n content = request.form['text']\r\n content_file.write(content)\r\n content_file.close()\r\n api.update_status(content)\r\n return \"You said: \" + content\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host=\"10.219.134.189\",port=\"8000\")\r\n\r\n\r\n\r\n"
},
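The twitter_bot module imported above is not included in this dump. Given the twitter_keys.py record below and the api.update_status() call, create_api is presumably a thin tweepy wrapper; a minimal sketch under that assumption (hypothetical reconstruction, not the real module):

# twitter_bot.py (hypothetical; the actual module is not in the dump)
import tweepy
from twitter_keys import consumer_key, consumer_secret, access_token, access_token_secret

def create_api():
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # wait_on_rate_limit keeps the bot from failing on HTTP 429 responses
    return tweepy.API(auth, wait_on_rate_limit=True)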
{
"alpha_fraction": 0.6804123520851135,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 22.75,
"blob_id": "a6e084dd80e4c9f4d3eac170d7a67021360ed82d",
"content_id": "7e531b629f2736725f562c42e27e2cc15e4fd5f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 4,
"path": "/twitter_keys.py",
"repo_name": "MridulaBontha/HackCU-2020",
"src_encoding": "UTF-8",
"text": "consumer_key = \"XXXX\"\r\nconsumer_secret = \"XXX\"\r\naccess_token = \"XXX\"\r\naccess_token_secret = \"XXX\""
},
{
"alpha_fraction": 0.772455096244812,
"alphanum_fraction": 0.7844311594963074,
"avg_line_length": 43.54545593261719,
"blob_id": "2f9b354092bfd785ab0420853ac441051bc76809",
"content_id": "347f3b21a902f8af78eac104e856dcb820780d4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 11,
"path": "/README.md",
"repo_name": "MridulaBontha/HackCU-2020",
"src_encoding": "UTF-8",
"text": "# HackCU-2020\"\r\n\r\nProject 1- Sleepy face detector\r\n\r\nA real time eye tracking app that tells if the driver's eyes are closed or open. This tool could be used as a potential application for monitoring drowsy drivers and alert them with an alarm.\r\n\r\nProject 2- Go anonymous\r\n\r\nA web based application for users to post anonymously on Twitter using a twitter bot and perform sentiment analytics on the received replies on that tweet.\r\n\r\nThis is also supported as a windows based standalone application.\r\n"
},
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.6088795065879822,
"avg_line_length": 35.91999816894531,
"blob_id": "fc8bff28e908e31a642511c8a6b9aeeeb921dd9a",
"content_id": "b68ae556e3682f8e7985d449d4231e106325565a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 946,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 25,
"path": "/read_tweets.py",
"repo_name": "MridulaBontha/HackCU-2020",
"src_encoding": "UTF-8",
"text": "import sys\r\n\r\nimport tweepy\r\nfrom nltk.sentiment import SentimentIntensityAnalyzer\r\n\r\nfrom twitter_bot import create_api\r\n\r\napi = create_api()\r\nsid = SentimentIntensityAnalyzer()\r\n\r\ndef read_tweet_replies(content):\r\n with open(\"replies.txt\", \"w\") as tweet_file:\r\n new_tweets = api.user_timeline(screen_name=\"AnonymousBot15\")\r\n for tweet in new_tweets:\r\n if tweet.text == content:\r\n replies = tweepy.Cursor(api.search, q='to:{}'.format(\"AnonymousBot15\"),\r\n since_id=tweet.id, tweet_mode='extended').items()\r\n for reply in replies:\r\n tweet_file.write(str(reply.full_text+\",\"+max(sid.polarity_scores(str(reply.full_text)),key=sid.polarity_scores(str(reply.full_text)).get))+\"\\n\")\r\n tweet_file.close()\r\n\r\nif __name__ == '__main__':\r\n file = open(\"content.txt\")\r\n content = file.read().strip()\r\n read_tweet_replies(content)"
}
] | 4 |
pfalcon/cppmicroweb
|
https://github.com/pfalcon/cppmicroweb
|
818ed0eb12afe7acec1c455b6612da1bab139cbb
|
68d3c2ca4fa50fa304ff9a00233e0027e079fe00
|
dee3fa09f0ee14d6449b58ff446d276dc37632bb
|
refs/heads/master
| 2020-12-24T16:50:00.233690 | 2013-01-21T18:16:01 | 2013-01-21T18:16:01 | 7,581,334 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5548149347305298,
"alphanum_fraction": 0.559550940990448,
"avg_line_length": 28.848167419433594,
"blob_id": "9cc5624b1595b734184f4961c84c221d02e3957b",
"content_id": "5f1cc3c7b33b716f1c8a8092fab1a33a26a2d334",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5701,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 191,
"path": "/jinja2cpp.py",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "import sys\nimport re\n\nimport jinja2\nimport jinja2.meta\nfrom jinja2 import nodes\nfrom jinja2.nodes import *\nfrom jinja2.compiler import CodeGenerator, CompilerExit, Frame\nfrom jinja2.ext import Extension\n\n\nclass ArgStmtExtension(Extension):\n\n tags = set(['arg'])\n\n def parse(self, parser):\n token = next(parser.stream)\n tokens = []\n while parser.stream.current.type != 'block_end':\n tokens.append(next(parser.stream).value)\n# print tokens\n node = parser.free_identifier(lineno=token.lineno)\n node.name = (\"\".join(tokens[:-1]).encode(\"utf8\"), tokens[-1])\n return node\n\n\nclass MyCodeGenerator(CodeGenerator):\n\n def __init__(self, *args, **kwargs):\n CodeGenerator.__init__(self, *args, **kwargs)\n self.args = {}\n self.prototype = None\n\n def visit_Template(self, node, frame=None):\n self.writeline('#include \"microweb.hpp\"')\n #self.writeline(\"using namespace std;\")\n self.writeline(\"\")\n self.prototype = \"void %s(ostream& out\" % self.name\n comma = True\n for argnode in node.find_all(InternalName):\n type, name = argnode.name\n self.args[name] = type\n if comma:\n self.prototype += \", \"\n self.prototype += \"const \" + type + \"& \" + name\n comma = True\n self.prototype += \")\"\n self.writeline(self.prototype)\n\n have_extends = node.find(nodes.Extends) is not None\n eval_ctx = EvalContext(self.environment, self.name)\n frame = Frame(eval_ctx)\n frame.inspect(node.body)\n frame.toplevel = frame.rootlevel = True\n frame.require_output_check = have_extends and not self.has_known_extends\n\n self.writeline(\"{\")\n self.indent()\n self.blockvisit(node.body, frame)\n self.outdent()\n self.writeline(\"}\")\n self.writeline(\"\")\n\n def visit_For(self, node, frame):\n# print node\n iter_var = node.target.name\n seq_var = node.iter.name\n seq_type = self.args[seq_var]\n seq_container, item_type = self.parse_container_type(seq_type)\n self.writeline(\"for (const %s& %s: %s) {\" % (item_type, iter_var, seq_var))\n self.indent()\n self.blockvisit(node.body, frame)\n self.outdent()\n self.writeline(\"}\")\n# print list(node.iter_child_nodes())\n\n def visit_Output(self, node, frame):\n body = []\n for child in node.nodes:\n try:\n const = child.as_const(frame.eval_ctx)\n body.append(const)\n except:\n body.append(child)\n self.writeline(\"out\")\n for item in body:\n self.write(\" << \")\n if type(item) == type(u\"\"):\n self.write(self.c_string(item))\n else:\n self.visit(item, frame)\n self.write(\";\")\n\n def visit_Call(self, node, frame, forward_caller=False):\n self.visit(node.node, frame)\n self.write('(')\n extra_kwargs = forward_caller and {'caller': 'caller'} or None\n self.signature(node, frame, extra_kwargs)\n self.write(')')\n\n def visit_Getattr(self, node, frame):\n self.visit(node.node, frame)\n self.write(\".\" + node.attr)\n\n def visit_Getitem(self, node, frame):\n self.visit(node.node, frame)\n self.write(\"[\")\n self.visit(node.arg, frame)\n self.write(\"]\")\n\n def visit_TemplateData(self, node, frame):\n# self.write(repr(node.as_const(frame.eval_ctx)))\n 1 / 0\n print \"hello\"\n return \"!!!!!!!!!!!!!!!\"\n\n def visit_Name(self, node, frame):\n if node.ctx == 'store' and frame.toplevel:\n frame.toplevel_assignments.add(node.name)\n self.write(node.name)\n frame.assigned_names.add(node.name)\n\n def visit_InternalName(self, node, frame):\n# type, name = node.name\n# self.args[name] = type\n# print self.args\n pass\n\n def visit_Const(self, node, frame):\n val = node.value\n if isinstance(val, str):\n 
self.write(self.c_string(val))\n else:\n self.write(repr(val))\n\n\n def signature(self, node, frame, extra_kwargs=None):\n comma = False\n for arg in node.args:\n if comma:\n self.write(', ')\n self.visit(arg, frame)\n comma = True\n\n def blockvisit(self, nodes, frame):\n try:\n for node in nodes:\n self.visit(node, frame)\n except CompilerExit:\n pass\n\n\n def c_string(self, s):\n s = repr(s.encode(\"utf8\"))\n s = s[1:-1]\n s = s.replace('\"', '\\\\\"')\n return '\"' + s + '\"'\n\n def parse_container_type(self, type):\n m = re.match(r\"(.+)<(.+)>$\", type)\n assert m\n return m.group(1), m.group(2)\n\n# def write(self, sth):\n# sys.stdout.write(sth)\n\n\nenv = jinja2.Environment(extensions=[ArgStmtExtension])\n\ntpl = open(sys.argv[1])\nast = env.parse(tpl.read())\n#print ast\n\ntpl_name = sys.argv[1].rsplit('.', 1)[0]\n# Use underscore to avoid \"*.tpl.cpp\" being compiled in \"*.tpl\" \"binary\"\n# and thus overwriting original template\nf = open(tpl_name + \"_tpl.cpp\", \"w\")\ncodegen = MyCodeGenerator(ast.environment, tpl_name, sys.argv[1], f)\ncodegen.visit(ast)\nf.close()\nf = open(tpl_name + \".tpl.hpp\", \"w\")\nf.write(codegen.prototype + \";\\n\")\nf.close()\n\nf = open(sys.argv[1] + \".render\", \"w\")\nm = re.match(r\".+?\\((.+)\\)$\", codegen.prototype)\nargs = m.group(1).split(\", \")\narg_names = [x.rsplit(\" \", 1)[1] for x in args]\nf.write('#include \"%s\"\\n' % (sys.argv[1] + \".hpp\"));\nf.write(\"%s(%s);\\n\" % (tpl_name, \", \".join(arg_names)))\nf.close()\n"
},
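To make the generator above concrete: the custom {% arg %} tag carries a C++ type through to the emitted prototype, and visit_For becomes a range-for. The iter.tpl referenced by the Makefile is not included in this dump; a hypothetical template parsed the same way as the driver code at the bottom of the record:

import jinja2

env = jinja2.Environment(extensions=[ArgStmtExtension])
ast = env.parse(u"{% arg vector<int> vec %}{% for i in vec %}Value: {{ i }}\n{% endfor %}")
# Feeding this AST to MyCodeGenerator would emit roughly:
#   void iter(ostream& out, const vector<int>& vec)
#   { for (const int& i: vec) { out << "Value: " << i << "\n"; } }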
{
"alpha_fraction": 0.5369496941566467,
"alphanum_fraction": 0.5487421154975891,
"avg_line_length": 26.65217399597168,
"blob_id": "c79bb8b4a6f0dee92627e3b753c616221e0638fc",
"content_id": "4c79e89e9c9c089f2f703c14b8cc6801bbebf154",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1272,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 46,
"path": "/mongoose-handler.cpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n#include \"mongoose.h\"\n#include \"microweb.hpp\"\n//using namespace std;\n\nstatic void *callback(enum mg_event event,\n struct mg_connection *conn) {\n const struct mg_request_info *request_info = mg_get_request_info(conn);\n\n if (event == MG_NEW_REQUEST) {\n string path(request_info->uri);\n cout << path << endl;\n ostringstream buf;\n route(buf, path);\n#if 0\n// printf(\"%s\\n\", request_info->uri);\n// char content[1024];\n int content_length = snprintf(content, sizeof(content),\n \"Hello from mongoose! Remote port: %d\",\n request_info->remote_port);\n#endif\n mg_printf(conn,\n \"HTTP/1.1 200 OK\\r\\n\"\n \"Content-Type: text/plain\\r\\n\"\n \"Content-Length: %d\\r\\n\" // Always set Content-Length\n \"\\r\\n\"\n \"%s\",\n buf.str().size(), buf.str().c_str());\n // Mark as processed\n return (void*)\"\";\n } else {\n return NULL;\n }\n}\n\nint main(void) {\n struct mg_context *ctx;\n const char *options[] = {\"listening_ports\", \"8080\", NULL};\n\n ctx = mg_start(&callback, NULL, options);\n getchar(); // Wait until user hits \"enter\"\n mg_stop(ctx);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6041909456253052,
"alphanum_fraction": 0.6076833605766296,
"avg_line_length": 30.814815521240234,
"blob_id": "9374a05ab35548b82e894c3d090777a8a44af146",
"content_id": "9958f74b4d96fd8213d6b6d8cd5df7357a15060d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 859,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 27,
"path": "/smarter_string.hpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#ifndef _SMARTER_STRING_HPP\n#define _SMARTER_STRING_HPP\n#include <string>\n#include <boost/algorithm/string.hpp>\n\nclass String: public std::string\n{\npublic:\n String() : std::string() {};\n String(char *s) : std::string(s) {};\n String(const char *s) : std::string(s) {};\n String(const std::string& s) : std::string(s) {};\n String(const String& s) : std::string(s) {};\n template<class InputIterator> String(InputIterator begin, InputIterator end) : std::string(begin, end) {}\n\n operator int() { return atoi(c_str()); }\n String strip() { return boost::trim_copy(*this); }\n String substr(int beg, int end = 0)\n {\n if (end <= 0)\n end = this->length() + end - 1;\n return this->std::string::substr(beg, end);\n }\n// vector<string> split() { return boost::trim_copy(*this); }\n};\n\n#endif //_SMARTER_STRING_HPP\n"
},
{
"alpha_fraction": 0.5095304250717163,
"alphanum_fraction": 0.5099953413009644,
"avg_line_length": 21.17525863647461,
"blob_id": "3acc1a9deb6f183711a30206a56a7b8e9ea4e7e9",
"content_id": "0df8fe9790f3f26af2f19251d4d2279e4978c2be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 97,
"path": "/anyvalue-standalone.hpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <list>\n#include <map>\n#include \"smarter_string.hpp\"\n#include <iostream>\n#include \"ostream_container.hpp\"\n\nclass Val;\ntypedef std::vector<Val> Vector;\ntypedef std::list<Val> List;\ntypedef std::map<Val, Val> Map;\n\nclass Val\n{\npublic:\n enum Type {UNK, EMPTY, INT, STRING, VECTOR, LIST, MAP};\n int _type;\n int _int;\n String _string;\n Vector _vector;\n List _list;\n Map _map;\n\n Val() : _type(EMPTY) {}\n Val(int v) : _type(INT), _int(v) {}\n Val(const char *v) : _type(STRING), _string(v) {}\n Val(String v) : _type(STRING), _string(v) {}\n Val(Vector v) : _type(VECTOR), _vector(v) {}\n Val(List v) : _type(LIST), _list(v) {}\n Val(Map v) : _type(MAP), _map(v) {}\n\n class IndexError : std::exception\n {\n const char *msg;\n public:\n IndexError(const char *s) : msg(s) {}\n };\n\n Val& operator [](int i) {\n switch (_type) {\n case Val::STRING:\n if (i >= _string.length())\n throw IndexError(\"string index out of range\");\n {\n Val* a = new Val(_string.substr(i, 1));\n return *a;\n }\n case Val::VECTOR:\n return _vector.Vector::operator[](i);\n case Val::MAP:\n return _map.Map::operator[](i);\n default:\n throw IndexError(\"Undefined operator[int]\");\n }\n }\n\n Val& operator [](String s) {\n return _map.Map::operator[](s);\n }\n};\n\nbool operator<(const Val& x, const Val& y)\n{\n switch (x._type) {\n case Val::STRING:\n return x._string < y._string;\n }\n\n return true;\n}\n\nstd::ostream& operator<<(std::ostream& out, const Val& val)\n{\n switch (val._type) {\n case Val::EMPTY:\n out << \"None\";\n break;\n case Val::INT:\n out << val._int;\n break;\n case Val::STRING:\n out << val._string;\n break;\n case Val::VECTOR:\n out << val._vector;\n break;\n case Val::LIST:\n out << val._list;\n break;\n case Val::MAP:\n out << val._map;\n break;\n default:\n out << \"UNK<\" << val._type << \">\";\n }\n return out;\n}\n"
},
{
"alpha_fraction": 0.5547652840614319,
"alphanum_fraction": 0.5625889301300049,
"avg_line_length": 19.08571434020996,
"blob_id": "c1515e3f18731228a45c37f89512c2318347fec7",
"content_id": "e144439b05e4c6c67079e8e811c607e1e848622c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1406,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 70,
"path": "/dispatcher.cpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#include \"microweb.hpp\"\n#include \"pcre++.h\"\n\nusing namespace pcrepp;\n\nclass UrlParams\n{\nprotected:\n const Pcre& pcre_;\npublic:\n UrlParams(const Pcre& pcre) : pcre_(pcre) {};\n string named(const string& name, const string& def=NULL) const { return pcre_.named(name, def); }\n string no(int idx) const { return pcre_.get_match(idx); }\n};\n//typedef Pcre& UrlParams;\n\nvoid index(ostream& out, UrlParams p)\n{\n string eol = \"\\n\";\n out << \"foo\" + eol;\n}\n\nvoid num(ostream& out, UrlParams p)\n{\n out << \"Number is: \" << hex << 42 << ' ' << dec << p.named(\"val\", \"NaN\") << endl;\n}\n\nvoid iter(ostream& out, UrlParams p)\n{\n int val = p.named(\"val\", \"10\");\n vector<int> vec;\n for (int i = 0; i < val; i++)\n vec.append(i);\n #include \"iter.tpl.render\"\n}\n\ntypedef void (*handler_t)(ostream& out, UrlParams p);\n\nstruct Route\n{\n const char *regexp;\n handler_t handler;\n};\n\nRoute routes[] = {\n {\"/num/(?<val>[0-9]+)?\", num},\n {\"/iter/(?<val>[0-9]+)?\", iter},\n {\"/\", index},\n {NULL}\n};\n\nvoid route(ostream& out, const string& path)\n{\n for (Route *r = routes; r->regexp; r++) {\n Pcre regexp(r->regexp, PCRE_ANCHORED);\n if (regexp.search(path)) {\n r->handler(out, UrlParams(regexp));\n break;\n }\n }\n}\n\n#if 0\nint main(int argc, char *argv[])\n{\n ostringstream buf;\n route(buf, argv[1]);\n cout << buf.str();\n}\n#endif\n"
},
{
"alpha_fraction": 0.7151898741722107,
"alphanum_fraction": 0.7310126423835754,
"avg_line_length": 20.066667556762695,
"blob_id": "4157d2a847e2d82ed3bed181caa377ec49e17676",
"content_id": "3b927d24b35a116127e048ae1a65783f3663f448",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 15,
"path": "/Makefile",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "CXX = g++-4.7\nCXXFLAGS = -std=gnu++11\nLDFLAGS = -L.\nLDLIBS = -lpcre++ -lpcre -lpthread -ldl\n\n%.tpl.cpp: %.tpl\n\tpython jinja2cpp.py $^\n\ndispatcher: dispatcher.cpp mongoose-handler.cpp mongoose/mongoose.o iter.tpl.cpp\n\nmongoose/mongoose.o: mongoose/mongoose.c\n\ndispatcher: microweb.hpp\n\nparse_iwlist: parse_iwlist.cpp\n"
},
{
"alpha_fraction": 0.5712000131607056,
"alphanum_fraction": 0.5752000212669373,
"avg_line_length": 20.18644142150879,
"blob_id": "af0ead0d3837d6d789e3bd9765a87c7cb6564c42",
"content_id": "ce1404bd9db6b2a1f304527eb31fb5424d5e4aa5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1250,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 59,
"path": "/anyvalue-variant.hpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <list>\n#include <map>\n#define BOOST_VARIANT_NO_FULL_RECURSIVE_VARIANT_SUPPORT\n#include <boost/variant.hpp>\n#include \"smarter_string.hpp\"\n#include <iostream>\n#include \"ostream_container.hpp\"\n\ntypedef boost::make_recursive_variant<\n int,\n double,\n// std::string,\n String,\n std::vector< boost::recursive_variant_ >,\n std::list< boost::recursive_variant_ >,\n std::map< boost::recursive_variant_, boost::recursive_variant_ >\n>::type Val_;\n\nclass Val;\ntypedef std::vector<Val> Vector;\ntypedef std::list<Val> List;\ntypedef std::map<Val, Val> Map;\n\nclass Val\n{\npublic:\n Val_ _v;\n Val() : _v() {}\n// Val2(Val2& v) : Val((Val)v) {}\n Val(int v) : _v(v) {}\n Val(const char *v) : _v(v) {}\n Val(String v) : _v(v) {}\n// Val(Val& v) : _v(v) {}\n Val(const Map& v) : _v(v) {}\n// Val2(Map v) : _v(v) {}\n\n Val& operator [](String s) {\n Map m = boost::get<Map>(_v);\n\n// return Val2(m[s]);\n }\n};\n\nbool operator<(const Val& x, const Val& y)\n{\n switch (x._v.which()) {\n case 2:\n return boost::get<String>(x._v) < boost::get<String>(y._v);\n }\n\n return true;\n}\n\nstd::ostream& operator <<(std::ostream& out, const Val& val)\n{\n out << val._v;\n return out;\n}\n"
},
{
"alpha_fraction": 0.6950146555900574,
"alphanum_fraction": 0.696480929851532,
"avg_line_length": 17.432432174682617,
"blob_id": "412d915f66ccb66ad23a8faa547d53ace89c99f4",
"content_id": "d8da7df598a0975abbff46b9a5e6dc3d58b16ef2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 37,
"path": "/microweb.hpp",
"repo_name": "pfalcon/cppmicroweb",
"src_encoding": "UTF-8",
"text": "#ifndef _MICROWEB_HPP\n#define _MICROWEB_HPP\n#include <string>\n#include <iostream>\n#include <sstream>\n#include <iterator>\n#include <vector>\n#include <stdlib.h>\n#include <ostream_container.hpp>\n#include <anyvalue.hpp>\n#include \"pcre++.h\"\n\n//using namespace std;\nusing std::ostream;\nusing std::ostringstream;\nusing std::cout;\nusing std::dec;\nusing std::hex;\nusing std::endl;\n\nusing pcrepp::Pcre;\n\n#if 0\ntemplate <typename T>\nclass vector : public std::vector<T>\n{\npublic:\n vector& append(T item) { this->push_back(item); return *this; }\n// vector& append(const T&)\n};\n#else\nusing std::vector;\n#endif\n\nvoid route(ostream& out, const String& path);\n\n#endif //_MICROWEB_HPP\n"
}
] | 8 |
Lupino/aiogear
|
https://github.com/Lupino/aiogear
|
4938dbcc0f5cab8f13f6d1053efb886e8dc87985
|
b8ab4d279ff1b9a37d3a50aa04d4f533a8ea2239
|
e8e4d724fb549612426e091cfbf60e06f4515db0
|
refs/heads/master
| 2016-08-03T15:26:35.803515 | 2013-12-31T07:22:04 | 2013-12-31T07:22:04 | 13,896,307 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6860465407371521,
"alphanum_fraction": 0.6860465407371521,
"avg_line_length": 20.5,
"blob_id": "1f407ac01748eacc3c7b39fdb1945d904cb220c3",
"content_id": "3ff9ae4bca0188bcd65ae19a8a3eb42701f160a6",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 4,
"path": "/aiogear/__init__.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "from .worker import Worker\nfrom .client import Client\n\n__all__ = ['Worker', 'Client']\n"
},
{
"alpha_fraction": 0.6625258922576904,
"alphanum_fraction": 0.693581759929657,
"avg_line_length": 27.41176414489746,
"blob_id": "96ed5bddcf537f2e88a6362a11ef5b662c515eb9",
"content_id": "1714f10af724e6db7fcd8aaa05da10c586a1deda",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 34,
"path": "/examples/client.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import logging\nimport sys\nimport os\ntry:\n from aiogear import Client\nexcept ImportError:\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n from aiogear import Client\nfrom aiogear.common import logger\n\nimport asyncio\nlogger.setLevel(logging.DEBUG)\nFORMAT = '%(asctime)-15s - %(message)s'\nformater = logging.Formatter(FORMAT)\nch = logging.StreamHandler()\nch.setFormatter(formater)\nlogger.addHandler(ch)\n\ndef main():\n client = Client()\n # start the gearman server\n # sudo gearmand -d -p 4730\n # sudo gearmand -d -p 4731\n # sudo gearmand -d -p 4732\n yield from client.add_server('localhost', 4730)\n yield from client.add_server('localhost', 4731)\n yield from client.add_server('localhost', 4732)\n for i in range(1000):\n task = yield from client.do('echo', b'ffddkk', background=False)\n result = yield from task.result\n print(result)\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n"
},
{
"alpha_fraction": 0.5703834891319275,
"alphanum_fraction": 0.571270227432251,
"avg_line_length": 31.688405990600586,
"blob_id": "29a18b19754064e63748e9da7501e37ee9b8ddb6",
"content_id": "a98018b7269e4a64660797876229ba9044764b0f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4511,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 138,
"path": "/aiogear/worker.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import asyncio\nfrom . import common\n\n__all__ = ['Worker']\n\nclass WorkerAgent(common.BaseAgent):\n __slots__ = ['_worker']\n def __init__(self, worker, reader, writer, extra = {}):\n common.BaseAgent.__init__(self, reader, writer, extra)\n self._worker = worker\n\n @asyncio.coroutine\n def add_func(self, func_name, timeout = 0):\n if timeout > 0:\n yield from self.send(common.CAN_DO_TIMEOUT,\n {'func_name': func_name, 'timeout': timeout})\n else:\n yield from self.send(common.CAN_DO, {'func_name': func_name})\n\n @asyncio.coroutine\n def work(self):\n yield from self.send(common.GRAB_JOB)\n cmd_type, cmd_args = yield from self.read()\n if cmd_type == common.NO_JOB:\n yield from self.sleep()\n elif cmd_type == common.JOB_ASSIGN:\n func_name = common.to_str(cmd_args['func_name'])\n if self._worker.has_func(func_name):\n yield from self._worker.run_func(func_name, Job(self, cmd_args))\n else:\n yield from self.send(common.CANT_DO, {'func_name': cmd_args['func_name']})\n\n @asyncio.coroutine\n def set_client_id(self, client_id):\n yield from self.send(common.SET_CLIENT_ID, {'client_id': client_id})\n\n def sleep(self):\n yield from self.send(common.PRE_SLEEP)\n return (yield from self.read())\n\nclass Job(object):\n def __init__(self, agent, cmd_args):\n self._agent = agent\n for key, val in cmd_args.items():\n if key != 'workload':\n val = common.to_str(val)\n\n setattr(self, key, val)\n\n self.handle = self.job_handle\n\n @asyncio.coroutine\n def send(self, cmd_type, data=None):\n if data:\n yield from self._agent.send(cmd_type,\n {'job_handle': self.handle, 'workload': data})\n else:\n yield from self._agent.send(cmd_type, {'job_handle': self.handle})\n\n @asyncio.coroutine\n def complete(self, data):\n yield from self.send(common.WORK_COMPLETE, data)\n\n @asyncio.coroutine\n def fail(self):\n yield from self.send(common.WORK_FAIL)\n\n @asyncio.coroutine\n def status(self, numerator, denominator):\n yield from self._agent.send(common.WORK_STATUS, {\n 'job_handle': self.handle,\n 'numerator': numerator,\n 'denominator': denominator\n })\n\n @asyncio.coroutine\n def data(self, data):\n yield from self.send(common.WORK_DATA, data)\n\n @asyncio.coroutine\n def warning(self, data):\n yield from self.send(common.WORK_WARNING, data)\n\n @asyncio.coroutine\n def exception(self, data):\n yield from self.send(common.WORK_EXCEPTION, data)\n\nclass Worker(object):\n __slots__ = ['_agents', '_funcs', '_sem']\n def __init__(self, max_tasks=5):\n self._agents = []\n self._funcs = {}\n self._sem = asyncio.Semaphore(max_tasks)\n\n def work(self):\n for agent in self._agents:\n def start(t=None, agent=None):\n if t:\n agent = t.agent\n exception = t.exception()\n if isinstance(exception, ConnectionResetError):\n self._agents.remove(agent)\n return\n task = asyncio.Task(agent.work())\n task.agent = agent\n task.add_done_callback(start)\n start(agent=agent)\n\n @asyncio.coroutine\n def add_func(self, func_name, callback, timeout=0):\n self._funcs[func_name] = callback\n for agent in self._agents:\n yield from agent.add_func(func_name, timeout)\n\n def has_func(self, func_name):\n if func_name in self._funcs:\n return True\n return False\n\n @asyncio.coroutine\n def run_func(self, func_name, job):\n yield from self._sem.acquire()\n func = self._funcs[func_name]\n task = asyncio.Task(func(job))\n task.add_done_callback(lambda t: self._sem.release())\n return task\n\n @asyncio.coroutine\n def add_server(self, host, port, ssl = False):\n reader, writer = yield from asyncio.open_connection(host, port, 
ssl=ssl)\n agent = WorkerAgent(self, reader, writer,\n {'host': host, 'port': port, 'ssl': ssl})\n self._agents.append(agent)\n\n @asyncio.coroutine\n def set_client_id(self, client_id):\n for agent in self._agents:\n yield from agent.set_client_id(client_id)\n"
},
{
"alpha_fraction": 0.5566037893295288,
"alphanum_fraction": 0.5660377144813538,
"avg_line_length": 12.1875,
"blob_id": "c6e5fd43045fd46f74e36bd5571b442a0df8beb9",
"content_id": "8c8ce532baf314d01600e1b9c764a491e67b12bc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 212,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 16,
"path": "/README.md",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "gearman client/worker for asyncio\n==============================\n\n\nRequirements\n------------\n\n- Python 3.3\n\n- asyncio http://code.google.com/p/tulip/\n\n\nLicense\n-------\n\ngearman is offered under the BSD license.\n\n"
},
{
"alpha_fraction": 0.6088607311248779,
"alphanum_fraction": 0.6177214980125427,
"avg_line_length": 29.384614944458008,
"blob_id": "2643087401ce96a195a6e083c77e435019e0a558",
"content_id": "0683f00d3bfffcb38ed9aafe5e122b4b795b7574",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 790,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 26,
"path": "/setup.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import os\nfrom setuptools import setup, find_packages\n\nversion = '0.0.1'\n\ninstall_requires = ['asyncio']\n\ndef read(f):\n return open(os.path.join(os.path.dirname(__file__), f)).read().strip()\n\nsetup(name='aiogear',\n version=version,\n description=('gearman client/worker for asyncio'),\n long_description=read('README.md'),\n classifiers=[\n 'License :: OSI Approved :: BSD License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.3'],\n author='Li Meng Jun',\n author_email='[email protected]',\n # url='https://github.com/fafhrd91/asynchttp/',\n license='BSD',\n packages=find_packages(),\n install_requires = install_requires,\n include_package_data = True)\n"
},
{
"alpha_fraction": 0.5415226817131042,
"alphanum_fraction": 0.5513320565223694,
"avg_line_length": 33.77131652832031,
"blob_id": "7c2cfa77dd0b0c86804a7c56ed31c5a346f9e648",
"content_id": "72edae43ace2e7a1733eb41c4f582b05fb7aa985",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8971,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 258,
"path": "/aiogear/common.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import struct\nimport asyncio\nimport logging\n\n__all__ = ['BaseAgent', 'to_bytes', 'to_str', 'GearmanError', 'ProtocolError']\n\nlogger = logging.getLogger('aiogear')\n\nCAN_DO = 1 # REQ Worker\nCANT_DO = 2 # REQ Worker\nRESET_ABILITIES = 3 # REQ Worker\nPRE_SLEEP = 4 # REQ Worker\n# (unused) = 5 # - -\nNOOP = 6 # RES Worker\nSUBMIT_JOB = 7 # REQ Client\nJOB_CREATED = 8 # RES Client\nGRAB_JOB = 9 # REQ Worker\nNO_JOB = 10 # RES Worker\nJOB_ASSIGN = 11 # RES Worker\nWORK_STATUS = 12 # REQ Worker\n # RES Client\nWORK_COMPLETE = 13 # REQ Worker\n # RES Client\nWORK_FAIL = 14 # REQ Worker\n # RES Client\nGET_STATUS = 15 # REQ Client\nECHO_REQ = 16 # REQ Client/Worker\nECHO_RES = 17 # RES Client/Worker\nSUBMIT_JOB_BG = 18 # REQ Client\nERROR = 19 # RES Client/Worker\nSTATUS_RES = 20 # RES Client\nSUBMIT_JOB_HIGH = 21 # REQ Client\nSET_CLIENT_ID = 22 # REQ Worker\nCAN_DO_TIMEOUT = 23 # REQ Worker\nALL_YOURS = 24 # REQ Worker\nWORK_EXCEPTION = 25 # REQ Worker\n # RES Client\nOPTION_REQ = 26 # REQ Client/Worker\nOPTION_RES = 27 # RES Client/Worker\nWORK_DATA = 28 # REQ Worker\n # RES Client\nWORK_WARNING = 29 # REQ Worker\n # RES Client\nGRAB_JOB_UNIQ = 30 # REQ Worker\nJOB_ASSIGN_UNIQ = 31 # RES Worker\nSUBMIT_JOB_HIGH_BG = 32 # REQ Client\nSUBMIT_JOB_LOW = 33 # REQ Client\nSUBMIT_JOB_LOW_BG = 34 # REQ Client\nSUBMIT_JOB_SCHED = 35 # REQ Client\nSUBMIT_JOB_EPOCH = 36 # REQ Client\n\nCOMMAND_NAMES = {\n CAN_DO : 'CAN_DO',\n CANT_DO : 'CANT_DO',\n RESET_ABILITIES : 'RESET_ABILITIES',\n PRE_SLEEP : 'PRE_SLEEP',\n\n NOOP : 'NOOP',\n SUBMIT_JOB : 'SUBMIT_JOB',\n JOB_CREATED : 'JOB_CREATED',\n GRAB_JOB : 'GRAB_JOB',\n NO_JOB : 'NO_JOB',\n JOB_ASSIGN : 'JOB_ASSIGN',\n WORK_STATUS : 'WORK_STATUS',\n\n WORK_COMPLETE : 'WORK_COMPLETE',\n\n WORK_FAIL : 'WORK_FAIL',\n\n GET_STATUS : 'GET_STATUS',\n ECHO_REQ : 'ECHO_REQ',\n ECHO_RES : 'ECHO_RES',\n SUBMIT_JOB_BG : 'SUBMIT_JOB_BG',\n ERROR : 'ERROR',\n STATUS_RES : 'STATUS_RES',\n SUBMIT_JOB_HIGH : 'SUBMIT_JOB_HIGH',\n SET_CLIENT_ID : 'SET_CLIENT_ID',\n CAN_DO_TIMEOUT : 'CAN_DO_TIMEOUT',\n ALL_YOURS : 'ALL_YOURS',\n WORK_EXCEPTION : 'WORK_EXCEPTION',\n\n OPTION_REQ : 'OPTION_REQ',\n OPTION_RES : 'OPTION_RES',\n WORK_DATA : 'WORK_DATA',\n\n WORK_WARNING : 'WORK_WARNING',\n\n GRAB_JOB_UNIQ : 'GRAB_JOB_UNIQ',\n JOB_ASSIGN_UNIQ : 'JOB_ASSIGN_UNIQ',\n SUBMIT_JOB_HIGH_BG : 'SUBMIT_JOB_HIGH_BG',\n SUBMIT_JOB_LOW : 'SUBMIT_JOB_LOW',\n SUBMIT_JOB_LOW_BG : 'SUBMIT_JOB_LOW_BG',\n SUBMIT_JOB_SCHED : 'SUBMIT_JOB_SCHED',\n SUBMIT_JOB_EPOCH : 'SUBMIT_JOB_EPOCH'\n}\n\nNULL_CHAR = b'\\x00'\nMAGIC_REQ = NULL_CHAR + b'REQ'\nMAGIC_RES = NULL_CHAR + b'RES'\n\nPARAM_FOR_COMMAND = {\n CAN_DO: ['func_name'],\n CANT_DO: ['func_name'],\n RESET_ABILITIES: [],\n PRE_SLEEP: [],\n NOOP: [],\n SUBMIT_JOB: ['func_name', 'unique', 'workload'],\n JOB_CREATED: ['job_handle'],\n GRAB_JOB: [],\n\n NO_JOB: [],\n JOB_ASSIGN: ['job_handle', 'func_name', 'workload'],\n WORK_STATUS: ['job_handle', 'numerator', 'denominator'],\n WORK_COMPLETE: ['job_handle', 'workload'],\n WORK_FAIL: ['job_handle'],\n GET_STATUS: ['job_handle'],\n ECHO_REQ: ['workload'],\n ECHO_RES: ['workload'],\n SUBMIT_JOB_BG: ['func_name', 'unique', 'workload'],\n ERROR: ['error_code', 'error_text'],\n\n STATUS_RES: ['job_handle', 'known', 'running', 'numerator', 'denominator'],\n SUBMIT_JOB_HIGH: ['func_name', 'unique', 'workload'],\n SET_CLIENT_ID: ['client_id'],\n CAN_DO_TIMEOUT: ['func_name', 'timeout'],\n ALL_YOURS: [],\n WORK_EXCEPTION: ['job_handle', 'workload'],\n OPTION_REQ: ['option_name'],\n OPTION_RES: 
['option_name'],\n WORK_DATA: ['job_handle', 'workload'],\n WORK_WARNING: ['job_handle', 'workload'],\n\n GRAB_JOB_UNIQ: [],\n JOB_ASSIGN_UNIQ: ['job_handle', 'func_name', 'unique', 'workload'],\n SUBMIT_JOB_HIGH_BG: ['func_name', 'unique', 'workload'],\n SUBMIT_JOB_LOW: ['func_name', 'unique', 'workload'],\n SUBMIT_JOB_LOW_BG: ['func_name', 'unique', 'workload']\n}\n\nCOMMAND_HEADER_SIZE = 12\n\nclass ProtocolError(Exception):\n pass\n\nclass GearmanError(Exception):\n def __init__(self, error_code, error_text, extra):\n self.error_code = error_code\n self.error_text = error_text\n self.extra = extra\n\ndef to_bytes(string):\n if isinstance(string, str):\n return bytes(string, 'UTF8')\n else:\n string = b'' if string is None else string\n return string\n\ndef to_str(byte):\n if isinstance(byte, bytes):\n return str(byte, 'UTF8')\n else:\n return byte\n\ndef pack_binary_command(cmd_type, cmd_args={}, is_response=False):\n magic = MAGIC_RES if is_response else MAGIC_REQ\n excepted_cmd_params = PARAM_FOR_COMMAND.get(cmd_type, None)\n\n data_items = [cmd_args[param] for param in excepted_cmd_params]\n\n binary_payload = NULL_CHAR.join(map(to_bytes, data_items))\n payload_size = len(binary_payload)\n packing_format = '!4sII%ds' % payload_size\n\n return struct.pack(packing_format, magic, cmd_type, payload_size,\n binary_payload)\n\ndef parse_binary_command(in_buffer, is_response=True):\n in_buffer_size = len(in_buffer)\n magic = None\n cmd_type = None\n cmd_args = None\n cmd_len = 0\n excepted_packet_size = None\n\n if in_buffer_size < COMMAND_HEADER_SIZE:\n return cmd_type, cmd_args, cmd_len\n\n magic, cmd_type, cmd_len = struct.unpack('!4sII',\n in_buffer[:COMMAND_HEADER_SIZE])\n\n received_bad_response = is_response and bool(magic != MAGIC_RES)\n received_bad_request = not is_response and bool(magic != MAGIC_REQ)\n if received_bad_response or received_bad_request:\n raise ProtocolError('Malformed Magic')\n excepted_cmd_params = PARAM_FOR_COMMAND.get(cmd_type, None)\n if excepted_cmd_params is None:\n raise ProtocolError('Received unkonw binary command: %s' % cmd_type)\n\n excepted_packet_size = COMMAND_HEADER_SIZE + cmd_len\n\n if in_buffer_size < excepted_packet_size:\n return None, None, 0\n\n binary_payload = in_buffer[COMMAND_HEADER_SIZE:excepted_packet_size]\n split_arguments = []\n\n if len(excepted_cmd_params) > 0:\n split_arguments = binary_payload.split(NULL_CHAR,\n len(excepted_cmd_params) - 1)\n elif binary_payload:\n raise ProtocolError('Excepted no binary payload: %s' % cmd_type)\n\n if len(split_arguments) != len(excepted_cmd_params):\n raise ProtocolError('Received %d argument(s), excepting %d argument(s): %s'%\\\n len(split_arguments), len(excepted_cmd_params), cmd_type)\n\n cmd_args = dict(zip(excepted_cmd_params, split_arguments))\n\n return cmd_type, cmd_args, excepted_packet_size\n\nclass BaseAgent(object):\n __slots__ = ['_reader', '_writer', '_buffer', '_extra', '_lock']\n def __init__(self, reader, writer, extra = {}):\n self._reader = reader\n self._writer = writer\n self._buffer = []\n self._extra = extra\n self._lock = asyncio.Lock()\n\n @asyncio.coroutine\n def send(self, cmd_type, cmd_args={}, is_response = False):\n buf = pack_binary_command(cmd_type, cmd_args, is_response)\n self._writer.write(buf)\n logger.debug('Send[%s:%s]> CMD: %s | Buffer: %s'%(\\\n self._extra['host'], self._extra['port'],\n COMMAND_NAMES.get(cmd_type), buf))\n yield from self._writer.drain()\n\n @asyncio.coroutine\n def read(self, is_response = True):\n yield from self._lock\n buf = 
b''.join(self._buffer)\n while True:\n data = parse_binary_command(buf, is_response)\n if data[2] == 0:\n buf += yield from self._reader.read(12)\n else:\n break\n\n self._buffer = [buf[data[2]:]]\n if data[0]:\n logger.debug('Recv[%s:%s]> CMD: %s | Buffer: %s'%(\\\n self._extra['host'], self._extra['port'],\n COMMAND_NAMES.get(data[0]), buf[:data[2]]))\n self._lock.release()\n if data[0] == ERROR:\n raise GearmanError(data[1]['error_code'], data[1]['error_text'], self._extra)\n return data[0], data[1]\n"
},
{
"alpha_fraction": 0.6706706881523132,
"alphanum_fraction": 0.6986986994743347,
"avg_line_length": 25.289474487304688,
"blob_id": "53e8d20ea651854bd509f00429260e5a5695fdfa",
"content_id": "8f3a50d2c3e3c24d0c9d1f37820fe4055092ae0c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 999,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 38,
"path": "/examples/worker.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import logging\nimport sys\nimport os\ntry:\n from aiogear import Worker\nexcept ImportError:\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n from aiogear import Worker\nfrom aiogear.common import logger\n\nimport asyncio\nlogger.setLevel(logging.DEBUG)\nFORMAT = '%(asctime)-15s - %(message)s'\nformater = logging.Formatter(FORMAT)\nch = logging.StreamHandler()\nch.setFormatter(formater)\nlogger.addHandler(ch)\n\ndef echo(job):\n print(job.workload)\n # yield from asyncio.sleep(10)\n yield from job.complete('cckk,haha')\n\ndef main():\n worker = Worker()\n # start the gearman server\n # sudo gearmand -d -p 4730\n # sudo gearmand -d -p 4731\n # sudo gearmand -d -p 4732\n yield from worker.add_server('localhost', 4730)\n yield from worker.add_server('localhost', 4731)\n yield from worker.add_server('localhost', 4732)\n yield from worker.add_func('echo', echo)\n worker.work()\n\nloop = asyncio.get_event_loop()\ntask = asyncio.Task(main())\nloop.run_forever()\n"
},
{
"alpha_fraction": 0.5706193447113037,
"alphanum_fraction": 0.5709969997406006,
"avg_line_length": 30.15294075012207,
"blob_id": "5bb90b9af25eecb2df3628590fd01b15cd41c8ee",
"content_id": "288c5a8d0d3c776ca327cbb8cb2226b29fe2d22b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2648,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 85,
"path": "/aiogear/client.py",
"repo_name": "Lupino/aiogear",
"src_encoding": "UTF-8",
"text": "import asyncio\nfrom . import common\nimport random\nfrom uuid import uuid1 as uuid\n\n__all__ = ['Client']\n\nclass ClientAgent(common.BaseAgent):\n level_normal = 'normal'\n level_low = 'low'\n level_high = 'high'\n\n @asyncio.coroutine\n def do(self, func_name, workload, unique = None, level = 'normal', background=False):\n if not unique:\n unique = str(uuid())\n payload = {\n 'func_name': func_name,\n 'unique': unique,\n 'workload': workload\n }\n if background:\n if level == self.level_low:\n yield from self.send(common.SUBMIT_JOB_LOW_BG, payload)\n elif level == self.level_high:\n yield from self.send(common.SUBMIT_JOB_HIGH_BG, payload)\n else:\n yield from self.send(common.SUBMIT_JOB_BG, payload)\n else:\n if level == self.level_low:\n yield from self.send(common.SUBMIT_JOB_LOW, payload)\n elif level == self.level_high:\n yield from self.send(common.SUBMIT_JOB_HIGH, payload)\n else:\n yield from self.send(common.SUBMIT_JOB, payload)\n\n cmd_type, cmd_args = yield from self.read()\n\n if cmd_type == common.JOB_CREATED:\n job_handle = cmd_args['job_handle']\n\n if background:\n return job_handle\n else:\n return Task(self, job_handle)\n\nclass Client(object):\n level_normal = ClientAgent.level_normal\n level_low = ClientAgent.level_low\n level_high = ClientAgent.level_high\n\n __slots__ = ['_agents']\n\n def __init__(self):\n self._agents = []\n\n @asyncio.coroutine\n def do(self, func_name, workload, unique = None, level = 'normal', background=False):\n agent = random.choice(self._agents)\n try:\n ret = yield from agent.do(func_name, workload, unique, level,\n background)\n except ConnectionResetError as e:\n self._agents.remove(self._agents)\n raise e\n\n return ret\n\n @asyncio.coroutine\n def add_server(self, host, port, ssl = False):\n reader, writer = yield from asyncio.open_connection(host, port, ssl=ssl)\n agent = ClientAgent(reader, writer,\n {'host': host, 'port': port, 'ssl': ssl})\n self._agents.append(agent)\n\n\nclass Task(object):\n __slots__ = ['_agent', 'job_handle']\n def __init__(self, agent, job_handle):\n self._agent = agent\n self.job_handle = job_handle\n @property\n def result(self):\n cmd_type, cmd_args = yield from self._agent.read()\n return cmd_type, cmd_args\n"
}
] | 8 |
MouradYounes/INF8775
|
https://github.com/MouradYounes/INF8775
|
f85ebb0e0e151c6b269e28d31443e4eefa26a9dc
|
d826d4f674516fa8bd28a29c641853ea4bc2bd68
|
baaef7f71eeb1cbeff7242b72f72989eef57c2d6
|
refs/heads/master
| 2022-12-25T04:45:50.015185 | 2020-10-10T02:40:00 | 2020-10-10T02:40:00 | 297,999,012 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5419673323631287,
"alphanum_fraction": 0.5601974725723267,
"avg_line_length": 29.264368057250977,
"blob_id": "b63d88228aecdc3ff88bf2587294507b8a28e71f",
"content_id": "317f7cc1b8137ec465034dda90fb58c722fc167d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2633,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 87,
"path": "/Remise TP1/tests.py",
"repo_name": "MouradYounes/INF8775",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport random\nimport math\nimport sys\nimport time\nimport csv\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sys\n\nfrom brute_force import execute_brute_force\nfrom DpR import execute_DpR\nfrom utils import GRID_SIZE\n\n'''\nFonction qui calcule le test de puissance\n'''\ndef test_puissance(tablePath):\n data = pd.read_csv(tablePath)\n AlgoNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel('log n')\n ax.set_ylabel('log (temps)')\n #Tests de puissances\n for i in range(3):\n x = np.array(np.log(data[\"taille\"][(6*i):(6*i)+6]))\n y = np.array(np.log(data[\"temps\"][(6*i):(6*i)+6]))\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n print(np.poly1d(np.polyfit(x, y, 1))(x))\n ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.polyfit(x, y, 1))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"Puissance\" + AlgoNames[i])\n\n'''\nFonction qui calcule le test de rapport\n'''\ndef test_rapport(tablePath):\n data = pd.read_csv(tablePath)\n## On change fx n**2 pour brute et nlog(n) pour recursif et seuil\n data[\"temps\"] = data[\"temps\"]/(data[\"taille\"]**2)\n AlgoNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n for i in range(3):\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n x = np.array(data[\"taille\"][(6*i):(6*i)+6])\n y = np.array(data[\"temps\"][(6*i):(6*i)+6])\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n print(np.polyfit(x, y, 1))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"Rapport\" + AlgoNames[i])\n\n'''\nFonction qui va calculer le test de constante\n'''\ndef test_constante(tablePath):\n data = pd.read_csv(tablePath)\n AlgoNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n\n for i in range(3):\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n## On change fx n**2 pour brute et nlog(n) pour recursif et seuil\n fx=(data[\"taille\"][(6*i):(6*i)+6])**2\n x = np.array(fx)\n y = np.array(data[\"temps\"][(6*i):(6*i)+6])\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.poly1d(np.polyfit(x, y, 1))(x))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"constante\" + AlgoNames[i])\n"
},
{
"alpha_fraction": 0.5893238186836243,
"alphanum_fraction": 0.5939501523971558,
"avg_line_length": 32.4523811340332,
"blob_id": "3456a091cb82306778c018090524281dc71fa509",
"content_id": "60c57ef13e4157c63249e63a4bda58f88519b9f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2835,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 84,
"path": "/Remise TP1/main.py",
"repo_name": "MouradYounes/INF8775",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport random\nimport math\nimport sys\nimport time\nimport csv\n\nfrom brute_force import execute_brute_force\nfrom DpR import execute_DpR\nfrom utils import GRID_SIZE\nfrom pathlib import Path\n\nPATH = \"./\"\n\n'''\nUn point est représenté par un tuple (position_x, position_y)\nLa fonction generate_points génère une liste de N points.\n'''\ndef generate_points(FILE):\n with open(FILE) as f:\n #Skip first line\n next(f)\n mylist = [tuple(map(float, i.split(' '))) for i in f]\n return mylist\n\n\n'''\n--------------------------------------------------------------------\nATTENTION : Dans votre code vous devez utiliser le générateur gen.py\npour générer des points. Vous devez donc modifier ce code pour importer\nles points depuis les fichiers générés.\nDe plus, vous devez faire en sorte que l'interface du tp.sh soit\ncompatible avec ce code (par exemple l'utilisation de flag -e, -a, (p et -t)).\n--------------------------------------------------------------------\n '''\n \ndef main(argv):\n\n algos = [\"brute\", \"recursif\", \"seuil\"]\n \n if Path(argv[1]).is_file():\n POINTS = generate_points(argv[1])\n else:\n print(\"Les paramètres ne sont pas correcte ou vous n'avez pas entré un fichier valide.\")\n sys.exit()\n if not argv[0].lower() in algos:\n print(\"Les paramètres ne sont pas correcte Veuillez vous assurer d'avoir entrer l'une des options suivante en respéctant la casse\")\n print(\"brute | recursif | seuil\")\n sys.exit() \n\n \n sorted_points_x = sorted(POINTS, key=lambda x: x[0])\n sorted_points_y = sorted(POINTS, key=lambda x: x[1])\n if argv[0].lower() == \"brute\":\n #Exécuter l'algorithme force brute\n time_BF, min_DistanceBF = execute_brute_force(sorted_points_x)\n if '-t' in argv:\n print(\"Temps : \", time_BF)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceBF)\n \n \n elif argv[0].lower() == \"recursif\":\n #Exécuter l'algorithme Diviser pour régner avec un seuil élémentaire\n SEUIL_DPR = 3\n time_DPR, min_DistanceDPR = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_DPR)\n if '-t' in argv:\n print(\"Temps : \", time_DPR)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceDPR)\n \n \n elif argv[0].lower() == \"seuil\":\n #Exécuter l'algorithme Diviser pour régner avec un seuil déterminé expérimentalement\n SEUIL_seuil = 21\n time_seuil, min_Distanceseuil = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_seuil)\n if '-t' in argv:\n print(\"Temps : \", time_seuil)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_Distanceseuil)\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n"
},
{
"alpha_fraction": 0.581944465637207,
"alphanum_fraction": 0.6041666865348816,
"avg_line_length": 25.629629135131836,
"blob_id": "13387ec08e08950e9766ec7a4bdc568f27e48832",
"content_id": "7591de92213d66591471a5b2dc094fae542e58e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 27,
"path": "/outil-automatisé/puissance.py",
"repo_name": "MouradYounes/INF8775",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ndata = pd.read_csv(\"table.csv\")\n\nsortingNames = [\"BF\", \"DPR\", \"seuil\"]\ncolors = [\"b\", \"r\", \"g\"]\nplt.clf()\nfig = plt.figure() \nax = fig.add_subplot(1,1,1)\nax.set_xlabel('log n')\nax.set_ylabel('log (temps)')\n#Tests de puissances\nfor i in range(3):\n x = np.array(np.log(data[\"taille\"][(6*i):(6*i)+6]))\n y = np.array(np.log(data[\"temps\"][(6*i):(6*i)+6]))\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n print(np.poly1d(np.polyfit(x, y, 1))(x))\n ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.polyfit(x, y, 1))\n \nax.legend(loc=\"lower right\")\nplt.show()\nfig.savefig(\"Puissance\")\n\n"
},
{
"alpha_fraction": 0.5786046385765076,
"alphanum_fraction": 0.5832558274269104,
"avg_line_length": 30.632352828979492,
"blob_id": "8a1da8c92d71035e845cf2e9ef6de4ddb3968780",
"content_id": "969317d0a245e3f07febcfac64ef0c9c6c817497",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2164,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 68,
"path": "/TP1/main.py",
"repo_name": "MouradYounes/INF8775",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport random\nimport math\nimport sys\nimport time\nimport csv\n\nfrom brute_force import execute_brute_force\nfrom DpR import execute_DpR\nfrom utils import GRID_SIZE\n\nPATH = \"../Echantillons/\"\n\n'''\nUn point est représenté par un tuple (position_x, position_y)\nLa fonction generate_points génère une liste de N points.\n'''\ndef generate_points(FILE):\n with open(PATH+FILE) as f:\n #Skip first line\n next(f)\n mylist = [tuple(map(float, i.split(' '))) for i in f]\n return mylist\n\n\n'''\n--------------------------------------------------------------------\nATTENTION : Dans votre code vous devez utiliser le générateur gen.py\npour générer des points. Vous devez donc modifier ce code pour importer\nles points depuis les fichiers générés.\nDe plus, vous devez faire en sorte que l'interface du tp.sh soit\ncompatible avec ce code (par exemple l'utilisation de flag -e, -a, (p et -t)).\n--------------------------------------------------------------------\n '''\n\ndef main(argv):\n\n POINTS = generate_points(argv[1])\n sorted_points_x = sorted(POINTS, key=lambda x: x[0])\n sorted_points_y = sorted(POINTS, key=lambda x: x[1])\n \n if argv[0] == \"BF\":\n #Exécuter l'algorithme force brute\n time_BF, min_DistanceBF = execute_brute_force(sorted_points_x)\n if '-t' in argv:\n print(\"Temps : \", time_BF)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceBF)\n \n elif argv[0] == \"DPR\":\n #Exécuter l'algorithme Diviser pour régner\n SEUIL_DPR = 3\n time_DPR, min_DistanceDPR = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_DPR)\n if '-t' in argv:\n print(\"Temps : \", time_DPR)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceDPR)\n \n elif argv[0] == \"seuil\":\n SEUIL_DPR = 4\n time_DPR, min_DistanceDPR = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_DPR)\n if '-t' in argv:\n print(\"Temps : \", time_DPR)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceDPR)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])"
},
{
"alpha_fraction": 0.4931996464729309,
"alphanum_fraction": 0.5319170355796814,
"avg_line_length": 37.011322021484375,
"blob_id": "e32ff2449459a5491a60b8b73d3f36930cc99a07",
"content_id": "0762d1a74ff2a92fcf8d9ea40cf1ffc6b1b19ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10088,
"license_type": "no_license",
"max_line_length": 326,
"num_lines": 265,
"path": "/outil-automatisé/main.py",
"repo_name": "MouradYounes/INF8775",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport random\nimport math\nimport sys\nimport time\nimport csv\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sys\n\nfrom brute_force import execute_brute_force\nfrom DpR import execute_DpR\nfrom utils import GRID_SIZE\n\nPATH = \"../Echantillons/\"\n\n'''\nUn point est représenté par un tuple (position_x, position_y)\nLa fonction generate_points génère une liste de N points.\n'''\ndef generate_points(FILE):\n with open(PATH+FILE) as f:\n #Skip first line\n next(f)\n mylist = [tuple(map(float, i.split(' '))) for i in f]\n return mylist\n\n'''\nFonction qui calcule le test de puissance\n'''\ndef test_puissance():\n data = pd.read_csv(\"table.csv\")\n\n sortingNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel('log n')\n ax.set_ylabel('log (temps)')\n #Tests de puissances\n for i in range(3):\n x = np.array(np.log(data[\"taille\"][(6*i):(6*i)+6]))\n y = np.array(np.log(data[\"temps\"][(6*i):(6*i)+6]))\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n print(np.poly1d(np.polyfit(x, y, 1))(x))\n ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.polyfit(x, y, 1))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"Puissance\" + sortingNames[i])\n\n'''\nFonction qui calcule le test de rapport\n'''\ndef test_rapport():\n data = pd.read_csv(\"table.csv\")\n data[\"temps\"] = data[\"temps\"]/data[\"taille\"]\n sortingNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n for i in range(3):\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n x = np.array(data[\"taille\"][(6*i):(6*i)+6])\n y = np.array(data[\"temps\"][(6*i):(6*i)+6])\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n #ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.polyfit(x, y, 1))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"Rapport\" + sortingNames[i])\n\n'''\nFonction qui va calculer le test de constante\n'''\ndef test_constante():\n data = pd.read_csv(\"table.csv\")\n sortingNames = [\"BF\", \"DPR\", \"seuil\"]\n colors = [\"b\", \"r\", \"g\"]\n\n for i in range(3):\n plt.clf()\n fig = plt.figure() \n ax = fig.add_subplot(1,1,1)\n x = np.array(data[\"taille\"][(6*i):(6*i)+6])\n y = np.array(data[\"temps\"][(6*i):(6*i)+6])\n ax.scatter(x, y, c = colors[i], label = data[\"methode\"][(6*i)])\n #ax.plot(x, np.poly1d(np.polyfit(x, y, 1))(x),c= colors[i])\n print(np.polyfit(x, y, 1))\n \n ax.legend(loc=\"lower right\")\n plt.show()\n fig.savefig(\"constante\" + sortingNames[i])\n\n'''\nFonction qui va permettre de lire le data de csv\n'''\ndef read_data(file):\n brutefore_array100 = []\n brutefore_array1k = []\n brutefore_array10k = []\n brutefore_array30k = []\n brutefore_array50k = []\n brutefore_array100k = []\n\n DPR_array100 = []\n DPR_array1k = []\n DPR_array10k = []\n DPR_array30k = []\n DPR_array50k = []\n DPR_array100k = []\n\n seuil_array100 = []\n seuil_array1k = []\n seuil_array10k = []\n seuil_array30k = []\n seuil_array50k = []\n seuil_array100k = []\n\n with open(file, 'r') as file:\n next(file)\n reader = csv.reader(file)\n for row in reader:\n if \"BF\" in row[0]:\n if \"100_\" in row[0]:\n brutefore_array100.append(float(row[0].split(\"|\")[2]))\n elif \"1k\" in row[0]:\n brutefore_array1k.append(float(row[0].split(\"|\")[2]))\n elif \"10k\" in row[0]:\n 
brutefore_array10k.append(float(row[0].split(\"|\")[2]))\n elif \"30k\" in row[0]:\n brutefore_array30k.append(float(row[0].split(\"|\")[2]))\n elif \"50k\" in row[0]:\n brutefore_array50k.append(float(row[0].split(\"|\")[2])) \n elif \"100k\" in row[0]:\n brutefore_array100k.append(float(row[0].split(\"|\")[2]))\n elif \"DPR\" in row[0]:\n if \"100_\" in row[0]:\n DPR_array100.append(float(row[0].split(\"|\")[2]))\n elif \"1k\" in row[0]:\n DPR_array1k.append(float(row[0].split(\"|\")[2]))\n elif \"10k\" in row[0]:\n DPR_array10k.append(float(row[0].split(\"|\")[2]))\n elif \"30k\" in row[0]:\n DPR_array30k.append(float(row[0].split(\"|\")[2]))\n elif \"50k\" in row[0]:\n DPR_array50k.append(float(row[0].split(\"|\")[2])) \n elif \"100k\" in row[0]:\n DPR_array100k.append(float(row[0].split(\"|\")[2]))\n elif \"seuil\" in row[0]:\n if \"100_\" in row[0]:\n seuil_array100.append(float(row[0].split(\"|\")[2]))\n elif \"1k\" in row[0]:\n seuil_array1k.append(float(row[0].split(\"|\")[2]))\n elif \"10k\" in row[0]:\n seuil_array10k.append(float(row[0].split(\"|\")[2]))\n elif \"30k\" in row[0]:\n seuil_array30k.append(float(row[0].split(\"|\")[2]))\n elif \"50k\" in row[0]:\n seuil_array50k.append(float(row[0].split(\"|\")[2])) \n elif \"100k\" in row[0]:\n seuil_array100k.append(float(row[0].split(\"|\")[2])) \n\n method = [\"brute\",\"DPR\",\"seuil\"]\n lengthOfdata = [\"100\",\"1000\",\"10000\",\"30000\",\"50000\",\"100000\"]\n average_brute = [sum(brutefore_array100) / len(brutefore_array100), sum(brutefore_array1k) / len(brutefore_array1k), sum(brutefore_array10k) / len(brutefore_array10k), sum(brutefore_array30k) / len(brutefore_array30k), sum(brutefore_array50k) / len(brutefore_array50k), sum(brutefore_array100k) / len(brutefore_array100k)]\n average_DPR = [sum(DPR_array100) / len(DPR_array100), sum(DPR_array1k) / len(DPR_array1k), sum(DPR_array10k) / len(DPR_array10k), sum(DPR_array30k) / len(DPR_array30k), sum(DPR_array50k) / len(DPR_array50k), sum(DPR_array100k) / len(DPR_array100k)]\n average_seuil = [sum(seuil_array100) / len(seuil_array100), sum(seuil_array1k) / len(seuil_array1k), sum(seuil_array10k) / len(seuil_array10k), sum(seuil_array30k) / len(seuil_array30k), sum(seuil_array50k) / len(seuil_array50k), sum(seuil_array100k) / len(seuil_array100k)]\n table = []\n for i in range(18):\n if i < 6:\n table.append([method[0], lengthOfdata[i], average_brute[i]])\n elif i < 12:\n table.append([method[1], lengthOfdata[i-6], average_DPR[i-6]])\n elif i < 18:\n table.append([method[2], lengthOfdata[i-12], average_seuil[i-12]]) \n \n with open(\"table.csv\", \"w\") as f:\n f.write(\"methode\" + \",\" + \"taille\" + \",\" + \"temps\" + \"\\n\")\n for i in range(len(table)):\n f.write(table[i][0] + \",\" + table[i][1] + \",\" + str(table[i][2]) + \"\\n\")\n\n\n\n'''\n--------------------------------------------------------------------\nATTENTION : Dans votre code vous devez utiliser le générateur gen.py\npour générer des points. 
Vous devez donc modifier ce code pour importer\nles points depuis les fichiers générés.\nDe plus, vous devez faire en sorte que l'interface du tp.sh soit\ncompatible avec ce code (par exemple l'utilisation de flag -e, -a, (p et -t)).\n--------------------------------------------------------------------\n '''\n\ndef main(argv):\n \n POINTS = generate_points(argv[1])\n sorted_points_x = sorted(POINTS, key=lambda x: x[0])\n sorted_points_y = sorted(POINTS, key=lambda x: x[1])\n if argv[0].lower() == \"bf\":\n #Exécuter l'algorithme force brute\n time_BF, min_DistanceBF = execute_brute_force(sorted_points_x)\n if '-t' in argv:\n print(\"Temps : \", time_BF)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceBF)\n row=[\"BF\",argv[1],time_BF,min_DistanceBF]\n \n elif argv[0].lower() == \"dpr\":\n #Exécuter l'algorithme Diviser pour régner\n SEUIL_DPR = 3\n time_DPR, min_DistanceDPR = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_DPR)\n if '-t' in argv:\n print(\"Temps : \", time_DPR)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_DistanceDPR)\n row=[\"DPR\",argv[1],time_DPR,min_DistanceDPR]\n \n elif argv[0].lower() == \"seuil\":\n SEUIL_seuil = 4\n time_seuil, min_Distanceseuil = execute_DpR(sorted_points_x, sorted_points_y, SEUIL_seuil)\n if '-t' in argv:\n print(\"Temps : \", time_seuil)\n if '-p' in argv:\n print(\"Plus petite distance: \", min_Distanceseuil)\n row=[\"seuil\",argv[1],time_seuil,min_Distanceseuil]\n \n with open('result.csv', 'a', newline='') as file:\n writer = csv.writer(file, delimiter='|')\n writer.writerow(row)\n\nif __name__ == \"__main__\":\n\n samples=[\"ex100_\",\"ex1k_\",\"ex10k_\",\"ex30k_\",\"ex50k_\",\"ex100k_\"]\n first_row=[\"method\",\"file name\", \"time\", \"result\"]\n\n \n## with open('result.csv', 'w', newline='') as file:\n## writer = csv.writer(file, delimiter='|')\n## writer.writerow(first_row)\n # for sample in samples:\n # for x in range(10):\n # i=x+1\n # filename=sample + str(i) + \".txt\"\n # main([\"BF\",filename,'-t','-p'])\n # for sample in samples:\n # for x in range(10):\n # i=x+1\n # filename=sample + str(i) + \".txt\"\n # main([\"DPR\",filename,'-t','-p'])\n## for sample in samples:\n## for x in range(10):\n## i=x+1\n## filename=sample + str(i) + \".txt\"\n## main([\"seuil\",filename,'-t','-p'])\n \n #read_data(\"/Users/mouradyounes/AUT2020_V1/INF8775/INF8775/outil-automatisé/result.csv\")\n test_constante()\n print(\"end\")\n"
}
] | 5 |
eignnx/lispish
|
https://github.com/eignnx/lispish
|
a24eaadc1310350093c4ece1796558415b4a0cc6
|
67232f370921c03c66dad270efdf140a250e0830
|
c3615e84d639227fd2ae2bf46c88c9329fc39363
|
refs/heads/master
| 2020-03-22T16:34:40.426128 | 2018-08-10T19:05:17 | 2018-08-10T19:05:17 | 140,336,200 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6283618807792664,
"alphanum_fraction": 0.6332518458366394,
"avg_line_length": 17.590909957885742,
"blob_id": "a5e43adf17ce9ce8795ed25581b1b72247c0c0bf",
"content_id": "e79802c565e2fc3f8efa2f2700cddd7abb3e0526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 22,
"path": "/README.md",
"repo_name": "eignnx/lispish",
"src_encoding": "UTF-8",
"text": "# Lispish\nA Lisp clone written in Python.\n\n## Dependencies\nThis project requires `sly` for Python 3. Install it like this:\n```shell\npip3 install --user sly\n```\n\n## TODO\n### Builtin Functions to Add\n\n* `if`\n* `let`\n* `list`\n * Does not evaluate its arguments, returns an AST List\n* `eval`\n * Accepts an AST, calls Python-level `eval` on it\n* `=`/`eq?`\n* `map`/`filter`\n* `car`/`cdr`/`cons`\n* `quote`/`'`\n"
},
{
"alpha_fraction": 0.5866666436195374,
"alphanum_fraction": 0.5866666436195374,
"avg_line_length": 20.81818199157715,
"blob_id": "c9b72d5df46193f6aa03c20c7722ea22648e2650",
"content_id": "5e60e495b8c71ee547eff3604fb1c6a196d4762a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 55,
"path": "/parser.py",
"repo_name": "eignnx/lispish",
"src_encoding": "UTF-8",
"text": "import sly\nimport lexer\nfrom trees import Number, Symbol, List\nfrom decimal import Decimal\n\nclass LispishParser(sly.Parser):\n tokens = lexer.LispishLexer.tokens\n\n @_(\"expression\")\n def whole_program(self, p):\n return p.expression\n\n @_(\"NAME\")\n def symbol(self, inTok):\n return Symbol(inTok.NAME)\n\n @_(\"NUMBER\")\n def number(self, inTok):\n return Number(Decimal(inTok.NUMBER))\n\n @_(\"number\")\n def expression(self, p):\n return p.number\n\n @_(\"symbol\")\n def expression(self, p):\n return p.symbol\n\n @_(\"listy\")\n def expression(self, p):\n return p.listy\n\n @_(\"LPAREN expression_list RPAREN\")\n def listy(self, p):\n return List(p.expression_list)\n\n @_(\"expression\")\n def expression_list(self, p):\n return [p.expression]\n\n @_(\"expression SEP expression_list\")\n def expression_list(self, p):\n return [p.expression] + p.expression_list\n \n\nif __name__ == \"__main__\":\n lex = lexer.LispishLexer()\n parser = LispishParser()\n\n while True:\n inp = input(\"--> \")\n tokens = lex.tokenize(inp)\n res = parser.parse(tokens)\n print(repr(res))\n print(res)\n"
},
{
"alpha_fraction": 0.4721029996871948,
"alphanum_fraction": 0.4721029996871948,
"avg_line_length": 22.350000381469727,
"blob_id": "9988301b512cde2c7b0057c3257272f9ecf9f3e3",
"content_id": "1664371c305832f46bfc4d26f9b13cd79af9d47b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 466,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 20,
"path": "/lexer.py",
"repo_name": "eignnx/lispish",
"src_encoding": "UTF-8",
"text": "import sly\n\nclass LispishLexer(sly.Lexer):\n tokens = {\n LPAREN, RPAREN,\n NUMBER, NAME, SEP\n }\n\n LPAREN = r\"\\(\"\n RPAREN = r\"\\)\"\n # including decimals\n NUMBER = r\"-?((\\.\\d+)|(\\d+\\.\\d*)|(\\d+))\"\n # get-values, +, - are valid names \n NAME = r\"[a-zA-Z_\\-+*\\/=<>][\\w\\-=><]*['!?]*\"\n SEP = r\"\\s\" # whitespace separator\n\nif __name__ == \"__main__\":\n lex = LispishLexer()\n for tok in lex.tokenize(input(\"--> \")):\n print(tok)"
},
{
"alpha_fraction": 0.655339777469635,
"alphanum_fraction": 0.655339777469635,
"avg_line_length": 40.20000076293945,
"blob_id": "8d6a72e12bed92cf4de75ddd99fa09c209de20d7",
"content_id": "3492c5437a82324f0693b58ea9a182f5e0c56486",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 15,
"path": "/proc.py",
"repo_name": "eignnx/lispish",
"src_encoding": "UTF-8",
"text": "from env import Env\nfrom typing import Collection\n\nclass Proc:\n def __init__(self, formals: Collection[str], body, creation_env: Env):\n self.formals = formals # The names of the formal parameters\n self.body = body # The body AST\n self.creation_env = creation_env # A ref to the env where the proc was defined\n\n def apply(self, args):\n if len(self.formals) != len(args):\n raise Exception(\"Wrong number of arguments!\")\n local_bindings = dict(zip(self.formals, args))\n env = Env(local=local_bindings, parent=self.creation_env)\n return self.body.eval(env)\n"
},
{
"alpha_fraction": 0.5347609519958496,
"alphanum_fraction": 0.5362276434898376,
"avg_line_length": 28.05128288269043,
"blob_id": "58cd7b72c93c84322e8664e0bcb508d4c7dde058",
"content_id": "7241c0ed2b0b30c48ce2a9ab240bbb35acc8ccb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3409,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 117,
"path": "/trees.py",
"repo_name": "eignnx/lispish",
"src_encoding": "UTF-8",
"text": "\nclass Expression:\n def eval(self, env):\n raise NotImplementedError() \n \n\nclass Atom(Expression):\n def __init__(self, val):\n self.val = val \n\n def __str__(self):\n return \"{}\".format(self.val)\n\n def __repr__(self):\n cls_name = self.__class__.__name__\n return \"{name}({val})\".format(name=cls_name, val=self.val)\n\n def __eq__(self, other):\n return type(self) is type(other) and self.val == other.val\n\n\nclass Number(Atom):\n\n def eval(self, env):\n \"\"\"Defined to return another Number instance, NOT a Python number type\"\"\"\n return self\n\n def __int__(self):\n \"\"\"Conversion to Python int\"\"\"\n return int(self.val)\n\n def __float__(self):\n \"\"\"Conversion to Python float\"\"\"\n return float(self.val)\n\n ### Operator Overloads ###\n\n def __add__(self, other):\n return Number(self.val + other.val)\n\n def __sub__(self, other):\n return Number(self.val - other.val)\n\n def __mul__(self, other):\n return Number(self.val * other.val)\n\n def __truediv__(self, other):\n return Number(self.val / other.val)\n\n def __mod__(self, other):\n return Number(self.val % other.val)\n\n def __pow__(self, other):\n return Number(self.val ** other.val)\n\n\nclass Symbol(Atom):\n def eval(self, env):\n return env[str(self)]\n\nfrom proc import Proc\n\nclass List(Expression):\n def __init__(self, values):\n self.values = values\n\n def __str__(self):\n vals = \" \".join(str(v) for v in self.values)\n return \"({})\".format(vals)\n\n def __repr__(self):\n vals = \", \".join(repr(v) for v in self.values)\n return \"List({})\".format(vals)\n\n def __eq__(self, other):\n return type(self) is type(other) and self.values == other.values\n\n def __iter__(self):\n \"\"\"Allows a List to be iterated over directly\"\"\"\n return iter(self.values)\n\n def __getitem__(self, i):\n \"\"\"Defines array subscript notation on List instances\"\"\"\n return self.values[i]\n\n def eval(self, env) -> Expression:\n first = self.values[0]\n rest = self.values[1:]\n if type(first) is Proc:\n proc = first\n args = rest\n # apply takes a list comprehension \n # with every value in args evaluated.\n return proc.apply([a.eval(env) for a in args])\n elif type(first) is Symbol:\n if str(first) == \"define\":\n if type(rest[0]) is not Symbol:\n raise Error(\"First arg to define must be a symbol!\")\n if len(rest) != 2:\n raise Error(\"Define requires exactly 2 arguments!\")\n\n sym, value = rest # Split rest into two parts\n env.declare(str(sym)) # Declare the symbol\n value = value.eval(env) # Evaluate the value-to-be-assigned\n env[str(sym)] = value # Assign the value\n\n elif str(first) == \"lambda\":\n raise NotImplementedError(\"Define lambda!\")\n else: # it must be a user-defined symbol\n proc = first.eval(env)\n \n if type(proc) is not Proc:\n raise Error(\"Initial list element must be a Proc!\")\n\n new_self = List([proc] + rest)\n return new_self.eval(env)\n\n return None\n \n"
}
] | 5 |
c25l/self-modifying-rss-digest
|
https://github.com/c25l/self-modifying-rss-digest
|
2b394d49b9e8f6b9454887bfa09e39870db9f0fd
|
ce75dd9b8c8ae64e8cea00e5cc18677867bf86a4
|
874b9124e81f33e7efe6743f14153bc6dd46280c
|
refs/heads/master
| 2021-01-01T22:08:09.315052 | 2020-02-09T19:48:57 | 2020-02-09T19:48:57 | 239,364,034 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8066666722297668,
"alphanum_fraction": 0.8066666722297668,
"avg_line_length": 299,
"blob_id": "14129b28bdeda3a8cba97815dc59a382c5b16312",
"content_id": "965305a2eda565a4274d912625d739074772dc27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 299,
"num_lines": 1,
"path": "/README.md",
"repo_name": "c25l/self-modifying-rss-digest",
"src_encoding": "UTF-8",
"text": "this is yet another rss digest generator, but for the sake of compactness it writes the bloom filter it needs back into its own source code on each run. This is convenient for cron purposes because there are fewer paths bouncing around. Also it's just kinda neat to rewrite source code between runs.\n"
},
{
"alpha_fraction": 0.5127097368240356,
"alphanum_fraction": 0.5188103914260864,
"avg_line_length": 28.578947067260742,
"blob_id": "fe478a0f531f94264c0ef06280cd35bd144cd91d",
"content_id": "46ea2a1e4efe8f75c064b0fab32fc5722dd56107",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3934,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 133,
"path": "/self-modifying.py",
"repo_name": "c25l/self-modifying-rss-digest",
"src_encoding": "UTF-8",
"text": "import sys\nimport re\nimport feedparser\nimport smtplib\nimport hashlib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\nconfig = {\"email_from\": \"\",\n \"email_to\": \"\",\n \"pass\":\"\",\n \"feeds\":[]}\n \n# this block will be rewritten every time because, you know, yolo. Don't touch it.\n# ------- START -------\ndata = [{},{}]\n# ------- END -------\nclass Bloom:\n def __init__(self):\n self.data = [{},{}]\n def __repr__(self):\n return str(self.data)\n def _hashes(self, other):\n carry = \"\"\n out = []\n for ii,_ in enumerate(self.data):\n hashed = hashlib.md5(bytes(other+carry, 'utf-8')).hexdigest()\n out.append(int(hashed,16)%(2**20))\n carry += \":\"\n return out\n def __add__(self, other):\n hashes = self._hashes(other)\n for ii, _ in enumerate(self.data):\n self.data[ii][hashes[ii]]=True\n return self\n \n def __getitem__(self, other):\n #returns whether or not it's unseen.\n hashes = self._hashes(other)\n maybe = True\n for ii, xx in enumerate(self.data):\n if hashes[ii] in xx:\n maybe = False\n return maybe\n \n def reset(self, data):\n self.data = data\n\nclass Feed:\n def __init__(self, feed):\n self.feed = feed\n self.items = []\n self.title = feed\n \n def parse(self):\n data =feedparser.parse(self.feed) \n if 'feed' in data and 'title' in data.feed:\n self.title = data.feed.title\n self.items = [\"<a href=\" + y['link'] +\">\"+y['title']+\"</a><br>\\n\" + y['summary'] + \"\\n<hr>\\n\" for y in data.entries]\n\n\n def unseen(self, bloom):\n self.parse()\n self.items = [z for z in self.items if bloom[z]]\n for xx in self.items:\n bloom += xx\n\n def html(self):\n outstr=\"<h2>\"+self.title+\"</h2><br>\\n\"\n for y in self.items:\n outstr+= y\n outstr+=\"<hr>\"\n return outstr\n\nclass Email:\n def __init__(self, e_to, e_from, e_pass):\n self.e_to = e_to\n self.e_from = e_from\n self.e_pass = e_pass\n self.server = 'smtp.gmail.com'\n self.port = 465\n self.text = \"\"\n\n def add_text(self, more):\n self.text += more\n\n def send(self):\n msg = MIMEMultipart('alternative')\n msg['Subject'] = \"Daily RSS rollup\"\n msg['From'] = \"\\\"RSS digest\\\" <\"+self.e_from+\">\"\n msg['To'] = self.e_to\n text = \"Hey, this is better as an html email!\"\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(self.text, 'html')\n msg.attach(part1)\n msg.attach(part2)\n smtpObj = smtplib.SMTP_SSL(self.server, self.port)\n smtpObj.login(self.e_from, self.e_pass)\n smtpObj.sendmail(self.e_from, self.e_to, msg.as_string()) \n\ndef self_modify(new_data):\n data = []\n with open(sys.argv[0]) as file:\n for line in file:\n data.append(line)\n start = [x for x,y in enumerate([re.search(r'# -{7} START -{7}',x) for x in data]) if y][0]\n end = [x for x,y in enumerate([re.search(r'# -{7} END -{7}',x) for x in data]) if y][0]\n written = False\n with open(sys.argv[0], 'w') as file:\n for ii,line in enumerate(data):\n if ii<=start or ii>=end:\n file.write(line )\n if ii >= start and not written:\n written = True\n file.write(new_data+\"\\n\")\n\ndef main():\n g= Bloom()\n g.reset(data)\n out = Email(config[\"email_to\"], config[\"email_from\"], config[\"pass\"])\n out.add_text(\"<html><head></head><body>\\n\")\n for feed in config[\"feeds\"]:\n feed = Feed(feed)\n feed.unseen(g)\n out.add_text(feed.html())\n out.add_text(\"</body></html>\\n\")\n out.send()\n self_modify(\"\"\"data = {}\"\"\".format(str(g)))\n \n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
marcolongus/cluster
|
https://github.com/marcolongus/cluster
|
703d4f473d63c66f525121ae89fd3edddcbf46e3
|
4e8d783ab5c5a8ce79bc35ae1b0f4debe0942220
|
bffb93afd92581a4284d36a8168246cf8e2615c6
|
refs/heads/main
| 2023-07-08T17:32:32.418977 | 2021-08-10T12:13:14 | 2021-08-10T12:13:14 | 394,640,253 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5910447835922241,
"alphanum_fraction": 0.6736318469047546,
"avg_line_length": 21.9761905670166,
"blob_id": "d3412e82f83f8e4f5aa928b27b27f37506a57ea8",
"content_id": "52b098c4819f1f8132e5d03bcfa5fe70124819f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1005,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 42,
"path": "/graficos.py",
"repo_name": "marcolongus/cluster",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\r\nimport numpy as np \r\nimport math\r\nfrom termcolor import colored,cprint\r\nimport os\r\n\r\nos.system('color')\r\n\r\n\r\nrho = 1000./math.pow(150,2)\r\n\r\nprint(\"Densidad: %f\" %rho),print()\r\nalpha = math.sqrt(1./rho)\r\n\r\nN = np.array([1000., 10_000.,100_000., 1_000_000.])\r\nprint(\"#agents :\", N)\r\nL = alpha*np.sqrt(N)\r\nprint(\"System size:\",L)\r\n\r\nsystem_memory_float = 40*N/1_000_000.\r\nsystem_memory_double = 2*40*N/1_000_000.\r\nsize_memory = L*L*48/1_000_000.\r\n\r\nplt.title(\"Aprox. Global use of memory\")\r\n\r\nplt.ylim(0,size_memory.max())\r\nplt.xlim(0,system_memory_double.max())\r\nplt.xlabel(\"System memory [Mb]\")\r\nplt.ylabel(\"Syze system memory [Mb]\")\r\n\r\nplt.plot(system_memory_float,size_memory, label =\"float\")\r\nplt.plot(system_memory_double,size_memory, label=\"double\")\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\nmetrica = 6e06\r\ndelta_t = 0.1\r\ntiempo = 10000\r\nsistema = 1000000\r\ntimepo_simulacion = (tiempo/delta_t)*sistema/metrica\r\nprint(\"tiempo de simulacion %1f\" %(timepo_simulacion/3600.))"
},
{
"alpha_fraction": 0.5863503813743591,
"alphanum_fraction": 0.5944652557373047,
"avg_line_length": 37.70248031616211,
"blob_id": "789103884d16ff22f450b375067387720ede5b60",
"content_id": "5582284392b8f459aeebeda2e900f475dfa185a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4832,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 121,
"path": "/agentes.cpp",
"repo_name": "marcolongus/cluster",
"src_encoding": "UTF-8",
"text": "/* Autor: Benjamín R. Marcolongo. FAMAF-UNC.\r\n *---------------------------------------------------------------------------------------------------------\r\n * Programa para evolucionar un sistema de N agentes en el tiempo.\r\n *---------------------------------------------------------------------------------------------------------\r\n * Agentes:\r\n *\t\ti. Las velocidades de los N agentes pueden ser uniformes o tomadas aleatoriamente.\r\n *\t\t 1. Las distribuciones por defecto son de tipo exponencial o de ley de potencias.\r\n *\t\tii. Pueden interactuar a través de un potencial u otra forma (una red neuronal i.e).\r\n *\t\t\t 1. El potencial por defecto es de esferas blandas.\r\n *\t\tii. Están confinados a un toroide topológico-\r\n *\t\t\t 1. El tamaño característico del toro se denota con L (lado del cuadrado).\r\n *\t\tiii. Poseen un estado interno, caracterizado por un número entero.\r\n *\t\t\t 1. Este estado puede o no interactaur con la dinámica espacial.\r\n *\t\tiV. El estado interno puede evolucionar en el tiempo.\r\n *\t\t\t 1. Esta dinámica está regulada, genéricamente, con una distribución de poisson.\r\n *---------------------------------------------------------------------------------------------------------\r\n * Red compleja: falta programar\r\n *\t\t\ti. El programa trackea todo el sistema de interacciones y guarda la red compleja resultante.\r\n *\t\t\tii. La red compleja puede ser la asociada a la propagación del estado interno o la de contactos\r\n *---------------------------------------------------------------------------------------------------------\r\n */\r\n#include <bits/stdc++.h>\r\n#include \"agentes.h\" //Módulo con la clase definida para los agentes.\r\n\r\n#define CHECK_POINT(val_1,val_2) if( val_1%val_2 == 0)\r\n\r\nusing namespace std;\r\n\r\nint main(void){\r\n\r\n\t/*Estimacion del uso de memoria del sistema*/\r\n\tprint_mem_info();\r\n\t\r\n\t/*DEFINICIÓN DE ARCHIVOS DE SALIDA DEL PROGRAMA*/\r\n\t//Para modelado de epidemias:\r\n\tofstream FinalState (\"data/evolution.txt\");\r\n\tofstream epidemic (\"data/epidemia.txt\" ); //Estado de la epidemia en cada instante modulo m.\r\n\tofstream anim (\"data/animacion.txt\");\r\n\tofstream metrica (\"data/metrica.txt\", ios_base::app) ; \r\n\t\r\n\t/*VARIABLES PARA CALCULAR METRICA Y PERFERMANCE*/\r\n\tfloat updates = 0; //puntos por segundo: pps\r\n\tsize_t start_s = clock();\r\n\r\n\t/*SIMULACION*/\r\n\tfor (size_t n_simulaciones = 0; n_simulaciones < 1; n_simulaciones++){\r\n\t\tgen.seed(seed); //cada simulacion tiene su propia semilla. \r\n\t\tprint_header(n_simulaciones);\r\n\r\n\t\t/*DECLARACIÓN DE VARIABLES*/\r\n\t\tvector<particle> system, \r\n\t\t\t\t\t\t system_new;\r\n\t\t\r\n\t\tvector<bool> inter; //Flag de interacción.\r\n\t\tvector<size_t> state_vector; //En cada lugar contiene la población de cada estado.\r\n\t\t\r\n\t\t//Iinicializamos inter y stat_verctor\r\n\t\tinter.resize(N,false);\r\n\t\tstate_vector.resize(spin,0);\r\n\r\n\t\t/*Estuctura de datos para optimizar la búsqueda de interacciones entre agentes:\r\n\t\t *\t1. Utiliza un red-and-black tree implementado en c++ como set.\r\n\t\t *\t2. Cada agente está indexado por un int que representa su posición en\r\n\t\t *\t los vectores system y system_new.\r\n\t\t *\t3. Se construye una grilla con cuadrículas de tamaño 1x1 y cada a una se le asigna un set.\r\n\t\t *\t4. 
Cada set contiene los agentes que están en cada cuadrícula.\r\n\t\t */\r\n\t\tvector<vector<set<size_t>>> grid;\r\n\t\tsize_t num_grid = floor(L);\r\n\t\t\r\n\t\t//Inicializamos grid\r\n\t\tgrid.resize(num_grid);\r\n\t\tfor (size_t i=0; i<grid.size(); i++) grid[i].resize(num_grid);\r\n\r\n\t\t/*CONDICIÓN INICIAL:\r\n\t\t *i) No hay particulas interact. ii) Define el system.size() via pushback.\r\n\t\t */\r\n\t\tinit_system(system, state_vector, grid);\r\n\t\tsystem_new.resize(system.size());\r\n\t\tprint_state(state_vector);\r\n\r\n\t\t/*EVOLUCIÓN DEL SISTEMA*/\r\n\t\tint TimeStep = 0; \r\n\t\twhile ((TimeStep < 50000) && (state_vector[1] > 0))\r\n\t\t{\r\n\t\t\tCHECK_POINT(TimeStep,100) print_epidemic_tofile(epidemic, state_vector, TimeStep);\r\n\t\t\tCHECK_POINT(TimeStep,10000) printf(\"Time: %0.f\\n\", (double)TimeStep*delta_time) ;\r\n\t\t\t\r\n\t\t\tTimeStep ++;\r\n\t\t\tupdate_system(system, system_new, state_vector, grid, inter, TimeStep, anim);\r\n\t\t}//while\r\n\t\tupdates += (float)TimeStep;\r\n\r\n\t\t/*ESCRITURA DE RESULTADOS*/\r\n\t\tcout << endl;\r\n\t\tcout << \"--------------------\" << endl;\r\n\t\tcout << \"Experimento data:\" << endl;\r\n\t\tcout << \"--------------------\" << endl;\r\n\r\n\t\tprint_state(state_vector);\r\n\t\tcout << endl;\r\n\t}//for simul\r\n\r\n\tint stop_s = clock();\r\n\tfloat cps = (float)CLOCKS_PER_SEC;\r\n\tfloat time_elapsed = (float)(clock() - start_s)/cps;\r\n\tfloat metric = updates*(float)N/time_elapsed;\r\n\r\n\tcout << \"Time[seg] : \" << time_elapsed << endl;\r\n\tcout << \"Metrica[pps]: \" << metric << endl;\r\n\t\r\n\tmetrica << metric << endl;\r\n\r\n\t//Cerramos los archivos:\r\n\tFinalState.close();\r\n\tepidemic.close();\r\n\tanim.close();\r\n\tmetrica.close();\r\n\r\n\treturn 0;\r\n}\r\n\r\n"
}
] | 2 |
sebastienmeyer2/mst-clustering
|
https://github.com/sebastienmeyer2/mst-clustering
|
712c60bb7b836a2c614e610c4de50aabf1472e72
|
9f4a326c52f87204494e115a0c3ec2fd4302f13a
|
dfebad918e353b79be0be6d24b56973d45e6caf1
|
refs/heads/master
| 2023-06-04T05:12:03.800203 | 2021-06-28T17:56:35 | 2021-06-28T17:56:35 | 381,118,210 | 4 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5506270527839661,
"alphanum_fraction": 0.5568316578865051,
"avg_line_length": 30.431535720825195,
"blob_id": "1783b245a18c92cc1d33192f2667ce1cacc1178d",
"content_id": "37a2b97ff2aa25785336afb32aae4a446a443c0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7575,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 241,
"path": "/data/convert_data.py",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe only purpose of this file is to treat datasets that have been \ndownloaded from the internet. Since there is no general way to convert \ncsv data files to txt easy-to-read files for k-NN, we assume that \nthe datasets to be converted are relatively simple. That is, these \nfiles contain a header line for all the data details, then for each \nvariable there can be a float value, a TRUE/FALSE value (with different \ncapitalization) or a variable to be discarded. Finally, it handles the lack of \nvalues (NaN) by inserting the mean of all known values in other lines,\nor zero if there is none. \n\"\"\"\n\nimport sys \nimport math\nimport numpy as np\nfrom sklearn.preprocessing import scale\n\ntrue_variants = {\"TRUE\", \"TRUE\\n\", \"true\", \"true\\n\", \"True\", \"True\\n\"}\nfalse_variants = {\"FALSE\", \"FALSE\\n\", \"false\", \"false\\n\", \"False\", \"False\\n\"}\nnan_variants = {\"NA\", \"NA\\n\", \"na\", \"na\\n\", \"NaN\", \"NaN\\n\", \"nan\", \"nan\\n\"}\n\ndef read_file(file_to_read, sep=\",\", except_cols=set()):\n \"\"\"Reads a file from its name. It is expected to be in the data \n folder of the current project. \n\n Args:\n file_to_read (str) : the name of the file, including extension\n sep (str) : the separator between columns in the dataset\n except_cols (tuple) : a list of columns that have to be ejected\n from the dataset\n\n Returns:\n None\n \"\"\"\n\n with open(file_to_read, \"r\", encoding=\"utf-8\") as f:\n\n header = f.readline().split(sep=sep)\n variables = \"The variables kept from this file are: \"\n for k in range(len(header)):\n if k not in except_cols:\n variables += header[k] + \" \"\n print(variables)\n\n for line in f:\n\n values = line.split(sep=sep)\n\n kept_values = [0 for i in range(len(values) - len(except_cols))]\n current_col = 0\n for i in range(len(values)):\n\n if i not in except_cols:\n\n # treatment of NaN values\n if values[i] in nan_variants:\n kept_values[current_col] = float(\"NaN\")\n\n # treatment of TRUE/FALSE \n elif values[i] in true_variants:\n kept_values[current_col] = 1\n elif values[i] in false_variants:\n kept_values[current_col] = 0\n\n else:\n kept_values[current_col] = float(values[i])\n \n current_col += 1\n\n for k in range(len(kept_values)):\n print(kept_values[k], end= \" \")\n print(\"\")\n\n return \n\ndef write_file(data, file_to_write):\n \"\"\"Writes a file in a txt format that can be easily read\n by any C++ file we have in this project (for instance, we \n use this for kmeans and mst clustering)\n Warning: it will overwrite any existing file with the same name\n\n Args:\n data (list) : a list of list, where each contained list \n is a row of the output file, containing only float\n values\n file_to_write (str) : name to be giver to the new txt file\n\n Returns:\n None\n \"\"\"\n\n n = len(data)\n m = len(data[0])\n\n with open(file_to_write, \"w\", encoding=\"utf-8\") as f:\n\n for i in range(n):\n\n line = \"\"\n\n for j in range(m-1):\n line += str(data[i][j]) + \" \"\n line += str(data[i][m-1])\n\n f.write(line + \"\\n\")\n\n return\n\ndef convert_file(file_to_read, sep=\",\", except_cols=set(), do_scale=False, nb_points=100):\n \"\"\"Converts a file which respects the requirements from detailed in \n the header of this file to a txt easy-to-read file for C++ algorithms\n\n Args:\n file_to_read (str) : the name of the file, including extension\n sep (str) : the separator between columns in the dataset\n except_cols (tuple) : a list of columns that have to be ejected\n from the dataset\n do_scale 
(bool) : whether the data has to be scaled before being \n written in the new file\n nb_points (int) : the number of points to keep from the initial \n dataset for the new one, the indices being chosen randomly\n \n Returns:\n None\n \"\"\"\n\n data = []\n file_to_write = \"\"\n\n # Reading the file and converting simple values to float (keeping NaN values)\n with open(file_to_read, \"r\", encoding=\"utf-8\") as f:\n\n header = f.readline().split(sep=sep)\n variables = \"The variables kept from this file are: \"\n for k in range(len(header)):\n if k not in except_cols:\n variables += header[k] + \" \"\n print(variables)\n\n for line in f:\n\n values = line.split(sep=sep)\n\n kept_values = [0 for i in range(len(values) - len(except_cols))]\n current_col = 0\n for i in range(len(values)):\n\n if i not in except_cols:\n\n # treatment of NaN values\n if values[i] in nan_variants:\n kept_values[current_col] = float(\"NaN\")\n\n # treatment of TRUE/FALSE \n elif values[i] in true_variants:\n kept_values[current_col] = 1\n elif values[i] in false_variants:\n kept_values[current_col] = 0\n\n else:\n kept_values[current_col] = float(values[i])\n \n current_col += 1\n\n data.append(kept_values) \n\n # Treating NaN values using the mean of known values \n n = len(data)\n m = len(data[0])\n for j in range(m):\n\n missing_indices = []\n known_values = 0\n mean = 0\n for i in range(n):\n if math.isnan(data[i][j]):\n missing_indices.append(i)\n else:\n mean += data[i][j]\n known_values += 1\n mean = 0 if known_values <= 0 else mean/known_values\n\n for i in range(n):\n if math.isnan(data[i][j]):\n data[i][j] = mean\n\n # Scaling step\n if do_scale:\n data = scale(data)\n file_to_write += \"scaled_\"\n\n # Selecting a certain part of the data \n indices = [] \n if nb_points >= n:\n indices = [i for i in range(n)]\n else:\n indices = np.random.choice(n, nb_points)\n\n data = [data[idx] for idx in indices]\n file_to_write += \"n\" + str(nb_points) + \"_\"\n\n # Writing a brand new file !\n file_to_write += file_to_read.split(\".\")[0] + \".txt\"\n write_file(data, file_to_write)\n print(\"Data has been written in the file: \" + file_to_write)\n\n return \n\n### Usage of this file via command prompt\n\nnb_args = len(sys.argv)\nif nb_args >= 2:\n\tfile_to_read = sys.argv[1]\nelse:\n\tprint(\"\"\"Syntax: python {} <file_to_read> [sep] [\"except_cols1;except_cols2;...\"] [do_scale:0/1] [nb_points]\"\"\".format(sys.argv[0]))\n\texit(0)\n\nsep = \",\"\nexcept_cols = set()\ndo_scale = False\nnb_points = 100\n\n# Selection of separator (format: \",\")\nif nb_args >= 3:\n sep = sys.argv[2]\n\n# Selection of columns to except (format: \"x;y;z\")\nif nb_args >= 4:\n cols_to_except = sys.argv[3].split(\";\")\n for col in cols_to_except:\n except_cols.add(int(col))\n\n# Selection of scaling (format: \"0/1\")\nif nb_args >= 5:\n do_scale = bool(int(sys.argv[4]))\n\n# Selection of the number of points \nif nb_args >= 6:\n nb_points = int(sys.argv[5])\n\nconvert_file(file_to_read, sep=sep, except_cols=except_cols, do_scale=do_scale, nb_points=nb_points)\n"
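\n# Example invocation (hypothetical file name; the column indices and flags\n# depend on your dataset):\n#   python convert_data.py walmart_features.csv \",\" \"0;1\" 1 1000\n# i.e. drop columns 0 and 1, scale the remaining variables, and keep 1000\n# randomly chosen rows.\n"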
},
{
"alpha_fraction": 0.4253200590610504,
"alphanum_fraction": 0.4736842215061188,
"avg_line_length": 17.760000228881836,
"blob_id": "e56980cee0d8efac8a21721845e5d570ff5effb7",
"content_id": "59e5010219c3f0bdbf942b1ebe8a083f2258ebeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1406,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 75,
"path": "/test/test_node.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\"\n\n#include \"../src/point.hpp\"\n#include \"../src/node.hpp\"\n\nTEST_CASE(\"Simple node initialization\", \"[node:init]\") \n{\n Point::d = 2;\n\n SECTION(\"Node with no assigned point\")\n {\n Node n = Node(3);\n\n REQUIRE(n.label == 3);\n REQUIRE(n.get_point() == NULL);\n }\n\n\n SECTION(\"Nodes with assigned point\")\n {\n Point p;\n p.coords[0] = 2;\n p.coords[1] = 3;\n\n Node n = Node(1);\n\n n.set_point(&p);\n REQUIRE(n.get_point()->coords[0] == 2);\n \n Node n2 = Node(1, &p);\n REQUIRE(n2.get_point()->coords[0] == 2);\n }\n}\n\nTEST_CASE(\"Distance between nodes\", \"[node:dist]\")\n{\n Point::d = 2;\n\n SECTION(\"Both nodes are empty\")\n {\n Node n1 = Node(1);\n Node n2 = Node(2);\n\n REQUIRE(n1.dist(&n2) == 1); // arbitrary choice in node.cpp\n }\n\n SECTION(\"At least one node is empty\")\n {\n Node n1 = Node(1);\n\n Point p2;\n p2.coords[0] = 1;\n p2.coords[1] = 2;\n\n Node n2 = Node(2, &p2);\n\n REQUIRE(n1.dist(&n2) == 1);\n }\n\n SECTION(\"Both nodes have assigned points\")\n {\n Point p1;\n p1.coords[0] = 0;\n p1.coords[1] = 2;\n\n Point p2;\n p2.coords[0] = 2;\n p2.coords[1] = 0;\n\n Node n1 = Node(1, &p1);\n Node n2 = Node(2, &p2);\n\n REQUIRE((n1.dist(&n2) - 2.828) < 0.001);\n }\n}"
},
{
"alpha_fraction": 0.7281106114387512,
"alphanum_fraction": 0.7281106114387512,
"avg_line_length": 14.571428298950195,
"blob_id": "71a3c05329f822fa450c326e3ce6fc08940f3392",
"content_id": "0a061648d1856512c58674306a5ecd0822f42bf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 14,
"path": "/src/mst_boruvka.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"mst_algorithm.hpp\"\n#include \"union_find.hpp\"\n\n#include <iostream>\n\nclass BoruvkaAlgorithm : public MSTAlgorithm\n{\npublic:\n BoruvkaAlgorithm(Graph* graph);\n\n virtual void compute_mst();\n};"
},
{
"alpha_fraction": 0.5617479681968689,
"alphanum_fraction": 0.5658644437789917,
"avg_line_length": 34.494380950927734,
"blob_id": "3554897e01cb20a9ea98750e85d6069e1e6e4b29",
"content_id": "ec784b6da0fadaf58760a22622ee3400b3fed8b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3159,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 89,
"path": "/src/mst_boruvka.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_boruvka.hpp\"\n\nBoruvkaAlgorithm::BoruvkaAlgorithm(Graph* graph) : MSTAlgorithm(graph) {}\n\nvoid BoruvkaAlgorithm::compute_mst() \n{\n /*!\n * @brief Computes the Minimum Spanning Tree of the graph associated to \n * this instance of MSTAlgorithm, using Boruvka's algorithm. More specifically,\n * initializes a Union-Find data structure with all the nodes of the graph, \n * and unions connected components of the MST with respect to their edge of \n * minimal weight, until there is only one component left.\n * \n */\n\n this->mst_weight = 0.0;\n std::cout << \"\\nComputing MST using Boruvka's algorithm...\";\n std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();\n\n // initialization of the algorithm\n std::unordered_set<Edge*> edges = this->initial_graph->get_edges();\n std::vector<Node*> nodes = this->initial_graph->get_nodes();\n for(Node* n : nodes) {\n this->mst_graph.add_node(n);\n }\n\n UnionFind uf = UnionFind(nodes);\n\n // filling the MST with edges until there is only one component left\n int k = uf.get_num_classes();\n while (k > 1) {\n\n // for each component, we will keep track of the external edge with minimal weight\n std::unordered_set<Node*> representatives = uf.get_representatives();\n std::unordered_map<Node*, Edge*> min_edges;\n for (Node* rep : representatives) {\n min_edges.insert(std::pair<Node*, Edge*>(rep, NULL));\n }\n\n // getting all the edges with minimal weights according to components (not best complexity here)\n for (Edge* e : edges) {\n\n Node* n_in = e->p1;\n Node* n_out = e->p2;\n\n Node* rep_in = uf.Find(n_in);\n Node* rep_out = uf.Find(n_out);\n\n if (rep_in == rep_out) {\n continue;\n }\n\n if (min_edges.at(rep_in) == NULL || min_edges.at(rep_in)->weight > e->weight) {\n min_edges.erase(rep_in);\n min_edges.insert(std::pair<Node*, Edge*>(rep_in, e));\n }\n\n if (min_edges.at(rep_out) == NULL || min_edges.at(rep_out)->weight > e->weight) {\n min_edges.erase(rep_out);\n min_edges.insert(std::pair<Node*, Edge*>(rep_out, e));\n }\n \n }\n\n // linking components\n for (Node* rep : representatives) {\n Edge* min_edge = min_edges.at(rep);\n\n Node* rep1 = uf.Find(min_edge->p1);\n Node* rep2 = uf.Find(min_edge->p2);\n\n if (rep1 != rep2) {\n this->mst_graph.add_edge(min_edge);\n this->mst_weight += min_edge->weight;\n uf.Union(rep1, rep2);\n }\n }\n\n k = uf.get_num_classes();\n }\n\n std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();\n std::cout << \"[OK]\" << std::endl;\n this->treatment_done();\n\n std::cout << \"Total weight of the MST: \" << this->mst_weight << std::endl;\n\n std::cout << \"Time spent by the algorithm: \" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << \" µs\" << std::endl;\n}"
},
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 27.799999237060547,
"blob_id": "b15ec4c0da4b10e8ae042020e7a5a644521599fc",
"content_id": "904f784cad907bfa0822956bdb5206f719d3f87e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 5,
"path": "/test/main.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#define CATCH_CONFIG_MAIN\n#include \"catch.hpp\"\n\n// A file to give all rights on \"main\" to Catch2\n// To be compiled and used by other test files\n"
},
{
"alpha_fraction": 0.5288127064704895,
"alphanum_fraction": 0.5325384736061096,
"avg_line_length": 23.183183670043945,
"blob_id": "8575d362ed6b6a1710cf34f458348d3b6515a3e7",
"content_id": "ea39a70a7904510bc6c24289a6c9c2d63e5c5401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8069,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 333,
"path": "/src/graph.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"graph.hpp\"\n\nGraph::Graph(){\n this->node_counter = 0;\n this->edge_counter = 0;\n}\n\n\nvoid build_from_ashiip(Graph* g, std::string file_path)\n{\n /*!\n * @brief Builds a graph from scratch, from the nodes and edges generated by \n * aSHIIP and contained in a specific file format. Adds a random weight drawn \n * by an exponential distribution to each edge.\n * \n * @param file_path The path to the file \n * \n */\n\n std::ifstream is(file_path);\n assert((\"No such file\", is.is_open()));\n\n std::unordered_map<node_label_t, Node*> created_nodes;\n std::unordered_map<Node*, std::unordered_set<Node*> > links;\n\n std::default_random_engine generator;\n std::exponential_distribution<double> distribution(1);\n\n std::string line;\n\n while (std::getline(is, line)) {\n\n std::istringstream iss(line);\n\n // node's label (here, we want positive integers)\n node_label_t label;\n iss >> label;\n\n if (label <= 0) {\n continue;\n }\n\n Node* n_in;\n if (created_nodes.find(label) == created_nodes.end()) {\n n_in = new Node(label);\n created_nodes.insert(std::pair<node_label_t, Node*>(label, n_in));\n } else {\n n_in = created_nodes.at(label);\n }\n\n // type of node (not useful here)\n std::string cat;\n iss >> cat;\n\n // edges\n std::string other_node;\n int j = 0;\n while (iss >> other_node) {\n\n if (j == 0) {\n other_node = other_node.substr(1); // eliminating bracket\n }\n\n int length = other_node.size();\n other_node = other_node.substr(0, length-1); // eliminating comma\n node_label_t other_label = stoi(other_node);\n\n Node* n_out;\n if (created_nodes.find(other_label) == created_nodes.end()) {\n n_out = new Node(other_label);\n created_nodes.insert(std::pair<node_label_t, Node*>(other_label, n_out));\n } else {\n n_out = created_nodes.at(other_label);\n }\n\n // avoid double insertion of same edges\n Node* master_node = (n_in->label > n_out->label) ? n_in : n_out;\n Node* slave_node = (master_node == n_in) ? 
n_out : n_in;\n\n bool force_insert = false;\n\n if (links.find(master_node) == links.end()) {\n links.insert(std::pair<Node*, std::unordered_set<Node*> >(master_node, std::unordered_set<Node*>()));\n force_insert = true;\n }\n\n std::unordered_set<Node*>& linked_nodes = links.at(master_node);\n if (linked_nodes.find(slave_node) == linked_nodes.end()) {\n linked_nodes.insert(slave_node);\n force_insert = true;\n }\n\n if (force_insert == true) {\n Edge* e = new Edge(n_in, n_out, distribution(generator));\n g->add_edge(e);\n }\n\n j += 1;\n }\n }\n}\n\nGraph::Graph(std::string file_path) : Graph()\n{\n /*!\n * @brief Builds a graph from scratch, with its nodes and edges contained in a file\n * \n * @param file_path The path to the file \n * \n */\n build_from_ashiip(this, file_path);\n}\n\nGraph::Graph(std::vector<Point*> points)\n{\n /*!\n * @brief Builds a graph from scratch, with its nodes being points of a certain dimension\n * \n * @param points A vector of points in memory \n * \n */\n\n int n = points.size();\n std::vector<Node*> nodes;\n\n for (int i = 0; i < n; i ++) {\n nodes.push_back(new Node(i, points[i]));\n this->add_node(nodes[i]);\n }\n\n for (int i = 0; i < n; i ++) {\n for (int j = i+1; j < n; j ++) {\n this->add_edge(new Edge(nodes[i], nodes[j]));\n }\n }\n}\n\nbool Graph::has_node(Node* n){\n /*!\n * @brief Vérifie si le noeud \\p n est suvi par le graphe\n * \n * @param n Le noeud\n * \n * @return true si le noeud \\p n appartient bien au graphe, false sinon\n * \n */\n\n return this->adjacency_lists.find(n) != this->adjacency_lists.end();\n}\n\nvoid Graph::add_node(Node* n){\n /*!\n * @brief Ajoute le noeud \\p n au graphe\n * \n * @param n Le noeud\n * \n */\n\n\n if(this->has_node(n))\n return;\n\n this->adjacency_lists.insert(\n std::pair<Node*, std::unordered_set<Edge*> >(\n n,\n std::unordered_set<Edge*>()\n )\n );\n \n n->set_id(this->node_counter);\n this->node_id_mapper.insert(std::pair<node_id_t, Node*>(this->node_counter++, n));\n\n \n \n}\n\n\nvoid Graph::add_edge(Edge* e){\n /*!\n * @brief Ajoute l'arrête \\p e au graphe\n * \n * @param e L'arrête\n * \n * @note Il n'est pas nécessaire d'ajouter les points de \\p e au graphe; si ces derniers\n * n'est font pas déjà partie, ils seront ajoutés automatiquement.\n * \n */\n\n this->add_node(e->p1);\n this->add_node(e->p2);\n\n std::unordered_set<Edge*>& adj_p1 = this->adjacency_lists.at(e->p1);\n adj_p1.insert(e);\n\n std::unordered_set<Edge*>& adj_p2 = this->adjacency_lists.at(e->p2);\n adj_p2.insert(e);\n\n this->all_edges.insert(e);\n\n e->id = this->edge_counter;\n this->edge_id_mapper.insert(std::pair<edge_id_t, Edge*>(this->edge_counter++, e));\n\n}\n\n\n\nconst std::unordered_set<Edge*>& Graph::connected_edges(Node* n){\n /*!\n * @brief Liste les arrêtes entrantes/sortantes de \\p n\n * \n * @param n Le noeud dont on veut connaître les arrêtes liées\n * \n * @return Un ensemble en lecture seule des arrêtes liées à \\p n\n * \n */\n\n return this->adjacency_lists.at(n);\n}\n\nint Graph::get_number_of_nodes() \n{\n /*!\n * @brief Renvoie le nombre total de noeuds du graphe\n * \n * @return Nombre de noeuds dans le graphe\n * \n */\n\n return this->adjacency_lists.size();\n}\n\nstd::vector<Node*> Graph::get_nodes()\n{\n /*!\n * @brief Renvoie tous les noeuds du graphe (pointeurs)\n * \n * @return Un vecteur avec les noeuds du graphe\n * \n */\n\n std::vector<Node*> nodes;\n for (auto kv : this->adjacency_lists) {\n nodes.push_back(kv.first);\n }\n\n return nodes;\n}\n\nint Graph::get_number_of_edges()\n{\n 
/*!\n * @brief Renvoie le nombre total de noeuds du graphe\n * \n * @return Nombre de noeuds dans le graphe\n * \n */\n\n return this->all_edges.size();\n}\n\nstd::unordered_set<Edge*> Graph::get_edges()\n{\n /*!\n * @brief Renvoie toutes les arêtes du graphe (pointeurs)\n * \n * @return Un set non classé avec toutes les arêtes du graphe\n * \n */\n\n return this->all_edges;\n}\n\nNode* Graph::get_any_node()\n{\n /*!\n * @brief Renvoie un sommet au hasard dans les sommets du graphe\n * \n * @return Un noeud au hasard\n * \n */\n\n std::vector<Node*> nodes = this->get_nodes();\n int n = this->get_number_of_nodes();\n\n std::random_device rd; \n std::mt19937 gen(rd());\n std::uniform_int_distribution<> distrib(0, n-1);\n\n int idx = distrib(gen);\n\n return nodes[idx];\n}\n\ndouble Graph::total_weight()\n{\n /*!\n * @brief Returns the total weight of the edges of this graph\n * \n * @return Weight of all edges (by unique identifier)\n * \n */\n\n double weight = 0.0;\n for (Edge* e : this->all_edges) {\n weight += e->weight;\n }\n return weight;\n}\n\nNode* Graph::get_node(node_id_t id){\n /*!\n * @brief Returns the node whose id matches \\p id\n *\n * @param id The id of the node\n * \n * @returns The node matching \\p id\n */\n\n\n return this->node_id_mapper.at(id); \n}\n\nEdge* Graph::get_edge(edge_id_t id){\n /*!\n * @brief Returns the edge whose id matches \\p id\n *\n * @param id The id of the edge\n * \n * @returns The edge matching \\p id\n */\n\n return this->edge_id_mapper.at(id);\n}"
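\n\n// Example aSHIIP input line, as assumed by build_from_ashiip above: an\n// integer label, a node-type token (read but ignored; its exact spelling\n// here is illustrative), then a bracketed neighbour list, e.g.\n//   7 AS_node [2, 5, 11]\n// The leading '[' of the first neighbour token and the trailing ',' (or\n// ']') of every neighbour token are stripped during parsing.\n"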
},
{
"alpha_fraction": 0.516533613204956,
"alphanum_fraction": 0.5218073129653931,
"avg_line_length": 32.409523010253906,
"blob_id": "bdc51d0330e089bf5232c743a1dfdbc101d23a11",
"content_id": "8da39b8ebdeedde087ddd1e8c282a988b16c952d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7017,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 210,
"path": "/src/mst_prim_kumar.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_prim_kumar.hpp\"\n\n#include <iostream>\n#include <mpi.h>\n\n\n#define IS_NODE_ALLOCATED_TO_ME(nodeid) (nodeid >= node_id_min && nodeid < node_id_max)\n\nPrimKumarAlgorithm::PrimKumarAlgorithm(Graph* graph, int rank_, int world_size_) : MSTAlgorithm(graph) \n{\n /*!\n * @brief Builds the MST \"solver\"\n * \n * @param graph The graph to use\n * @param rank_ The rank of the processor (MPI-related)\n * @param world_size_ The world size (MPI-related)\n *\n */\n \n this->rank = rank_;\n this->world_size = world_size_;\n}\n\n\nvoid PrimKumarAlgorithm::compute_mst() \n{\n /*!\n * @brief Computes the Minimum Spanning Tree of the initial graph using \n * Prim-Kumar's algorithm. More specifically, initialize a priority queue to \n * sort the edges by their weight. At each iteration, it adds the edge with \n * minimal weight to the MST and checks for new edges to discover. \n * \n */\n\n\n int n_slaves = this->world_size;\n int n_nodes = this->initial_graph->get_number_of_nodes();\n\n int min_nodes_per_proc = n_nodes/(n_slaves);\n int split_nodes = n_nodes % n_slaves;\n\n int nodes_interval[2];\n\n nodes_interval[0] = this->rank*min_nodes_per_proc + (this->rank < split_nodes ? this->rank : split_nodes); \n nodes_interval[1] = nodes_interval[0] + min_nodes_per_proc + (this->rank < split_nodes);\n\n int node_id_min = nodes_interval[0];\n int node_id_max = nodes_interval[1]; \n\n struct {double d; int edge_id;} iter_res;\n struct {double d; int edge_id;} reduction_res;\n\n\n this->mst_weight = 0.0;\n\n std::chrono::steady_clock::time_point begin;\n if(this->rank == 0){\n std::cout << \"Computing the MST using Prim-Kumar's algorithm...\";\n begin = std::chrono::steady_clock::now();\n }\n\n int n = this->initial_graph->get_number_of_nodes();\n //std::vector<Node*> nodes = this->initial_graph->get_nodes();\n\n auto cmp = [](const Edge* e1, const Edge* e2) \n { \n double w1 = e1->weight;\n double w2 = e2->weight;\n\n if (w1 < w2) {\n return true;\n } else if (w1 > w2) {\n return false;\n } else { // Allows multiple keys for same weight values\n intptr_t p1 = reinterpret_cast<intptr_t>(e1);\n intptr_t p2 = reinterpret_cast<intptr_t>(e2);\n\n return p1 < p2;\n }\n };\n\n // initialization of the priority queue (ie set), best edge per vertex and beginning vertex\n std::unordered_map<Node*, Edge*> min_edge;\n std::set<Edge*, decltype(cmp)> pq(cmp);\n\n\n Node* current_node = this->initial_graph->get_node(0);\n\n\n\n\n this->mst_graph.add_node(current_node); // just to have the source node in the MST\n\n std::unordered_set<Edge*> connected_edges = this->initial_graph->connected_edges(current_node);\n\n for (Edge* e : connected_edges) {\n\n Node* other_node = e->other_node(current_node);\n if(IS_NODE_ALLOCATED_TO_ME(other_node->get_id())){\n if (!this->mst_graph.has_node(other_node)) {\n if (min_edge.find(other_node) != min_edge.end()) \n {\n if (*e < *min_edge.at(other_node)) {\n pq.erase(min_edge.at(other_node));\n min_edge.erase(other_node);\n\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n else \n {\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n } \n }\n\n //std::printf(\"Proc %d, %d remaning items in queue\\n\", this->rank, pq.size());\n\n // filling the MST with new nodes until it forms a tree \n while (this->mst_graph.get_number_of_nodes() < n) {\n\n Edge* new_edge;\n\n if(!pq.empty()){\n // adding the new edge and node to the MST and the visited nodes\n Edge* new_edge = *pq.begin();\n\n // We shall now send 
this to the root processor, so that it decides which edge is the best\n iter_res.edge_id = new_edge->id;\n iter_res.d = new_edge->weight; \n } else {\n iter_res.d = std::numeric_limits<double>::infinity();\n }\n\n\n //std::printf(\"Proc %d, %d remaning items in queue\\n\", this->rank, pq.size());\n MPI_Barrier(MPI_COMM_WORLD);\n MPI_Allreduce(&iter_res, &reduction_res, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD);\n MPI_Barrier(MPI_COMM_WORLD);\n\n\n // This is the REAL new edge\n new_edge = this->initial_graph->get_edge(reduction_res.edge_id);\n\n if(reduction_res.d == std::numeric_limits<double>::infinity()){\n printf(\"ARRRHS\\n\");\n } else {\n //printf(\"Elected edge %d\\n\", reduction_res.edge_id);\n }\n\n Node* p1 = new_edge->p1;\n Node* p2 = new_edge->p2;\n Node* new_node = (this->mst_graph.has_node(p1)) ? p2 : p1;\n\n this->mst_graph.add_edge(new_edge);\n this->mst_weight += new_edge->weight;\n\n\n\n this->mst_graph.add_node(new_node);\n \n pq.erase(new_edge);\n\n if(IS_NODE_ALLOCATED_TO_ME(p1->get_id()) || IS_NODE_ALLOCATED_TO_ME(p2->get_id())){\n // we won't need the best edge for this node anymore\n min_edge.erase(new_node);\n\n }\n\n\n // updating the external nodes best edges\n connected_edges = this->initial_graph->connected_edges(new_node);\n for (Edge* e : connected_edges) {\n Node* other_node = e->other_node(new_node);\n if(IS_NODE_ALLOCATED_TO_ME(other_node->get_id())){\n if (!this->mst_graph.has_node(other_node)) {\n if (min_edge.find(other_node) != min_edge.end()) \n {\n if (*e < *min_edge.at(other_node)) {\n pq.erase(min_edge.at(other_node));\n min_edge.erase(other_node);\n\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n else \n {\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n }\n }\n }\n \n MPI_Barrier(MPI_COMM_WORLD);\n if(this->rank == 0){\n std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();\n std::cout << \"[OK]\" << std::endl;\n this->treatment_done();\n\n std::cout << \"Total weight of the MST: \" << this->mst_weight << std::endl;\n\n std::cout << \"Time spent by the algorithm: \" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << \" µs\" << std::endl;\n }\n}\n"
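\n// Note on the election above: MPI_Allreduce with MPI_DOUBLE_INT and\n// MPI_MINLOC delivers the globally smallest weight together with its edge\n// id to every rank. For example, with three ranks proposing weights\n// {0.7, 0.2, inf}, every rank receives {d = 0.2, edge_id = rank 1's edge}\n// and adds that same edge, keeping all local copies of the MST in sync.\n"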
},
{
"alpha_fraction": 0.5800502896308899,
"alphanum_fraction": 0.5800502896308899,
"avg_line_length": 21.11111068725586,
"blob_id": "b56834226df18ec98b5a19ea62ece432ee27e8da",
"content_id": "a55938057f9c01552f4de14c22dd16886b9767b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1194,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 54,
"path": "/src/graph.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"edge.hpp\"\n#include \"node.hpp\"\n\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n#include <iostream>\n#include <fstream>\n#include <sstream>\n#include <cassert>\n#include <random>\n\nclass Graph{\n\n /*!\n * @class Décrit le graphe\n *\n */\n\n private:\n std::unordered_map<Node*, std::unordered_set<Edge*> > adjacency_lists;\n std::unordered_set<Edge*> all_edges;\n\n std::unordered_map<node_id_t, Node*> node_id_mapper;\n std::unordered_map<edge_id_t, Edge*> edge_id_mapper;\n\n node_id_t node_counter;\n edge_id_t edge_counter;\n \n public:\n Graph();\n Graph(std::string file_path);\n Graph(std::vector<Point*> points);\n\n void add_node(Node* n);\n void add_edge(Edge* e);\n bool has_node(Node* n);\n\n const std::unordered_set<Edge*>& connected_edges(Node* n);\n\n int get_number_of_nodes();\n std::vector<Node*> get_nodes();\n int get_number_of_edges();\n std::unordered_set<Edge*> get_edges();\n Node* get_any_node();\n\n Node* get_node(node_id_t id);\n Edge* get_edge(edge_id_t id);\n\n double total_weight();\n\n};"
},
{
"alpha_fraction": 0.6399416923522949,
"alphanum_fraction": 0.6413994431495667,
"avg_line_length": 17.078947067260742,
"blob_id": "936e4476e3b0c83cc8e0e6cd6b4dfc9d2ce18245",
"content_id": "459de8f0a0c27c2310a5350eb464332c02dc25a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 38,
"path": "/src/mst_algorithm.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once \n\n#include \"graph.hpp\"\n#include \"union_find.hpp\"\n\n#include <queue>\n#include <set>\n#include <chrono>\n\nclass MSTAlgorithm \n{\n\n /*!\n * @class Abstract class for all the algorithms computing MST\n * \n */\n\nprotected: \n Graph* initial_graph;\n Graph mst_graph;\n double mst_weight;\n bool treated;\n\n void treatment_done();\n\npublic:\n MSTAlgorithm(Graph* graph);\n\n Graph* get_initial_graph();\n Graph* get_mst_graph();\n double get_mst_weight();\n bool is_treated();\n\n virtual void compute_mst() = 0;\n\n std::unordered_map<Node*, Node*> compute_clustering(int k);\n std::unordered_map<Node*, Node*> compute_clustering(double cutoff);\n};"
},
{
"alpha_fraction": 0.5351812243461609,
"alphanum_fraction": 0.5479744076728821,
"avg_line_length": 14.161290168762207,
"blob_id": "45fba452c0841904e576e8150bcd03d5f16dd385",
"content_id": "c582529015163372f89f4d5feea7db93da441369",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 471,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 31,
"path": "/src/edge.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"node.hpp\"\n\n#include <string>\n\ntypedef double edge_weight_t;\ntypedef int edge_id_t;\n\nclass Edge{\n\n /*!\n * @class Décrit une arrête du graphe\n *\n */\n\n public:\n Node* p1;\n Node* p2;\n edge_weight_t weight;\n\n node_id_t id;\n\n Edge(Node* p1, Node* p2, edge_weight_t weight);\n Edge(Node* p1, Node* p2);\n\n Node* other_node(Node* p);\n\n bool operator<(Edge const& other) const;\n \n};"
},
{
"alpha_fraction": 0.6322778463363647,
"alphanum_fraction": 0.6322778463363647,
"avg_line_length": 17.49056625366211,
"blob_id": "55814b703b48a7d1a69cd1d291380b5e5d73f0d0",
"content_id": "9bfa98d7f6d0d96b809de2a323188cbb926dfe83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 53,
"path": "/src/kmeans.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once \n\n#include <iostream>\n#include <cassert>\n#include <cmath>\t// for sqrt, fabs\n#include <cfloat>\t// for DBL_MAX\n#include <cstdlib>\t// for rand, srand\n#include <ctime>\t// for rand seed\n#include <fstream>\n#include <cstdio>\t// for EOF\n#include <string>\n#include <algorithm>\t// for count\n#include <vector>\n\n#include \"point.hpp\"\n\nclass Cloud \n{\nprivate:\n\tint d;\n\tint n;\n\tint k;\n\n\t// maximum possible number of Points\n\tint nmax;\n\n\tPoint *points;\n\tPoint *centers;\n\npublic:\n Cloud(int _d, int _nmax, int _k);\n ~Cloud();\n\n int get_d();\n int get_n();\n int get_k();\n Point& get_point(int i);\n Point& get_center(int j);\n\n void add_point(Point &p, int label);\n void set_center(Point &p, int j);\n\n double intracluster_variance();\n int set_voronoi_labels();\n void set_centroid_centers();\n void kmeans();\n void init_bounding_box();\n void init_forgy();\n void init_plusplus();\n void init_random_partition();\n\n double silhouette();\n};"
},
{
"alpha_fraction": 0.6283581852912903,
"alphanum_fraction": 0.6313433051109314,
"avg_line_length": 19.33333396911621,
"blob_id": "e2ebf9e1719ccf1fcecb4664d4118b10ea5c1a7b",
"content_id": "333ce292a85dc9e9e81c04c79e39de3f7392494e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 33,
"path": "/src/union_find.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"graph.hpp\"\n\n#include <cassert>\n\nclass UnionFind \n{\n\n /*!\n * @class Union-Find data structure, which implements \n * an efficient way of working with connected components \n *\n */\n\nprotected:\n std::unordered_map<Node*, Node*> parent;\n std::unordered_map<Node*, int> rank;\n std::unordered_set<Node*> representatives;\n int num_classes;\n \npublic:\n UnionFind(std::vector<Node*> nodes);\n ~UnionFind();\n\n Node* get_parent(Node* node);\n int get_rank(Node* node);\n std::unordered_set<Node*> get_representatives();\n int get_num_classes();\n\n Node* Find(Node* node);\n void Union(Node* n1, Node* n2);\n};"
},
{
"alpha_fraction": 0.5447906255722046,
"alphanum_fraction": 0.5481986403465271,
"avg_line_length": 17.85321044921875,
"blob_id": "befde9ba07e18803a2b84b64bbf147983ebfa9c4",
"content_id": "1fce2f85c6454b39f6d3a537c95dbea3978f555c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2054,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 109,
"path": "/src/point.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"point.hpp\"\n\nint Point::d;\n\nPoint::Point() \n{\n /*!\n * @brief Base constructor of a Point\n * \n */\n\n this->coords = new double [d] ();\n this->label = 0;\n}\n\nPoint::~Point()\n{\n /*!\n * @brief Base destructor of a Point\n * \n */\n\n delete[] this->coords;\n}\n\nint Point::get_dim()\n{\n /*!\n * @brief Gives the shared dimension between all the points\n * \n * @return The dimension \\p d of all the points\n * \n */\n\n return this->d;\n}\n\nvoid Point::print()\n{\n /*!\n * @brief Enables to print d-dimensional points on the console\n * \n */\n\n for (int i = 0; i < d; i ++) {\n std::cout << coords[i] << std::endl;\n }\n std::cout << \"\\n\" << std::endl;\n return;\n}\n\ndouble Point::distance(Point* other)\n{\n /*!\n * @brief Evaluates the distance between two instances of the class Point\n * \n * @param other Another Point to evaluate the distance with \n * \n * @return The distance between this point and the \\p other \n */\n\n double dist = 0.0;\n for (int i = 0; i < d; i ++) {\n dist += std::pow(coords[i] - other->coords[i], 2);\n }\n dist = std::sqrt(dist);\n\n return dist;\n}\n\nstd::vector<Point*> Point::read_points_from_file(std::string file_path, int d) \n{\n /*!\n * @brief Builds a vector of points from a file where data is correctly\n * formatted (double spaced with \" \")\n * \n * @param file_path the path to the file to get data from\n * @param d the dimension of the points\n * \n * @return A vector of pointers to the created points\n */\n\n std::vector<Point*> points;\n\n std::ifstream is(file_path);\n assert((\"No such file\", is.is_open()));\n\n std::string line;\n\n\t// while not at the end of the file\n\twhile(is.peek() != EOF)\n\t{\n\t\t// read new points\n Point* p = new Point();\n\n\t\tfor(int m = 0; m < d; m++)\n\t\t{\n\t\t\tis >> p->coords[m];\n\t\t}\n\n points.push_back(p);\n\n\t\t// new line\n\t\tis.get();\n\t}\n points.pop_back(); // last line makes an empty point otherwise\n\n return points;\n}"
},
{
"alpha_fraction": 0.4479905366897583,
"alphanum_fraction": 0.5010638236999512,
"avg_line_length": 30.102941513061523,
"blob_id": "8daa8a55284f24924180465461a100238b812483",
"content_id": "1cc24806e30281179cf10f43dfaa5aef28cf95c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8464,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 272,
"path": "/src/plot.py",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe purpose of this file is to plot results in fancy charts.\n\nNote:\n - Results from MST algorithms are the mean results after 5 iterations \n with the same data \n - Results from MST clustering & k-means are the mean results after 5 \n iterations with the same data (k=5 and different size & n=1000 and \n different number of clusters)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nfont = {\"size\": 18} \nmpl.rc(\"font\", **font)\n\n######################################################################\n### COMPARISON OF MST ALGORITHMS ON GENERATED GRAPHS ###\n######################################################################\n\nMST_ALGORITHMS = {\"P\": \"Prim\", \"B\": \"Boruvka\", \"K\": \"Kruskal\"}\n\nERDOS_RENYI_RESULTS = {\n 100: {\"P\": 5.428, \"B\": 14.104, \"K\": 2.707, \"PP\": 1.237},\n 250: {\"P\": 7.108, \"B\": 32.221, \"K\": 6.360, \"PP\": 5.033},\n 500: {\"P\": 23.607, \"B\": 120.978, \"K\": 17.007, \"PP\": 16.427},\n 1000: {\"P\": 98.500, \"B\": 480.175, \"K\": 65.417, \"PP\": 64.981}\n }\n\nERDOS_RENYI_SIZE = {\n 100: 540,\n 250: 3089,\n 500: 12498,\n 1000: 50210\n }\n\nBARABASI_ALBERT_RESULTS = {\n 100: {\"P\": 6.128, \"B\": 14.120, \"K\": 5.249, \"PP\": 1.067},\n 250: {\"P\": 3.107, \"B\": 8.099, \"K\": 3.645, \"PP\": 1.636},\n 500: {\"P\": 5.267, \"B\": 16.708, \"K\": 6.148, \"PP\": 4.677},\n 1000: {\"P\": 9.082, \"B\": 30.212, \"K\": 10.780, \"PP\": 8.436}\n }\n\nBARABASI_ALBERT_SIZE = {\n 100: 614,\n 250: 465,\n 500: 966,\n 1000: 1966\n }\n\ndef plot_mst_results():\n \"\"\"Plot the results of MST algorithms. \n \n Plot the mean time spent on different graphs generated using \n Erdos-Rényi and Barabasi-Albert methods\n \"\"\"\n\n # Plot for Erdos-Rényi results\n nb_nodes = []\n nb_edges = []\n compl = []\n\n prim_res = []\n boruvka_res = []\n kruskal_res = []\n prim_para_res = []\n\n for n, algos in ERDOS_RENYI_RESULTS.items():\n\n prim_res.append(algos[\"P\"])\n boruvka_res.append(algos[\"B\"])\n kruskal_res.append(algos[\"K\"])\n prim_para_res.append(algos[\"PP\"])\n\n nb_nodes.append(n)\n nb_edges.append(ERDOS_RENYI_SIZE[n])\n compl.append(nb_edges[-1]*np.log(nb_nodes[-1]))\n\n plt.figure(1)\n plt.plot(compl, prim_res, label=\"Prim\")\n plt.plot(compl, boruvka_res, label=\"Boruvka\")\n plt.plot(compl, kruskal_res, label=\"Kruskal\")\n plt.xlabel(r\"$m \\times \\log(n)$\")\n plt.ylabel(\"Time spent in $ms$\")\n plt.title(\"Comparison: Erdos-Rényi graphs\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/ER_MST.png\")\n plt.show()\n\n plt.figure(2) \n plt.plot(compl, prim_res, label=\"Prim\")\n plt.plot(compl, prim_para_res, label=\"Parallelized Prim\")\n plt.plot(compl, kruskal_res, label=\"Kruskal\")\n plt.xlabel(r\"$m \\times \\log(n)$\")\n plt.ylabel(\"Time spent in $ms$\")\n plt.title(\"Comparison: Erdos-Rényi graphs\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/ER_MST_para.png\")\n plt.show()\n\n # Plot for Barabasi-Albert results\n nb_nodes = []\n nb_edges = []\n compl = []\n\n prim_res = []\n boruvka_res = []\n kruskal_res = []\n prim_para_res = []\n\n for n, algos in BARABASI_ALBERT_RESULTS.items():\n\n prim_res.append(algos[\"P\"])\n boruvka_res.append(algos[\"B\"])\n kruskal_res.append(algos[\"K\"])\n prim_para_res.append(algos[\"PP\"])\n\n nb_nodes.append(n)\n nb_edges.append(BARABASI_ALBERT_SIZE[n])\n compl.append(nb_edges[-1]*np.log(nb_nodes[-1]))\n\n plt.figure(3)\n plt.plot(compl, prim_res, label=\"Prim\")\n plt.plot(compl, boruvka_res, 
label=\"Boruvka\")\n plt.plot(compl, kruskal_res, label=\"Kruskal\")\n plt.xlabel(r\"$m \\times \\log(n)$\")\n plt.ylabel(\"Time spent in $ms$\")\n plt.title(\"Comparison: Barabasi-Albert graphs\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/BA_MST.png\")\n plt.show()\n\n plt.figure(4)\n plt.plot(compl, prim_res, label=\"Prim\")\n plt.plot(compl, prim_para_res, label=\"Parallelized Prim\")\n plt.plot(compl, kruskal_res, label=\"Kruskal\")\n plt.xlabel(r\"$m \\times \\log(n)$\")\n plt.ylabel(\"Time spent in $ms$\")\n plt.title(\"Comparison: Barabasi-Albert graphs\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/BA_MST_para.png\")\n plt.show()\n\n######################################################################\n### COMPARISON OF MST CLUSTERING AND K-MEANS ###\n######################################################################\n\nCLUSTERING_ALGORITHMS = {\"K\": \"Kruskal MST Clustering\", \"KM\": \"K-means Clustering\",\n \"I\": \"Edge inconsistency\"}\n\n# With k=5, comparing time spent\nWALMART_N_RESULTS = {\n 100: {\"K\": 26.035, \"KM\": 1.820},\n 250: {\"K\": 145.940, \"KM\": 4.271},\n 500: {\"K\": 683.692, \"KM\": 7.229},\n 1000: {\"K\": 3296.818, \"KM\": 25.571}\n }\n\n# With n=1000, comparing intracluster variance\nWALMART_K_RESULTS = {\n 3: {\"K\": 2.40, \"KM\": 2.09},\n 4: {\"K\": 2.39, \"KM\": 2.04},\n 5: {\"K\": 2.33, \"KM\": 1.85},\n 6: {\"K\": 2.32, \"KM\": 1.81},\n 7: {\"K\": 2.30, \"KM\": 1.77},\n 8: {\"K\": 2.18, \"KM\": 1.67},\n 10: {\"K\": 2.16, \"KM\": 1.60}\n }\n\nERDOS_RENYI_I = {\n 5: {\"all\": 249, \"big\": 23},\n 10: {\"all\": 193, \"big\": 13},\n 50: {\"all\": 149, \"big\": 4},\n 100: {\"all\": 145, \"big\": 3},\n 500: {\"all\": 142, \"big\": 1},\n 1000: {\"all\": 142, \"big\": 1}\n }\n\nWALMART_I_RESULTS = {\n 5: {\"all\": 244, \"big\": 53},\n 10: {\"all\": 189, \"big\": 30},\n 50: {\"all\": 148, \"big\": 6},\n 100: {\"all\": 137, \"big\": 4},\n 500: {\"all\": 138, \"big\": 3},\n 1000: {\"all\": 134, \"big\": 3}\n }\n\ndef plot_clustering_results():\n \"\"\"Plot clustering results on Walmart dataset.\"\"\"\n\n # Plot for time complexity\n nb_nodes = []\n\n mst_res = []\n kmeans_res = []\n\n for n, algos in WALMART_N_RESULTS.items():\n\n mst_res.append(np.log10(algos[\"K\"]))\n kmeans_res.append(np.log10(algos[\"KM\"]))\n\n nb_nodes.append(n)\n\n plt.figure(1)\n plt.plot(nb_nodes, mst_res, label=\"MST Clustering\")\n plt.plot(nb_nodes, kmeans_res, label=\"K-means\")\n plt.xlabel(\"Number of points\")\n plt.ylabel(r\"$\\log($Time spent$)$\")\n plt.title(\"Comparison: Time spent $(k = 5)$\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/clustering_time.png\")\n plt.show()\n\n # Plot for intracluster variance\n nb_clusters = []\n\n mst_res = []\n kmeans_res = []\n\n for k, algos in WALMART_K_RESULTS.items():\n\n mst_res.append(algos[\"K\"])\n kmeans_res.append(algos[\"KM\"])\n\n nb_clusters.append(k)\n\n plt.figure(2)\n plt.plot(nb_clusters, mst_res, label=\"MST Clustering\")\n plt.plot(nb_clusters, kmeans_res, label=\"K-means\")\n plt.xlabel(\"Number of clusters\")\n plt.ylabel(\"Intracluster variance\")\n plt.title(\"Comparison: Performance $(n = 1000)$\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/clustering_perf.png\")\n plt.show()\n\n # Plot for MST clustering (inconsistency method)\n cutoffs = []\n\n er_res = []\n walmart_res = []\n\n for c, er_clusters in ERDOS_RENYI_I.items():\n\n cutoffs.append(np.log10(c))\n\n er_res.append(er_clusters[\"big\"])\n 
walmart_res.append(WALMART_I_RESULTS[c][\"big\"])\n\n plt.figure(3)\n plt.plot(cutoffs, er_res, label=\"E-R graph\")\n plt.plot(cutoffs, walmart_res, label=\"Walmart data\")\n plt.xlabel(r\"$\\log($Cutoff$)$\")\n plt.ylabel(r\"Clusters of size $\\geq 2$\")\n plt.title(\"Comparison: Inconsistency method\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"report/figures/clustering_inc.png\")\n plt.show()\n\nif __name__ == \"__main__\":\n\n plot_mst_results()\n\n plot_clustering_results()\n"
},
{
"alpha_fraction": 0.7543103694915771,
"alphanum_fraction": 0.7543103694915771,
"avg_line_length": 28.125,
"blob_id": "86a5917cf90f1774b3a9a63f536a6d9544e1208b",
"content_id": "d4a662b73b246620ab0bdeb82a6e14198c6fc319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 8,
"path": "/src/compare_clustering.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once \n\n#include \"mst_kruskal.hpp\"\n#include \"kmeans.hpp\"\n\nint nb_columns(const std::string &line);\nvoid compare_clustering(std::string file_to_read, int k);\nvoid compare_inconsistency(std::string file_to_read, double cutoff);"
},
{
"alpha_fraction": 0.5606013536453247,
"alphanum_fraction": 0.5647861361503601,
"avg_line_length": 25.883333206176758,
"blob_id": "7b94defaccc2555a4a23eb58ea6d491c69c4c010",
"content_id": "01168ce4bfea91b23ad5855f598f15e177cc3938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6452,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 240,
"path": "/src/node.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"node.hpp\"\n\nNode::Node(node_label_t label)\n{\n /*!\n * @brief Base constructor of a Node\n * \n * @param label A purely cosmetic label for the Node\n * \n */\n \n this->label = label;\n this->p = NULL;\n}\n\nNode::Node(node_label_t label, Point* p)\n{\n /*!\n * @brief Constructor of a Node wrt a specified point \n * \n * @param label A purely cosmetic label for the Node\n * @param p The Point \\p p to be used\n * \n */\n\n this->label = label;\n this->p = p;\n}\n\nPoint* Node::get_point()\n{\n /*!\n * @brief Returns the point contained in the Node\n * \n * @return The \\p point of the Node\n * \n */\n\n return this->p;\n}\n\nvoid Node::set_point(Point* p)\n{\n /*!\n * @brief Overwrites the point contained in the Node\n * \n * @param p The \\p point to change\n * \n */\n\n this->p = p;\n}\n\ndouble Node::dist(Node* other)\n{ \n /*!\n * @brief Returns the Euclidian distance between two Nodes\n * \n * @param other The \\p other Node to evaluate distance with\n * \n * @return The Euclidian distance between this Node and the \\p other Node\n * \n */\n\n Point* p1_p = this->p;\n Point* p2_p = other->get_point();\n\n if (p1_p != NULL && p2_p != NULL) {\n if (p1_p->get_dim() == p2_p->get_dim()) {\n return p1_p->distance(p2_p);\n }\n }\n\n return 1; // arbitrary choice\n}\n\ndouble intracluster_variance(std::unordered_map<Node*, Node*> clusters)\n{\n /*!\n * @brief Returns the intracluster variance by computing the centers of \n * the different clusters first \n * \n * @param clusters The different clusters computed, that is, a map between \n * nodes and their representatives\n * \n * @return The intracluster variance of these clusters\n * \n */\n\n double intracluster_variance = 0.0;\n\n // firstly, we compute the clusters in the reverse mode, that is, \n // we take for each representative, all its members\n std::unordered_set<Node*> representatives;\n std::unordered_map<Node*, std::unordered_set<Node*> > members;\n\n for (auto kv : clusters) {\n Node* node = kv.first;\n Node* rep = kv.second;\n\n representatives.insert(rep);\n \n if (members.find(rep) == members.end()) {\n members.insert(std::pair<Node*, std::unordered_set<Node*> >(rep, std::unordered_set<Node*>()));\n }\n\n members.at(rep).insert(node);\n }\n\n for (Node* rep : representatives) {\n int d = rep->get_point()->get_dim();\n int cluster_size = members.at(rep).size();\n\n // secondly, we compute centers as in the M-step of kmeans \n Node* center = new Node(rep->label, new Point());\n\n for (Node* member : members.at(rep)) {\n for (int j = 0; j < d; j ++) {\n center->get_point()->coords[j] += member->get_point()->coords[j];\n }\n }\n\n for (int j = 0; j < d; j ++) {\n center->get_point()->coords[j] /= cluster_size;\n }\n\n // finally, we compute the intracluster variance out of these centers\n double cluster_variance = 0.0;\n for (Node* member : members.at(rep)) {\n cluster_variance += member->get_point()->distance(center->get_point());\n }\n\n intracluster_variance += cluster_variance;\n }\n intracluster_variance /= clusters.size();\n\n return intracluster_variance;\n}\n\ndouble silhouette(std::unordered_map<Node*, Node*> clusters)\n{\n /*!\n * @brief Computes the silhouette of the clusters given in argument\n *\n * @param clusters A mapping between nodes and their representatives \n * in clusters\n * \n * @return The silhouette of these clusters\n */\n\n int n = clusters.size(); // number of nodes\n\n\tdouble silhouette = 0.0;\n\n // firstly, we compute the clusters in the reverse mode, that is, \n // we take 
for each representative, all its members\n std::unordered_set<Node*> representatives;\n std::unordered_map<Node*, int> clusters_size;\n std::unordered_map<Node*, std::unordered_set<Node*> > members;\n std::unordered_set<Node*> nodes;\n\n for (auto kv : clusters) {\n Node* node = kv.first;\n Node* rep = kv.second;\n\n nodes.insert(node);\n\n representatives.insert(rep);\n if (clusters_size.find(rep) == clusters_size.end()) {\n clusters_size.insert(std::pair<Node*, int>(rep, 0));\n }\n clusters_size.at(rep) += 1;\n \n if (members.find(rep) == members.end()) {\n members.insert(std::pair<Node*, std::unordered_set<Node*> >(rep, std::unordered_set<Node*>()));\n }\n\n members.at(rep).insert(node);\n }\n\n int nb_clusters = representatives.size();\n\n\t// we compute a and b which are respectively the \n\t// average distance to points of the same cluster \n\t// and min of average distances to foreign clusters\n std::unordered_map<Node*, double> a;\n std::unordered_map<Node*, double> b;\n std::unordered_map<Node*, double> s;\n\n for (Node* node : nodes) {\n a.insert(std::pair<Node*, double>(node, 0.0));\n b.insert(std::pair<Node*, double>(node, 0.0));\n s.insert(std::pair<Node*, double>(node, 0.0));\n }\n\n\tfor (Node* node : nodes){\n std::unordered_map<Node*, double> dist_to_clusters;\n for (Node* rep : representatives) {\n dist_to_clusters.insert(std::pair<Node*, double>(rep, 0.0));\n }\n\n for (Node* other_node : nodes) {\n if (other_node != node) {\n Node* other_node_cluster = clusters.at(other_node);\n\n dist_to_clusters.at(other_node_cluster) += std::sqrt(node->dist(other_node));\n }\n }\n\n double minDist = DBL_MAX;\n for (Node* rep : representatives) {\n dist_to_clusters.at(rep) /= clusters_size.at(rep);\n\n if (rep != clusters.at(node) && dist_to_clusters.at(rep) < minDist) {\n minDist = dist_to_clusters.at(rep);\n }\n }\n\n a.at(node) = dist_to_clusters.at(clusters.at(node));\n if (minDist < DBL_MAX) {\n b.at(node) = minDist;\n }\n s.at(node) = (b.at(node) - a.at(node)) / (std::max(a.at(node), b.at(node)));\n\t}\n\n for (Node* node : nodes) {\n silhouette += s.at(node);\n }\n\tsilhouette /= n;\n\n\treturn silhouette;\n}\n\nvoid Node::set_id(node_id_t id_){\n this->id = id_;\n}\n\nnode_id_t Node::get_id(){\n return this->id;\n}\n"
},
{
"alpha_fraction": 0.47390180826187134,
"alphanum_fraction": 0.5307493805885315,
"avg_line_length": 20.27472496032715,
"blob_id": "d6d8f249e5258a0cff1e2cc93e8ae4f94be11570",
"content_id": "bf2b4d6da5919a00e3b9f27ec885d4d412bed747",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1935,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 91,
"path": "/test/test_edge.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\"\n\n#include \"../src/edge.hpp\"\n\nTEST_CASE(\"Simple edge initialization\", \"[edge:init]\") \n{\n Point::d = 2;\n\n Node n1 = Node(1);\n Node n2 = Node(5);\n\n SECTION(\"Without input weight and no points\") \n {\n Edge edge_no_weight = Edge(&n1, &n2);\n\n REQUIRE(edge_no_weight.p1->label == 1);\n REQUIRE(edge_no_weight.p2->label == 5);\n REQUIRE(std::abs(edge_no_weight.weight - 1) < 0.001);\n }\n\n SECTION(\"Without input weight and with points\")\n {\n Point p1;\n p1.coords[0] = 0;\n p1.coords[1] = 2;\n\n Point p2;\n p2.coords[0] = 2;\n p2.coords[1] = 0;\n\n n1.set_point(&p1);\n n2.set_point(&p2); \n\n Edge edge_with_points = Edge(&n1, &n2);\n\n REQUIRE(edge_with_points.p1->label == 1);\n REQUIRE(edge_with_points.p2->label == 5);\n REQUIRE(std::abs(edge_with_points.weight - 2.828) < 0.001);\n }\n\n SECTION(\"With input weight\") \n {\n Edge edge_weight = Edge(&n1, &n2, 10);\n\n REQUIRE(edge_weight.p1->label == 1);\n REQUIRE(edge_weight.p2->label == 5);\n REQUIRE(std::abs(edge_weight.weight - 10) < 0.001);\n }\n}\n\nTEST_CASE(\"Getter of other node\", \"[edge:other_node]\")\n{\n Node p1 = Node(1);\n Node p2 = Node(2);\n Node p3 = Node(3);\n Node* p0 = NULL;\n\n Edge e1 = Edge(&p1, &p2);\n\n REQUIRE(e1.other_node(&p1) == &p2);\n REQUIRE(e1.other_node(&p3) == NULL);\n REQUIRE(e1.other_node(p0) == NULL);\n}\n\nTEST_CASE(\"Edge operator for <\", \"[edge:<]\")\n{\n Point::d = 2;\n\n Node n1 = Node(1);\n Node n2 = Node(2);\n\n Edge edge_no_weight = Edge(&n1, &n2);\n\n Node n3 = Node(3);\n Node n4 = Node(4);\n\n Point p1;\n p1.coords[0] = 0;\n p1.coords[1] = 2;\n\n Point p2;\n p2.coords[0] = 2;\n p2.coords[1] = 0;\n\n n3.set_point(&p1);\n n4.set_point(&p2);\n\n Edge edge_with_points = Edge(&n3, &n4);\n\n REQUIRE(edge_no_weight < edge_with_points);\n}"
},
{
"alpha_fraction": 0.5118483304977417,
"alphanum_fraction": 0.5333234667778015,
"avg_line_length": 29.14285659790039,
"blob_id": "f44c2d329e4f37bea44b85eac5d393a0bb99b4a9",
"content_id": "978395335c16599c61c6a60bb840a0313de84a31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6758,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 224,
"path": "/src/main.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"graph.hpp\"\n#include \"mst_prim.hpp\"\n#include \"mst_boruvka.hpp\"\n#include \"mst_kruskal.hpp\"\n#include \"mst_prim_kumar.hpp\"\n#include \"compare_clustering.hpp\"\n\n#include <mpi.h>\n\n#include <vector>\n#include <iostream>\n#include <sstream>\n\nvoid exit_with_help()\n{\n std::printf(\n \"Usage: build/main [options]\\n\"\n \"options:\\n\"\n \"-a : run Prim, Boruvka & Kruskal algorithms on generated graphs\\n\"\n \"-c : run a comparison between MST clustering and k-means\\n\"\n \"-i : run a comparison between Inconsistency clustering and k-means\\n\"\n \"-m : run Prim with MPI\\n\"\n );\n\texit(1);\n}\n\nint main(int argc, char** argv){\n\n bool run_mst_algo = false;\n bool run_mst_clust = false;\n bool run_mst_inc = false;\n bool run_mpi_prim = false;\n\n if (argc <= 1) \n {\n exit_with_help();\n }\n \n\tfor (int i = 1; i<argc; i++)\n\t{\n\t\tif(argv[i][0] != '-') exit_with_help();\n\t\t++i;\n\t\tswitch(argv[i-1][1])\n\t\t{\n\t\t\tcase 'a': run_mst_algo = true; break;\n\t\t\tcase 'c': run_mst_clust = true; break;\n\t\t\tcase 'i': run_mst_inc = true; break;\n case 'm': run_mpi_prim = true; break;\n\t\t\tdefault:\n\t\t\t\tstd::fprintf(stderr,\"unknown option\\n\");\n\t\t\t\texit_with_help();\n\t\t}\n\t}\n\n /*\n * Tasks \n */\n\n // TASKS 1 & 2\n\n if (run_mst_algo) \n {\n // Comparing MST algorithms on Erdos-Rényi graphs\n\n std::vector<int> ER_graph_sizes({4, 8, 100, 250, 500, 1000});\n\n for (int n : ER_graph_sizes) \n {\n Graph gER = Graph(\"data/ERn\" + std::to_string(n) + \"p10.txt\");\n\n std::cout << \"\\nComparing MST algorithms with Erdos-Rényi graphs of size \" << n << \".\" << std::endl;\n std::cout << \"The graph contains \" << gER.get_number_of_edges() << \" edges.\" << std::endl;\n\n PrimAlgorithm primER = PrimAlgorithm(&gER);\n primER.compute_mst();\n\n BoruvkaAlgorithm boruvkaER = BoruvkaAlgorithm(&gER);\n boruvkaER.compute_mst();\n\n KruskalAlgorithm kruskalER = KruskalAlgorithm(&gER);\n kruskalER.compute_mst();\n }\n\n // Comparing MST algorithms on Barabasi-Albert graphs\n\n std::vector<int> BA_graph_sizes({100, 250, 500, 1000});\n\n for (int n : BA_graph_sizes) \n {\n Graph gBA = Graph(\"data/BAn\" + std::to_string(n) + \"mo20m2.txt\");\n\n std::cout << \"\\nComparing MST algorithms with Barabasi-Albert graphs of size \" << n << \".\" << std::endl;\n std::cout << \"The graph contains \" << gBA.get_number_of_edges() << \" edges.\" << std::endl;\n\n PrimAlgorithm primBA = PrimAlgorithm(&gBA);\n primBA.compute_mst();\n\n BoruvkaAlgorithm boruvkaBA = BoruvkaAlgorithm(&gBA);\n boruvkaBA.compute_mst();\n\n KruskalAlgorithm kruskalBA = KruskalAlgorithm(&gBA);\n kruskalBA.compute_mst();\n }\n }\n\n if (run_mpi_prim) \n {\n // Comparing MST algorithms on Erdos-Rényi graphs\n\n std::vector<int> ER_graph_sizes({4, 8, 100, 250, 500, 1000});\n \n MPI_Init(NULL, NULL);\n\n int world_size;\n MPI_Comm_size(MPI_COMM_WORLD, &world_size);\n\n int world_rank;\n MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);\n\n \n \n\n for (int n : ER_graph_sizes) \n {\n Graph gER = Graph(\"data/ERn\" + std::to_string(n) + \"p10.txt\");\n\n std::cout << \"\\nComparing MST algorithms with Erdos-Rényi graphs of size \" << n << \".\" << std::endl;\n std::cout << \"The graph contains \" << gER.get_number_of_edges() << \" edges.\" << std::endl;\n\n PrimKumarAlgorithm primER = PrimKumarAlgorithm(&gER, world_rank, world_size);\n primER.compute_mst();\n\n\n }\n\n // Comparing MST algorithms on Barabasi-Albert graphs\n\n std::vector<int> BA_graph_sizes({100, 250, 500, 1000});\n\n for (int n 
: BA_graph_sizes) \n {\n Graph gBA = Graph(\"data/BAn\" + std::to_string(n) + \"mo20m2.txt\");\n\n std::cout << \"\\nComparing MST algorithms with Barabasi-Albert graphs of size \" << n << \".\" << std::endl;\n std::cout << \"The graph contains \" << gBA.get_number_of_edges() << \" edges.\" << std::endl;\n\n PrimKumarAlgorithm primBA = PrimKumarAlgorithm(&gBA, world_rank, world_size);\n primBA.compute_mst();\n }\n \n MPI_Finalize();\n }\n\n \n // TASK 5\n\n if (run_mst_clust) \n {\n // Comparing MST clustering and k-means on Walmart dataset \n\n std::vector<int> walmart_graph_sizes({100, 250, 500, 1000});\n std::vector<int> nb_clusters({3, 4, 5, 6, 7, 8});\n\n for (int n : walmart_graph_sizes)\n {\n for (int k : nb_clusters)\n {\n std::cout << \"\\nComparing MST clustering and k-means on graph of size \" << n;\n std::cout << \" and with \" << k << \" clusters\" << std::endl;\n\n compare_clustering(\"data/scaled_n\" + std::to_string(n) + \"_walmart_features.txt\", k);\n }\n }\n }\n\n // TASK 6 \n\n if (run_mst_inc)\n {\n // with Erdos-Rényi graphs \n\n std::cout << \"\\nComparing different cutoffs on Erdos-Rényi graphs\" << std::endl;\n std::vector<double> ER_cutoffs({1, 5, 10, 50, 100, 500, 1000});\n\n for (double c : ER_cutoffs) {\n Graph g = Graph(\"data/ERn1000p10.txt\");\n KruskalAlgorithm k = KruskalAlgorithm(&g);\n\n std::unordered_map<Node*, Node*> clusters = k.compute_clustering(c);\n\n std::unordered_map<Node*, int> clusters_size;\n for (auto kv : clusters) {\n if(clusters_size.find(kv.second) != clusters_size.end()) {\n clusters_size.at(kv.second) += 1;\n } else {\n clusters_size.insert(std::pair<Node*, int>(kv.second, 1));\n }\n }\n std::cout << \"In total, there are : \" << clusters_size.size() << \" clusters when cutoff = \" << c << std::endl;\n\n int big_clusters = 0;\n std::cout << \"The cluster sizes above 1 are the following : \";\n for (auto kv : clusters_size) {\n if (kv.second > 1) {\n std::cout << kv.second << \" \";\n big_clusters += 1;\n }\n }\n std::cout << std::endl;\n std::cout << \"There are \" << big_clusters << \" clusters of size above 1.\" << std::endl;\n }\n\n // with Walmart dataset\n\n std::cout << \"\\nComparing different cutoffs on Walmart dataset\" << std::endl;\n std::vector<double> walmart_cutoffs({1, 5, 10, 50, 100, 500, 1000});\n\n for (double c : walmart_cutoffs) {\n compare_inconsistency(\"data/scaled_n1000_walmart_features.txt\", c);\n }\n }\n\n return 0;\n}\n"
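\n// Example invocations (the help text above assumes the binary sits at\n// build/main; the MPI launcher name may vary between systems):\n//   build/main -a               // compare Prim, Boruvka & Kruskal\n//   mpirun -np 4 build/main -m  // parallelized Prim over 4 MPI processes\n"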
},
{
"alpha_fraction": 0.7264957427978516,
"alphanum_fraction": 0.7264957427978516,
"avg_line_length": 14.666666984558105,
"blob_id": "403b110d4d81ddd6acd71992e3854eedc6531f98",
"content_id": "b479693f448cebac46f218608d345e0d0dec3e78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 15,
"path": "/src/mst_kruskal.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"mst_algorithm.hpp\"\n#include \"union_find.hpp\"\n\n#include <iostream>\n#include <queue>\n\nclass KruskalAlgorithm : public MSTAlgorithm\n{\npublic:\n KruskalAlgorithm(Graph* graph);\n\n virtual void compute_mst();\n};"
},
{
"alpha_fraction": 0.6365914940834045,
"alphanum_fraction": 0.6365914940834045,
"avg_line_length": 18.047618865966797,
"blob_id": "2c8258caa988d06bee3026d345164a28f60c4df7",
"content_id": "315564a6357b02945d024c6a9c22b6d0b2db4b2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 21,
"path": "/src/mst_prim_kumar.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_algorithm.hpp\"\n\n#include <set>\n#include <cfloat>\n#include <limits>\n#include <stdexcept>\n\nclass PrimKumarAlgorithm : public MSTAlgorithm\n{\n private:\n int world_size;\n int rank;\n\n void compute_mst_slave();\n void compute_mst_master();\n\n public:\n PrimKumarAlgorithm(Graph* graph, int rank_, int world_size_);\n\n virtual void compute_mst();\n};"
},
{
"alpha_fraction": 0.5610753893852234,
"alphanum_fraction": 0.5713033080101013,
"avg_line_length": 22.445205688476562,
"blob_id": "7a8094179c701526e762fed9d8143ed529cc0ca6",
"content_id": "8a755793f9b51c81399179322a05bbea5f6963da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3422,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 146,
"path": "/src/union_find.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"union_find.hpp\"\n\n// Constructor(s) / Destructor(s)\n\nUnionFind::UnionFind(std::vector<Node*> nodes) \n{\n /*!\n * @brief Builds the Union-Find data structure from a vector\n * of nodes\n * \n * @param nodes Nodes of the graph\n * \n */\n\n this->num_classes = nodes.size();\n for(Node* n : nodes) {\n this->parent.insert(std::pair<Node*, Node*>(n, n));\n this->rank.insert(std::pair<Node*, int>(n, 1));\n this->representatives.insert(n);\n }\n}\n\nUnionFind::~UnionFind() {} \n\n// Getters\n\nNode* UnionFind::get_parent(Node* node) \n{\n /*!\n * @brief Returns the parent of \\p node in the data structure, if it exists\n * \n * @param node Node of which we want the parent\n * \n * @return NULL if the node is not part of the data structure, otherwise \n * its parent\n * \n */\n\n if(this->parent.find(node) == this->parent.end()) {\n return NULL;\n }\n return this->parent.at(node);\n}\n\nint UnionFind::get_rank(Node* node)\n{\n /*!\n * @brief Returns the rank of \\p node in the data structure, if it exists\n * \n * @param node Node of which we want the rank\n * \n * @return 0 if the node is not part of the data structure, otherwise \n * its rank\n * \n */\n\n if(this->rank.find(node) == this->rank.end()) {\n return 0;\n }\n return this->rank.at(node);\n}\n\nstd::unordered_set<Node*> UnionFind::get_representatives()\n{\n /*!\n * @brief Returns the representatives of components\n * \n * @return A unordered set containing all the representatives left\n * \n */\n\n return this->representatives;\n}\n\nint UnionFind::get_num_classes()\n{\n /*!\n * @brief Returns the number of classes of this data structure\n *\n * @return num_classes from the data structure\n * \n */\n\n return this->num_classes;\n}\n\n// Union & Find functions with all ameliorations (path compression, num classes, ...)\n\nNode* UnionFind::Find(Node* node)\n{\n /*!\n * @brief For a \\p node that belongs to the data structure, find its \n * representative in the data structure using path compression\n * \n * @param node Node of which we want the reprensentative\n * \n * @return The representative of the initial node\n * \n */\n\n assert((\"This node is not part of the data structure\", this->get_parent(node) != NULL));\n\n Node* parent = this->get_parent(node);\n\n if(node == parent) {\n return parent;\n }\n\n Node* r = this->Find(parent);\n this->parent[node] = r;\n return r;\n}\n\nvoid UnionFind::Union(Node* n1, Node* n2)\n{\n /*!\n * @brief For two nodes \\p n1 and \\p n2 of the data structure, computes \n * the union of their two classes \n * \n * @param n1 First node \n * @param n2 Second node\n * \n */\n\n Node* rep1 = this->Find(n1);\n Node* rep2 = this->Find(n2);\n\n if(rep1 != rep2) {\n this->num_classes -= 1; // proceeding with Union\n\n int r1 = this->get_rank(rep1);\n int r2 = this->get_rank(rep2);\n\n if(r1 > r2) {\n this->parent[rep2] = rep1;\n this->representatives.erase(rep2);\n } else if(r2 > r1) {\n this->parent[rep1] = rep2;\n this->representatives.erase(rep1);\n } else { // arbitrary choice\n this->parent[rep1] = rep2;\n this->representatives.erase(rep1);\n this->rank[rep2] += 1;\n }\n }\n}"
},
{
"alpha_fraction": 0.5405364036560059,
"alphanum_fraction": 0.550899088382721,
"avg_line_length": 25.67479705810547,
"blob_id": "4ef2926f948f9dcf1c4261ce3115d442faed09d2",
"content_id": "0c7910c8ca3c0420e1e83b3fd9241bfacdeab22e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6562,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 246,
"path": "/src/mst_algorithm.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_algorithm.hpp\"\n\nMSTAlgorithm::MSTAlgorithm(Graph* graph)\n{\n /*!\n * @brief Constructor of a MST Algorithm\n * \n * @param graph A pointer towards the graph of which we want to compute the MST\n * \n */\n\n this->initial_graph = graph;\n this->mst_weight = 0.0;\n this->treated = false;\n}\n\nGraph* MSTAlgorithm::get_initial_graph()\n{\n /*!\n * @brief Gets the (protected) pointer of the initial graph\n * \n * @return Initial graph's pointer\n * \n */\n\n return this->initial_graph;\n}\n\nGraph* MSTAlgorithm::get_mst_graph()\n{\n /*!\n * @brief Gets the (protected) pointer of the MST, if it has been computed\n * \n * @return MST's pointer\n * \n */\n\n return (this->is_treated()) ? &this->mst_graph : NULL;\n}\n\ndouble MSTAlgorithm::get_mst_weight()\n{\n /*!\n * @brief Gets the (protected) pointer of the MST weight, if it has been computed\n * \n * @return 0.0 if the MST has not been computed, otherwise its total weight\n * \n */\n\n return this->mst_weight;\n}\n\nbool MSTAlgorithm::is_treated() \n{\n /*!\n * @brief Gets the boolean value checking whether if the MST has been computed\n * \n * @return Value of treated\n * \n */\n\n return this->treated;\n}\n\nvoid MSTAlgorithm::treatment_done()\n{\n /*!\n * @brief Sets the boolean value of treated to true\n * \n */\n\n this->treated = true;\n}\n\nstd::unordered_map<Node*, Node*> MSTAlgorithm::compute_clustering(int k)\n{\n /*!\n * @brief Creates a mapping between nodes of the MST (and initial) graph \n * after it is built, and their representatives where we have eliminated \n * the k-1 most expansive edges from the MST graph. \n * \n * @param k The number of clusters to build \n * \n * @return A mapping between nodes of the graph(s) and their representatives \n * after removing k-1 edges\n * \n */\n\n if (!this->treated) {\n this->compute_mst();\n }\n\n int n = this->mst_graph.get_number_of_nodes();\n assert((\"There are less points than clusters!\", k <= n));\n\n auto cmp = [](const Edge* e1, const Edge* e2) \n { \n double w1 = e1->weight;\n double w2 = e2->weight;\n\n if (w1 < w2) {\n return false;\n } else if (w1 > w2) {\n return true;\n } else { // Allows multiple keys for same weight values\n intptr_t p1 = reinterpret_cast<intptr_t>(e1);\n intptr_t p2 = reinterpret_cast<intptr_t>(e2);\n\n return p1 < p2;\n }\n };\n\n // builds an ordered queue of the edges of the MST graph\n std::unordered_set<Edge*> mst_edges = this->mst_graph.get_edges();\n\n std::priority_queue<Edge*, std::vector<Edge*>, decltype(cmp)> pq(cmp);\n\n for (Edge* e : mst_edges) {\n pq.push(e);\n }\n\n // selects only the E-(k-1) lightest edges of the MST graph\n std::vector<Node*> nodes = this->mst_graph.get_nodes();\n UnionFind uf = UnionFind(nodes);\n\n int nb_edges = (n-1) - (k-1); // MST is a tree \n\n for (int i = 0; i < nb_edges; i ++) {\n Edge* min_edge = pq.top();\n uf.Union(min_edge->p1, min_edge->p2);\n pq.pop();\n }\n\n // builds a map from all the nodes to their representatives \n std::unordered_map<Node*, Node*> clusters;\n\n for (Node* node : nodes) {\n Node* rep = uf.Find(node);\n\n clusters.insert(std::pair<Node*, Node*>(node, rep));\n }\n\n return clusters;\n}\n\nstd::unordered_map<Node*, Node*> MSTAlgorithm::compute_clustering(double cutoff)\n{\n /*!\n * @brief Creates a mapping between nodes of the MST (and initial) graph \n * after it is built, and their representatives where we have eliminated \n * the edges whose weights have a standard deviation larger than cutoff \n * times the average weights of the other edges 
connected to its nodes\n * \n * @param cutoff The scale to eliminate edges\n * \n * @return A mapping between nodes of the graph(s) and their representatives \n * after removing inconsistent edges\n * \n */\n\n if (!this->treated) {\n this->compute_mst();\n }\n\n auto cmp = [](const Edge* e1, const Edge* e2) \n { \n double w1 = e1->weight;\n double w2 = e2->weight;\n\n if (w1 < w2) {\n return false;\n } else if (w1 > w2) {\n return true;\n } else { // Allows multiple keys for same weight values\n intptr_t p1 = reinterpret_cast<intptr_t>(e1);\n intptr_t p2 = reinterpret_cast<intptr_t>(e2);\n\n return p1 < p2;\n }\n };\n\n // builds an union-find ds and initializes the edges\n std::unordered_set<Edge*> mst_edges = this->mst_graph.get_edges();\n\n std::vector<Node*> nodes = this->mst_graph.get_nodes();\n UnionFind uf = UnionFind(nodes);\n\n // passes through the MST edges and keeps the consistent edges\n \n for (Edge* e : mst_edges) {\n bool add_edge = true;\n\n Node* p1 = e->p1;\n Node* p2 = e->p2;\n\n double avg_weight = 0.0;\n double std_dev = 0.0;\n double cnt = 0.0;\n\n double edge_std_dev = 0.0;\n\n // computes mean weight and std deviation for edges on both sides\n for (Edge* e1 : this->mst_graph.connected_edges(p1)) {\n if (e1 != e) {\n double edge_weight = e1->weight;\n avg_weight += edge_weight;\n std_dev += edge_weight*edge_weight;\n cnt += 1;\n }\n }\n\n for (Edge* e2 : this->mst_graph.connected_edges(p2)) {\n if (e2 != e) {\n double edge_weight = e2->weight;\n avg_weight += edge_weight;\n std_dev += edge_weight*edge_weight;\n cnt += 1;\n }\n }\n\n avg_weight /= cnt;\n std_dev = std::sqrt((std_dev/cnt) - avg_weight*avg_weight);\n\n // discards the considered edge if its std deviation is too large \n edge_std_dev = std::abs(e->weight - avg_weight);\n if (edge_std_dev >= cutoff*std_dev) {\n add_edge = false;\n }\n\n // adds the edge if it is consistent\n if (add_edge == true) {\n uf.Union(p1, p2);\n }\n }\n\n // builds a map from all the nodes to their representatives \n std::unordered_map<Node*, Node*> clusters;\n\n for (Node* node : nodes) {\n Node* rep = uf.Find(node);\n\n clusters.insert(std::pair<Node*, Node*>(node, rep));\n }\n\n return clusters;\n}\n"
},
{
"alpha_fraction": 0.5230942368507385,
"alphanum_fraction": 0.5441231727600098,
"avg_line_length": 24.864078521728516,
"blob_id": "52f1265bdaceffe9b0181de29e1da0d670d5b80d",
"content_id": "4c6892fba91d6f0409d3ac3ffeae88181b9e912b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2663,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 103,
"path": "/test/test_union_find.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\"\n\n#include \"../src/union_find.hpp\"\n\nTEST_CASE(\"Inizalition tests for Union Find data structure\", \"[uf:init]\")\n{\n Node absent_node = Node(11);\n\n // creating a vector of nodes\n std::vector<Node*> my_nodes;\n for(int i = 0; i < 10; i++) {\n my_nodes.push_back(new Node(i));\n }\n\n UnionFind uf = UnionFind(my_nodes);\n\n SECTION(\"Parent initialization\")\n {\n REQUIRE(uf.get_parent(&absent_node) == NULL);\n\n for (int i = 0; i < 10; i ++) {\n REQUIRE(uf.get_parent(my_nodes[i]) == my_nodes[i]);\n }\n }\n\n SECTION(\"Rank initialization\")\n {\n REQUIRE(uf.get_rank(&absent_node) == 0);\n\n for (int i = 0; i < 10; i ++) {\n REQUIRE(uf.get_rank(my_nodes[i]) == 1);\n }\n }\n\n SECTION(\"Representatives initialization\")\n {\n std::unordered_set<Node*> representatives = uf.get_representatives();\n\n REQUIRE(representatives.find(&absent_node) == representatives.end());\n\n for (int i = 0; i < 10; i ++) {\n REQUIRE(representatives.find(my_nodes[i]) != representatives.end());\n }\n }\n\n SECTION(\"Number of classes initialization\")\n {\n int nb_of_classes = uf.get_num_classes();\n\n REQUIRE(nb_of_classes == 10);\n }\n}\n\nTEST_CASE(\"Basic tests for Union Find data structure manipulation\", \"[uf:base]\")\n{\n // creating a vector of nodes\n std::vector<Node*> my_nodes;\n for(int i = 0; i < 10; i++){\n my_nodes.push_back(new Node(i));\n }\n\n UnionFind uf = UnionFind(my_nodes);\n\n SECTION(\"Find tests\") \n {\n REQUIRE(uf.Find(my_nodes[0]) == my_nodes[0]);\n REQUIRE(uf.Find(my_nodes[3]) != my_nodes[4]);\n REQUIRE(uf.Find(my_nodes[9]) == my_nodes[9]);\n }\n\n SECTION(\"Union tests\")\n {\n uf.Union(my_nodes[0], my_nodes[1]);\n\n REQUIRE(uf.get_num_classes() == 9);\n\n // be careful: here, the union is arbitrary (second node is the parent)\n\n REQUIRE(uf.Find(my_nodes[0]) == my_nodes[1]);\n REQUIRE(uf.Find(my_nodes[1]) == my_nodes[1]);\n\n REQUIRE(uf.get_rank(my_nodes[1]) == 2);\n\n uf.Union(my_nodes[0], my_nodes[2]);\n\n REQUIRE(uf.get_num_classes() == 8);\n\n REQUIRE(uf.Find(my_nodes[2]) == my_nodes[1]);\n \n REQUIRE(uf.get_rank(my_nodes[1]) == 2);\n REQUIRE(uf.get_rank(my_nodes[2]) == 1);\n\n uf.Union(my_nodes[1], my_nodes[2]);\n\n REQUIRE(uf.get_num_classes() == 8);\n\n REQUIRE(uf.Find(my_nodes[1]) == my_nodes[1]);\n REQUIRE(uf.Find(my_nodes[2]) == my_nodes[1]);\n\n REQUIRE(uf.get_rank(my_nodes[1]) == 2);\n REQUIRE(uf.get_rank(my_nodes[2]) == 1);\n }\n}"
},
{
"alpha_fraction": 0.6587859392166138,
"alphanum_fraction": 0.6677316427230835,
"avg_line_length": 53,
"blob_id": "7aa2cf02a2d5c3e886b790888c30a656ff216b05",
"content_id": "b0becf58ac71fae35ed8883d86e2ace78d7d0feb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 29,
"path": "/test/Makefile",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": ".PHONY: all\n\nOBJDIR = ../build\n\nall: test_point test_node test_edge test_graph test_union_find test_mst\n\nmain.o: main.cpp\n\tg++ -c -std=c++11 -Wall main.cpp\n\ntest_point: test_point.cpp ../build/point.o main.o \n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o main.o test_point.cpp -o test_point.o\n\ntest_node: test_node.cpp ../build/point.o ../build/node.o main.o\n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o $(OBJDIR)/node.o main.o test_node.cpp -o test_node.o\n\ntest_edge: test_edge.cpp ../build/point.o ../build/node.o ../build/edge.o main.o \n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o $(OBJDIR)/node.o $(OBJDIR)/edge.o main.o test_edge.cpp -o test_edge.o\n\ntest_graph: test_graph.cpp ../build/point.o ../build/node.o ../build/edge.o ../build/graph.o main.o\n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o $(OBJDIR)/node.o $(OBJDIR)/edge.o $(OBJDIR)/graph.o main.o test_graph.cpp -o test_graph.o\n\ntest_union_find: test_union_find.cpp ../build/point.o ../build/node.o ../build/union_find.o main.o \n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o $(OBJDIR)/node.o $(OBJDIR)/union_find.o main.o test_union_find.cpp -o test_union_find.o\n\ntest_mst: test_mst.cpp ../build/point.o ../build/node.o ../build/edge.o ../build/graph.o ../build/union_find.o ../build/mst_algorithm.o ../build/mst_prim.o ../build/mst_boruvka.o ../build/mst_kruskal.o \n\tg++ -std=c++11 -Wall $(OBJDIR)/point.o $(OBJDIR)/node.o $(OBJDIR)/edge.o $(OBJDIR)/graph.o $(OBJDIR)/union_find.o $(OBJDIR)/mst_algorithm.o $(OBJDIR)/mst_prim.o $(OBJDIR)/mst_boruvka.o $(OBJDIR)/mst_kruskal.o main.o test_mst.cpp -o test_mst.o\n\nclean:\n\trm -f *.o"
},
{
"alpha_fraction": 0.5635673403739929,
"alphanum_fraction": 0.5635673403739929,
"avg_line_length": 15.5,
"blob_id": "335bd856faaef23ff66163c575ccdac983dc312c",
"content_id": "40096524b65302bc96c36e50dd6a36047e05d4c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 32,
"path": "/src/point.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once \n\n#include <iostream>\n#include <cmath>\n#include <vector>\n#include <cassert>\n#include <fstream>\n#include <sstream>\n\nclass Point\n{\n /*!\n * @class Describes a Point with static dimension\n * \n */\n\n public:\n\n static int d;\n double *coords;\n int label; // for k-means\n\n Point();\n ~Point();\n\n int get_dim();\n void print();\n double distance(Point* other);\n\n static std::vector<Point*> read_points_from_file(std::string file_path, int d);\n\n};"
},
{
"alpha_fraction": 0.44680851697921753,
"alphanum_fraction": 0.49061325192451477,
"avg_line_length": 14.384614944458008,
"blob_id": "6797ee7313923272a9cffc5d96eaf584b68fb45f",
"content_id": "aab990e4dffaeff7b01ef3e6cd90ac23f94afe81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 799,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 52,
"path": "/test/test_point.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\"\n\n#include \"../src/point.hpp\"\n\nTEST_CASE(\"Simple point initialization\", \"[point:init]\")\n{\n Point::d = 2;\n\n SECTION(\"Empty point\")\n {\n Point p;\n\n REQUIRE(p.label == 0);\n REQUIRE(p.d == 2);\n }\n\n SECTION(\"Complete point\")\n {\n Point p;\n\n p.coords[0] = 9;\n\n REQUIRE(p.coords[0] == 9);\n REQUIRE(p.coords[1] == 0);\n }\n\n SECTION(\"Dimension getter\")\n {\n Point p;\n\n int d = p.get_dim();\n\n REQUIRE(d == 2);\n }\n}\n\nTEST_CASE(\"Computation of distances between points\", \"[point:dist]\")\n{\n Point::d = 2;\n\n Point p1;\n p1.coords[0] = 0;\n p1.coords[1] = 2;\n\n Point p2;\n p2.coords[0] = 2;\n p2.coords[1] = 0;\n\n double d = p1.distance(&p2);\n\n REQUIRE((d - 2.828) < 0.001);\n}"
},
{
"alpha_fraction": 0.599211573600769,
"alphanum_fraction": 0.599211573600769,
"avg_line_length": 17.560976028442383,
"blob_id": "2d7bc8c5680d565f047f6b45005ce1396b44924e",
"content_id": "c965c3521adc9d29aaae95e82a3fe32454247bda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 761,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 41,
"path": "/src/node.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"point.hpp\"\n\n#include <vector>\n#include <cmath>\n#include <unordered_map>\n#include <unordered_set>\n#include <cfloat>\n\ntypedef int node_label_t;\ntypedef int node_id_t;\n\nclass Node{\n\n /*!\n * @class Describes a Node of the Graph\n * \n */\n private:\n node_id_t id;\n\n protected:\n Point* p;\n\n public:\n node_label_t label;\n\n Node(node_label_t label);\n Node(node_label_t label, Point* p);\n\n Point* get_point();\n void set_point(Point* p);\n double dist(Node* other);\n\n void set_id(node_id_t id_);\n node_id_t get_id();\n};\n\ndouble intracluster_variance(std::unordered_map<Node*, Node*> clusters);\ndouble silhouette(std::unordered_map<Node*, Node*> clusters);\n"
},
{
"alpha_fraction": 0.5288549065589905,
"alphanum_fraction": 0.5671767592430115,
"avg_line_length": 51.10472869873047,
"blob_id": "b92fc591f295a7cf577235b742782b8c8612054b",
"content_id": "fa8d7f3182623bc7e1a954b74bddfb26124971a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 15448,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 296,
"path": "/test/test_mst.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\"\n\n#include \"../src/mst_algorithm.hpp\"\n#include \"../src/mst_prim.hpp\"\n#include \"../src/mst_boruvka.hpp\"\n#include \"../src/mst_kruskal.hpp\"\n\nTEST_CASE(\"First test for MST construction\")\n{\n std::vector<Node*> my_nodes;\n for (int i = 1; i <= 7; i ++) {\n my_nodes.push_back(new Node(i));\n }\n\n std::vector<Edge*> my_edges;\n my_edges.push_back(new Edge(my_nodes[0], my_nodes[2], 6)); // Edge n°0\n my_edges.push_back(new Edge(my_nodes[0], my_nodes[3], 1)); // Edge n°1\n my_edges.push_back(new Edge(my_nodes[1], my_nodes[2], 6)); // Edge n°2\n my_edges.push_back(new Edge(my_nodes[1], my_nodes[4], 8)); // Edge n°3\n my_edges.push_back(new Edge(my_nodes[1], my_nodes[5], 4)); // Edge n°4\n my_edges.push_back(new Edge(my_nodes[2], my_nodes[3], 9)); // Edge n°5\n my_edges.push_back(new Edge(my_nodes[2], my_nodes[5], 2)); // Edge n°6\n my_edges.push_back(new Edge(my_nodes[3], my_nodes[4], 3)); // Edge n°7\n my_edges.push_back(new Edge(my_nodes[3], my_nodes[5], 3)); // Edge n°8\n my_edges.push_back(new Edge(my_nodes[3], my_nodes[6], 3)); // Edge n°9\n my_edges.push_back(new Edge(my_nodes[4], my_nodes[5], 5)); // Edge n°10\n my_edges.push_back(new Edge(my_nodes[4], my_nodes[6], 2)); // Edge n°11\n int E = my_edges.size();\n\n Graph g;\n // we add all edges, since every node has an edge they will all be in the graph\n for (int i = 0; i < E; i ++) {\n g.add_edge(my_edges[i]); \n }\n\n SECTION(\"Prim's algorithm\")\n {\n PrimAlgorithm prim = PrimAlgorithm(&g);\n prim.compute_mst();\n\n REQUIRE(std::abs(prim.get_mst_graph()->total_weight() - 15) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = prim.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) == mst_edges.end()); // Edge 1<-6->2\n REQUIRE(mst_edges.find(my_edges[1]) != mst_edges.end()); // Edge 1<-1->3\n REQUIRE(mst_edges.find(my_edges[2]) == mst_edges.end()); // Edge 2<-6->3\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 2<-8->5\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-4->6\n REQUIRE(mst_edges.find(my_edges[5]) == mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 3<-2->6\n // REQUIRE(mst_edges.find(my_edges[7]) == mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[8]) != mst_edges.end()); // Edge 4<-3->6\n // REQUIRE(mst_edges.find(my_edges[9]) != mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[10]) == mst_edges.end()); // Edge 5<-5->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 5<-2->7\n\n // For Prim on this graph, there are two possibilities, given that the graph \n // is initialized with a random Node, either the fourth Node will be connected \n // to 5th or 7th, but 5th and 7th are always connected\n\n bool needed_edge = mst_edges.find(my_edges[7]) != mst_edges.end() || mst_edges.find(my_edges[9]) != mst_edges.end();\n bool limit_edge = mst_edges.find(my_edges[7]) == mst_edges.end() || mst_edges.find(my_edges[9]) == mst_edges.end();\n REQUIRE(limit_edge == true); \n REQUIRE(needed_edge == true); \n }\n\n SECTION(\"Boruvka's algorithm\")\n {\n BoruvkaAlgorithm boruvka = BoruvkaAlgorithm(&g);\n boruvka.compute_mst();\n\n REQUIRE(std::abs(boruvka.get_mst_graph()->total_weight() - 15) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = boruvka.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) == mst_edges.end()); // Edge 1<-6->2\n REQUIRE(mst_edges.find(my_edges[1]) != mst_edges.end()); // Edge 1<-1->3\n 
REQUIRE(mst_edges.find(my_edges[2]) == mst_edges.end()); // Edge 2<-6->3\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 2<-8->5\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-4->6\n REQUIRE(mst_edges.find(my_edges[5]) == mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 3<-2->6\n // REQUIRE(mst_edges.find(my_edges[7]) == mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[8]) != mst_edges.end()); // Edge 4<-3->6\n // REQUIRE(mst_edges.find(my_edges[9]) != mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[10]) == mst_edges.end()); // Edge 5<-5->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 5<-2->7\n\n // For Boruvka on this graph, there are two possibilities, given that the connected\n // components will at one step have two edges with weight 3 that link them, either \n // the fourth Node will be connected to 5th or 7th, but 5th and 7th are always connected\n\n bool needed_edge = mst_edges.find(my_edges[7]) != mst_edges.end() || mst_edges.find(my_edges[9]) != mst_edges.end();\n bool limit_edge = mst_edges.find(my_edges[7]) == mst_edges.end() || mst_edges.find(my_edges[9]) == mst_edges.end();\n REQUIRE(limit_edge == true); \n REQUIRE(needed_edge == true); \n }\n\n SECTION(\"Kruskal's algorithm\")\n {\n KruskalAlgorithm kruskal = KruskalAlgorithm(&g);\n kruskal.compute_mst();\n\n SECTION(\"MST computed with Kruskal's algorithm\")\n {\n REQUIRE(std::abs(kruskal.get_mst_graph()->total_weight() - 15) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = kruskal.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) == mst_edges.end()); // Edge 1<-6->2\n REQUIRE(mst_edges.find(my_edges[1]) != mst_edges.end()); // Edge 1<-1->3\n REQUIRE(mst_edges.find(my_edges[2]) == mst_edges.end()); // Edge 2<-6->3\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 2<-8->5\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-4->6\n REQUIRE(mst_edges.find(my_edges[5]) == mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 3<-2->6\n // REQUIRE(mst_edges.find(my_edges[7]) == mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[8]) != mst_edges.end()); // Edge 4<-3->6\n // REQUIRE(mst_edges.find(my_edges[9]) != mst_edges.end()); \n REQUIRE(mst_edges.find(my_edges[10]) == mst_edges.end()); // Edge 5<-5->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 5<-2->7\n\n // For Kruskal on this graph, there are two possibilities, because two edges\n // have the same weight and can lead to two different MST, in which the fourth \n // node will be connected to either the 5th or 7th, but 5th and 7th are always connected\n\n bool needed_edge = mst_edges.find(my_edges[7]) != mst_edges.end() || mst_edges.find(my_edges[9]) != mst_edges.end();\n bool limit_edge = mst_edges.find(my_edges[7]) == mst_edges.end() || mst_edges.find(my_edges[9]) == mst_edges.end();\n REQUIRE(limit_edge == true); \n REQUIRE(needed_edge == true); \n }\n\n SECTION(\"Clustering using Kruskal's MST\")\n {\n int k = 2;\n std::unordered_map<Node*, Node*> clustering_k2 = kruskal.compute_clustering(k);\n\n // The heaviest weight in the MST has to be 2<-4->6, thus we will have two groups of nodes\n REQUIRE(clustering_k2.at(my_nodes[1]) == my_nodes[1]);\n\n REQUIRE(clustering_k2.at(my_nodes[0]) != my_nodes[1]);\n REQUIRE(clustering_k2.at(my_nodes[5]) != my_nodes[1]);\n REQUIRE(clustering_k2.at(my_nodes[4]) != my_nodes[1]);\n\n k = 
4;\n std::unordered_map<Node*, Node*> clustering_k4 = kruskal.compute_clustering(k);\n\n // The two heaviest edges in the MST are 2<-4->6 & 4<-4->6\n REQUIRE(clustering_k4.at(my_nodes[1]) == my_nodes[1]);\n\n REQUIRE(clustering_k4.at(my_nodes[2]) == clustering_k4.at(my_nodes[5]));\n REQUIRE(clustering_k4.at(my_nodes[2]) != my_nodes[0]);\n\n REQUIRE(clustering_k4.at(my_nodes[0]) == clustering_k4.at(my_nodes[3]));\n\n REQUIRE(clustering_k4.at(my_nodes[4]) == clustering_k4.at(my_nodes[6]));\n }\n }\n}\n\nTEST_CASE(\"Second test for MST construction\")\n{\n std::vector<Node*> my_nodes;\n for (int i = 0; i <= 8; i ++) {\n my_nodes.push_back(new Node(i));\n }\n\n std::vector<Edge*> my_edges;\n my_edges.push_back(new Edge(my_nodes[0], my_nodes[1], 4)); // Edge n°0\n my_edges.push_back(new Edge(my_nodes[0], my_nodes[7], 9)); // Edge n°1\n my_edges.push_back(new Edge(my_nodes[1], my_nodes[2], 8)); // Edge n°2\n my_edges.push_back(new Edge(my_nodes[1], my_nodes[7], 11)); // Edge n°3\n my_edges.push_back(new Edge(my_nodes[2], my_nodes[3], 7)); // Edge n°4\n my_edges.push_back(new Edge(my_nodes[2], my_nodes[5], 4)); // Edge n°5\n my_edges.push_back(new Edge(my_nodes[2], my_nodes[8], 2)); // Edge n°6\n my_edges.push_back(new Edge(my_nodes[3], my_nodes[4], 9)); // Edge n°7\n my_edges.push_back(new Edge(my_nodes[3], my_nodes[5], 14)); // Edge n°8\n my_edges.push_back(new Edge(my_nodes[4], my_nodes[5], 10)); // Edge n°9\n my_edges.push_back(new Edge(my_nodes[5], my_nodes[6], 2)); // Edge n°10\n my_edges.push_back(new Edge(my_nodes[6], my_nodes[7], 1)); // Edge n°11\n my_edges.push_back(new Edge(my_nodes[6], my_nodes[8], 6)); // Edge n°12\n my_edges.push_back(new Edge(my_nodes[7], my_nodes[8], 7)); // Edge n°13\n int E = my_edges.size();\n\n Graph g;\n // we add all edges, since every node has an edge they will all be in the graph\n for (int i = 0; i < E; i ++) {\n g.add_edge(my_edges[i]);\n }\n\n SECTION(\"Prim's algorithm\")\n {\n PrimAlgorithm prim = PrimAlgorithm(&g);\n prim.compute_mst();\n\n REQUIRE(std::abs(prim.get_mst_graph()->total_weight() - 37) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = prim.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) != mst_edges.end()); // Edge 0<-4->1\n REQUIRE(mst_edges.find(my_edges[1]) == mst_edges.end()); // Edge 0<-9->7\n REQUIRE(mst_edges.find(my_edges[2]) != mst_edges.end()); // Edge 1<-8->2\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 1<-11->7\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-7->3\n REQUIRE(mst_edges.find(my_edges[5]) != mst_edges.end()); // Edge 2<-4->5\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 2<-2->8\n REQUIRE(mst_edges.find(my_edges[7]) != mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[8]) == mst_edges.end()); // Edge 3<-14->5\n REQUIRE(mst_edges.find(my_edges[9]) == mst_edges.end()); // Edge 4<-10->5\n REQUIRE(mst_edges.find(my_edges[10]) != mst_edges.end()); // Edge 5<-2->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 6<-1->7\n REQUIRE(mst_edges.find(my_edges[12]) == mst_edges.end()); // Edge 6<-6->8\n REQUIRE(mst_edges.find(my_edges[13]) == mst_edges.end()); // Edge 7<-7->8\n }\n\n SECTION(\"Boruvka's algorithm\")\n {\n BoruvkaAlgorithm boruvka = BoruvkaAlgorithm(&g);\n boruvka.compute_mst();\n\n REQUIRE(std::abs(boruvka.get_mst_graph()->total_weight() - 37) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = boruvka.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) != 
mst_edges.end()); // Edge 0<-4->1\n REQUIRE(mst_edges.find(my_edges[1]) == mst_edges.end()); // Edge 0<-9->7\n REQUIRE(mst_edges.find(my_edges[2]) != mst_edges.end()); // Edge 1<-8->2\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 1<-11->7\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-7->3\n REQUIRE(mst_edges.find(my_edges[5]) != mst_edges.end()); // Edge 2<-4->5\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 2<-2->8\n REQUIRE(mst_edges.find(my_edges[7]) != mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[8]) == mst_edges.end()); // Edge 3<-14->5\n REQUIRE(mst_edges.find(my_edges[9]) == mst_edges.end()); // Edge 4<-10->5\n REQUIRE(mst_edges.find(my_edges[10]) != mst_edges.end()); // Edge 5<-2->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 6<-1->7\n REQUIRE(mst_edges.find(my_edges[12]) == mst_edges.end()); // Edge 6<-6->8\n REQUIRE(mst_edges.find(my_edges[13]) == mst_edges.end()); // Edge 7<-7->8\n }\n\n SECTION(\"Kruskal's algorithm\")\n {\n KruskalAlgorithm kruskal = KruskalAlgorithm(&g);\n kruskal.compute_mst();\n\n SECTION(\"MST computed with Kruskal's algorithm\")\n {\n REQUIRE(std::abs(kruskal.get_mst_graph()->total_weight() - 37) < 0.001);\n\n std::unordered_set<Edge*> mst_edges = kruskal.get_mst_graph()->get_edges();\n\n REQUIRE(mst_edges.find(my_edges[0]) != mst_edges.end()); // Edge 0<-4->1\n REQUIRE(mst_edges.find(my_edges[1]) == mst_edges.end()); // Edge 0<-9->7\n REQUIRE(mst_edges.find(my_edges[2]) != mst_edges.end()); // Edge 1<-8->2\n REQUIRE(mst_edges.find(my_edges[3]) == mst_edges.end()); // Edge 1<-11->7\n REQUIRE(mst_edges.find(my_edges[4]) != mst_edges.end()); // Edge 2<-7->3\n REQUIRE(mst_edges.find(my_edges[5]) != mst_edges.end()); // Edge 2<-4->5\n REQUIRE(mst_edges.find(my_edges[6]) != mst_edges.end()); // Edge 2<-2->8\n REQUIRE(mst_edges.find(my_edges[7]) != mst_edges.end()); // Edge 3<-9->4\n REQUIRE(mst_edges.find(my_edges[8]) == mst_edges.end()); // Edge 3<-14->5\n REQUIRE(mst_edges.find(my_edges[9]) == mst_edges.end()); // Edge 4<-10->5\n REQUIRE(mst_edges.find(my_edges[10]) != mst_edges.end()); // Edge 5<-2->6\n REQUIRE(mst_edges.find(my_edges[11]) != mst_edges.end()); // Edge 6<-1->7\n REQUIRE(mst_edges.find(my_edges[12]) == mst_edges.end()); // Edge 6<-6->8\n REQUIRE(mst_edges.find(my_edges[13]) == mst_edges.end()); // Edge 7<-7->8\n }\n\n SECTION(\"Clustering using Kruskal's MST\")\n {\n int k = 2;\n std::unordered_map<Node*, Node*> clustering_k2 = kruskal.compute_clustering(k);\n\n // The heaviest edge in this MST is 3<-9->4\n REQUIRE(clustering_k2.at(my_nodes[4]) == my_nodes[4]);\n REQUIRE(clustering_k2.at(my_nodes[4]) != my_nodes[7]);\n\n REQUIRE(clustering_k2.at(my_nodes[0]) == my_nodes[7]);\n REQUIRE(clustering_k2.at(my_nodes[3]) == my_nodes[7]);\n REQUIRE(clustering_k2.at(my_nodes[7]) == my_nodes[7]);\n\n k = 3;\n std::unordered_map<Node*, Node*> clustering_k3 = kruskal.compute_clustering(k);\n\n // The two heaviest edges in this MST are 3<-9->4 & 1<-8->2\n REQUIRE(clustering_k3.at(my_nodes[4]) == my_nodes[4]);\n REQUIRE(clustering_k3.at(my_nodes[4]) != my_nodes[7]);\n\n REQUIRE(clustering_k3.at(my_nodes[0]) == clustering_k3.at(my_nodes[1]));\n REQUIRE(clustering_k3.at(my_nodes[0]) != my_nodes[7]);\n \n REQUIRE(clustering_k3.at(my_nodes[3]) == my_nodes[7]);\n REQUIRE(clustering_k3.at(my_nodes[7]) == my_nodes[7]);\n }\n }\n}"
},
{
"alpha_fraction": 0.5678688287734985,
"alphanum_fraction": 0.5862295031547546,
"avg_line_length": 24.399999618530273,
"blob_id": "09ac295f43ae673b92e913034844dabb56305589",
"content_id": "684510e0c45c51a3c2b392ebe2b48f29816c18c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1544,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 60,
"path": "/src/edge.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"edge.hpp\"\n\n#include <sstream>\n\nEdge::Edge(Node* p1, Node* p2){\n /*!\n * @brief Constructeur de l'arrête\n * \n * @param p1 Le premier noeud constituant l'arrête\n * @param p2 Le second noeud constituant l'arrête\n * \n * @note Quand aucun poids n'est spécifié et que les noeuds ne portent pas d'information, \n * on considère qu'il vaut 1. \n * \n * @note Voir \\ref Edge::Edge(Node* p1, Node* p2, edge_weight_t weight) pour d'autres remarques.\n * \n */\n\n this->p1 = p1;\n this->p2 = p2;\n this->weight = p1->dist(p2);\n}\n\nEdge::Edge(Node* p1, Node* p2, edge_weight_t weight){\n /*!\n * @brief Constructeur de l'arrête avec poids donné\n * \n * @param p1 Le premier noeud constituant l'arrête\n * @param p2 Le second noeud constituant l'arrête\n * @param weight Le poids de l'arrête\n * \n * @note Comme on ne considère que des graphes non dirigés, l'ordre de p1 et p2 n'a pas d'importance\n * \n */\n\n this->p1 = p1;\n this->p2 = p2;\n this->weight = weight;\n}\n\nNode* Edge::other_node(Node* p){\n /*!\n * @brief Renvoie le noeud de l'arrête différent de \\p p\n * \n * @param p Le noeud de l'arrête qu'on connaît\n * \n * @return L'autre noeud, ou nullptr si \\p p n'appartient pas à l'arrête\n * \n */\n\n if(this->p1 != p && this->p2 != p)\n return nullptr;\n \n return this->p1 == p ? this->p2 : this->p1;\n}\n\nbool Edge::operator<(Edge const& other) const \n{\n return this->weight < other.weight;\n}\n\n"
},
{
"alpha_fraction": 0.5870910882949829,
"alphanum_fraction": 0.5870910882949829,
"avg_line_length": 18.169490814208984,
"blob_id": "5bba560e29bc8506578150ab4bf096e43152dbd5",
"content_id": "6f4aad0ccc3a7cd8a22403d9822252ba7979ef25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1137,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 59,
"path": "/Makefile",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": ".PHONY: all doc main\n\nCXXFLAGS= -DDEBUG -Wall -Werror\nLDFLAGS=\nCC=mpicxx\n\nOBJDIR=build\nSRCDIR=src\n\nOBJS = $(addprefix $(OBJDIR)/,\\\n\t\t\t\tedge.o \\\n\t\t\t\tpoint.o \\\n\t\t\t\tgraph.o \\\n\t\t\t\tmain.o \\\n\t\t\t\tnode.o \\\n\t\t\t\tmst_algorithm.o \\\n\t\t\t\tmst_prim.o \\\n\t\t\t\tmst_boruvka.o \\\n\t\t\t\tmst_kruskal.o \\\n mst_prim_kumar.o \\\n\t\t\t\tunion_find.o \\\n\t\t\t\tkmeans.o \\\n\t\t\t\tcompare_clustering.o \\\n\t\t\t)\n\n\nmain: $(OBJDIR)/main\n\n\n$(OBJDIR)/main: $(OBJS)\t\n\t$(CC) -g -o $(OBJDIR)/main $(OBJS) $(LDFLAGS)\n\n\n.SECONDEXPANSION:\n$(OBJDIR)/%.o: $(SRCDIR)/%.cpp $$(wildcard $(SRCDIR)/$$*.hpp)\n\tmkdir -p build\n\t$(CC) -g -c $(CFLAGS) $(filter %.cpp,$^) -o $@\n\n\n\nclean:\n\t# Client cleanup\n\trm -rf $(OBJDIR)/*.o\n\trm -rf $(OBJDIR)/main\n\n\t# valgrind cleanup\n\trm -rf *.dSYM\n\nmrproper: clean\n\trm -rf doc/\n\trm -rf build/\n\ndoc:\n\t# On peut avoir des problèmes en cas d'environnement parallèle\n\t# si la numéro de build est mis à jour après l'appel à doc.\n\t#A = $(shell git rev-parse HEAD)\n\t#A += generation date -- \n\t#A += $(shell date +\"%Y-%m-%dT%H:%M:%S%z\")\n\tPROJECT_COMMIT_HASH=\"$(shell echo $$(git rev-parse HEAD) -- gendate $$(date +\"%Y-%m-%dT%H:%M:%S%z\"))\" doxygen Doxyfile\n"
},
{
"alpha_fraction": 0.7675478458404541,
"alphanum_fraction": 0.7695989012718201,
"avg_line_length": 37.49122619628906,
"blob_id": "faee4f626f70e33d97442aa058204368f5d247b7",
"content_id": "d87168267b3ca698bf047cef3e6c9a97af12f1e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4391,
"license_type": "no_license",
"max_line_length": 264,
"num_lines": 114,
"path": "/README.md",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "# Minimum Spanning Trees and Clustering \n\nIn this project, we implement different Minimum Spanning Trees (MST) algorithms, namely \nBoruvka, Prim and Kruskal algorithms. Also, we implement a parallelized version \nof Prim's algorithm with OpenMPI. Finally, we compute different clustering\ntechniques and we compare them to k-means. Thus, MST Clustering led us to \ninteresting results, despite the fact MST algorithms have a high computation cost. \n\nPlease refer to the following sections for more information:\n\n1. [Usage](#package-usage)\n2. [Documentation](#documentation)\n3. [Results](#our-results)\n\nThis project was made in collaboration with another student from École Polytechnique.\n\n## Package usage\n\n### Compiling\n\nTo run the programs, we advise you to recompile from root folder. To do so,\nyou can type:\n\n- `make clean`\n- `make`\n\nYou will need a C++11-capable MPI compiler.\n\n### Options\n\nThen, in order to use the programs, go in the root folder. Here, you can\nrun the different algorithms by using following command: \n\n`build/main [options]`\n\n- `-a` : runs Prim, Boruvka & Kruskal algorithms on generated graphs\n- `-c` : runs a comparison between MST clustering and k-means\n- `-i` : runs a comparison between Inconsistency clustering and k-means\n- `-m` : runs Prim with MPI\n\nThe basic usage of `build/main` will not run the parallelized version of Prim's algorithm.\nTo do so, you need to complete the following command:\n\n- `mpirun -np [number of cores] build/main -m`\n\n### Tests\n\nAlso, there are tests available for all basic classes and some algorithms\nin the *test/* folder. You can use the Makefile provided there to compile\nthem and run them separately.\n\n## Documentation\n\nA complete documentation is available in the *doc/* folder. If it is not\ngenerated, you can run from root folder:\n\n- `make doc`\n\nThen, you can open index.html in your browser and follow the guide!\n\n## Our results \n\nHere, we will describe our results on computing MST and MST Clustering. Firstly, \nwe compare classic MST algorithms. Secondly, we add a parallelized version of Prim's \nalgorithm and we compare its computation cost. Finally, we do some clustering with MST \nand we compare the results with the well-known k-means method.\n\n### MST Algorithms \n\nHere, we show the computation cost for Boruvka, Prim and Kruskal algorithms. The generated \ngraphs are following Barabasi-Albert and Erdos-Rényi rules.\n\n\n\n\n\nFrom these experiments, we can confirm that the MST algorithms have a complexity in \nO(*m* log(*n*)) where *n* is the number of nodes and *m* is the number of edges. Also, \nBoruvka's algorithm seems to be more costful. However, it is mainly due to our implementation \nchoices, because we compute several Union-Find data structures. \n\n### Parallelized Prim\n\nHere, we show a comparison between parallelized Prim's algorithm and the previous algorithms. The results are obtained by using 4 cores.\n\n\n\n\n\nIn fact, we show that parallelized version of Prim outperforms the other algorithms.\n\n### MST Clustering \n\nFirstly, the most simple way of performing clustering based on MST is to remove the \n*k*-1 edges with highest weights when we want *k* clusters. Here, we show a comparison of \ntime complexity and intracluster variance between this technique and k-means.\n\n\n\n\n\nMore specifically, we see that MST Clustering is both most costful and less accurate than \nk-means in terms of intracluster variance. 
Thus, this explains why MST Clustering is not \nwidely used in practice.\n\nHowever, there exists a lot of other MST Clustering methods. For instance, we implement a \nmethod based on inconsistency. We define an edge as *inconsistent* when its weight is larger \nthan a *cutoff* times the standard deviation of all edges connected to its both nodes. Here, we \nshow the results for clusters of size at least 2, on Erdos-Rényi graphs and freely available \nWalmart markets data:\n\n\n\nWe notice that, one can compute an approximate number of clusters by choosing the *cutoff* value. Then, the Inconsistency method is able to discard a lot of outliers in clusters of size 1. Thus, MST Clustering allows to differentiate strong clusters from outliers.\n"
},
{
"alpha_fraction": 0.5807033181190491,
"alphanum_fraction": 0.5867147445678711,
"avg_line_length": 19.793750762939453,
"blob_id": "30658ffeafe325e56ff95fbd5a5c9a45ce27dec9",
"content_id": "383c18de69dfa701f3ac9c0ce1e001d95212cdae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9981,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 480,
"path": "/src/kmeans.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"kmeans.hpp\"\n\nCloud::Cloud(int _d, int _nmax, int _k)\t\n{\n /*!\n * @brief Builds an empty cloud\n * \n * @param _d The dimension of the points\n\t * @param _nmax The maximal number of points in this cloud\n\t * @param _k The number of clusters to be drawn\n */\n\n\td = _d;\n\tPoint::d = _d;\n\tn = 0;\n\tk = _k;\n\n\tnmax = _nmax;\n\n\tpoints = new Point[nmax];\n\tcenters = new Point[k];\n}\n\nCloud::~Cloud()\n{\n /*!\n * @brief Destructor of the cloud class\n */\n\n\tdelete[] centers;\n\tdelete[] points;\n}\n\nvoid Cloud::add_point(Point &p, int label)\n{\n /*!\n * @brief Adds a point in the cloud by copying its values\n * \n * @param p The point to be added\n\t * @param label A label to give to this point\n */\n\n\tassert((\"Too much points!\", n < nmax));\n\tassert((\"No center to assign to this point!\", label<k));\n\n\tfor(int m = 0; m < d; m++)\n\t{\n\t\tpoints[n].coords[m] = p.coords[m];\n\t}\n\n\tpoints[n].label = label;\n\n\tn++;\n}\n\nint Cloud::get_d()\n{\n /*!\n * @brief Getter of the point dimension\n * \n * @return The dimension of the points contained in this cloud\n */\n\n\treturn d;\n}\n\nint Cloud::get_n()\n{\n /*!\n * @brief Getter of the number of points\n * \n * @return The number of points in this cloud\n */\n\n\treturn n;\n}\n\nint Cloud::get_k()\n{\n /*!\n * @brief Getter of the number of clusters\n * \n * @return The number of clusters in this cloud\n */\n\n\treturn k;\n}\n\nPoint& Cloud::get_point(int i)\n{\n /*!\n * @brief Getter of points\n * \n * @param i Index of the point in this cloud\n * \n * @return The point at index \\p i\n */\n\n\treturn points[i];\n}\n\nPoint& Cloud::get_center(int j)\n{\n /*!\n * @brief Getter of centers\n * \n * @param j Index of the center in this cloud\n * \n * @return The center point at index \\p j\n */\n\n\treturn centers[j];\n}\n\nvoid Cloud::set_center(Point &p, int j)\n{\n /*!\n * @brief Enables to change centers from this cloud\n * \n * @param p A point to change the center by\n\t * @param j Index of the center to be modified\n */\n\n\tfor(int m = 0; m < d; m++)\n\t\tcenters[j].coords[m] = p.coords[m];\n}\n\ndouble Cloud::intracluster_variance()\n{\n /*!\n * @brief Computes the intracluster variance in this cloud\n * \n * @return The intracluster variance\n */\n\n\tdouble intraclusterVar = 0.0;\n\tfor(int i = 0; i < this->n; i ++){\n\t\tintraclusterVar += points[i].distance(&(centers[points[i].label]));\n\t}\n\tif (n > 0) {\n\t\tintraclusterVar /= n;\n\t}\n\n\treturn intraclusterVar;\n}\n\nint Cloud::set_voronoi_labels()\n{\n /*!\n * @brief E-step in the kmeans algorithm\n */\n\n\tint changedLabels = 0;\n\n\tfor (int i = 0; i < this->n; i ++){\n\t\tint pointLabel = points[i].label;\n\t\tint newLabel = points[i].label;\n\t\tdouble minDist = points[i].distance(&(centers[pointLabel]));\n\n\t\tfor (int j = 0; j < this->k; j ++){\n\t\t\tdouble dist = points[i].distance(&(centers[j]));\n\t\t\tif (dist < minDist || (dist == minDist && j < newLabel)) {\n\t\t\t\tminDist = dist;\n\t\t\t\tnewLabel = j;\n\t\t\t}\n\t\t}\n\n\t\tif (newLabel != pointLabel){\n\t\t\tpoints[i].label = newLabel;\n\t\t\tchangedLabels += 1;\n\t\t}\n\t}\n\t\n\treturn changedLabels;\n}\n\nvoid Cloud::set_centroid_centers()\n{\n /*!\n * @brief M-step in the kmeans algorithm\n */\n\n\tint* clusterSizes = new int [this->k] ();\n\n\t// we start with \"new\" points\n\tPoint *newCenters = new Point [this->k] ();\n\n\t// we count the number of data points in each cluster \n\t// and accumulate their sum\n\tfor (int i = 0; i < this->n; i 
++){\n\n\t\tint pointLabel = points[i].label;\n\t\tclusterSizes[pointLabel] += 1;\n\n\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\tnewCenters[pointLabel].coords[j] += points[i].coords[j];\n\t\t}\n\t}\n\n\t// we compute the average per cluster\n\tfor (int i = 0; i < this->k; i ++){\n\t\tif (clusterSizes[i] > 0){\n\t\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\t\tnewCenters[i].coords[j] /= clusterSizes[i];\n\t\t\t}\n\t\t\t\n\t\t\tthis->set_center(newCenters[i], i);\n\t\t}\n\t}\n\n\tdelete[] clusterSizes;\n\tclusterSizes = NULL;\n\tdelete[] newCenters;\n\tnewCenters = NULL;\n}\n\nvoid Cloud::kmeans()\n{\n\t/*!\n * @brief Computes the kmeans algorithm on this cloud. It is possible \n\t * to select different initializations for the centers, within bounding \n\t * box, forgy, plusplus and random partition. \n */\n\n\tstd::cout << \"\\nComputing clustering using kmeans...\";\n\n\t// init_bounding_box();\n\tinit_forgy(); \n\t// init_plusplus(); \n\t// init_random_partition();\n\n\t// running Lloyd's heuristics\n\tint changesMade = 0;\n\tdo {\n\t\tchangesMade = set_voronoi_labels();\n\t\tset_centroid_centers();\n\t} while (changesMade > 0);\n\t\n\tstd::cout << \"[OK]\" << std::endl;\n}\n\nvoid Cloud::init_bounding_box()\n{\n /*!\n * @brief Initializes the centers of this cloud by using a bounding box over \n\t * all the points\n */\n\n\tdouble* minBox = new double [this->d] ();\n\tdouble* maxBox = new double [this->d] ();\n\tfor (int i = 0; i < this->d; i ++){\n\t\tminBox[i] = DBL_MAX;\n\t\tmaxBox[i] = DBL_MIN;\n\t}\n\n\tfor (int i = 0; i < this->n; i ++){\n\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\tif (points[i].coords[j] < minBox[j]){\n\t\t\t\tminBox[j] = points[i].coords[j];\n\t\t\t}\n\t\t\tif (points[i].coords[j] > maxBox[j]){\n\t\t\t\tmaxBox[j] = points[i].coords[j];\n\t\t\t}\n\t\t}\n\t}\n\n\tPoint center;\n\tfor (int i = 0; i < this->k; i ++){\n\t\tcenter.label = i;\n\n\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\tdouble randomValue = (double)rand() / RAND_MAX;\n\t\t\trandomValue = minBox[j] + (randomValue * (maxBox[j] - minBox[j]));\n\t\t\t\n\t\t\tcenter.coords[j] = randomValue;\n\t\t}\n\n\t\tthis->set_center(center, i);\n\t}\n\n\tdelete[] minBox;\n\tminBox = NULL;\n\tdelete[] maxBox;\n\tmaxBox = NULL;\n}\n\nvoid Cloud::init_forgy()\n{\n /*!\n * @brief Initializes the centers of this cloud using Forgy method,\n\t * that is it samples the centers uniformly from the data points\n */\n\n\t// we will pick points from our data \n\tint* availablePoints = new int [this->n] ();\n\tfor (int i = 0; i < this->n; i ++){\n\t\tavailablePoints[i] = 1;\n\t}\n\n\t// we must know what we've chosen so far\n\tint* chosenPoints = new int [this->k] ();\n\tint leftCenters = this->k;\n\n\twhile (leftCenters > 0){\n\t\tint randomPoint = rand() % this->n;\n\n\t\tif (availablePoints[randomPoint] > 0){\n\t\t\tavailablePoints[randomPoint] = 0;\n\t\t\tchosenPoints[this->k - leftCenters] = randomPoint;\n\t\t\tleftCenters -= 1;\n\t\t}\n\t}\n\n\t// we just have to build our centers now!\n\tPoint c;\n\tfor (int i = 0; i < this->k; i ++){\n\t\tc.label = i;\n\n\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\tc.coords[j] = points[chosenPoints[i]].coords[j];\n\t\t}\n\n\t\tthis->set_center(c, i);\n\t}\n\n\tdelete[] availablePoints;\n\tavailablePoints = NULL;\n\tdelete[] chosenPoints;\n\tchosenPoints = NULL;\n}\n\nvoid Cloud::init_plusplus()\n{\n /*!\n * @brief Initializes the centers of this cloud using kmeans++\n\t * which consists in drawing a distribution of points\n */\n\n\t// step of initialization for kmeans++\n\tint* 
chosenPoints = new int [this->k] ();\n\tint randomPoint = rand() % this->n;\n\tchosenPoints[0] = randomPoint;\n\n\t// loop \n\tdouble* distances = new double [this->n] ();\n\tfor (int i = 1; i < this->k; i ++){\n\n\t\t// we compute the nearest center for each data point\n\t\tdouble totalDist = 0.0;\n\n\t\tfor (int j = 0; j < this->n; j ++){\n\t\t\tdistances[j] = DBL_MAX;\n\n\t\t\tfor (int l = 0; l < i; l ++){\n\t\t\t\tdouble dist = points[j].distance(&(points[chosenPoints[l]]));\n\n\t\t\t\tif (dist < distances[j]){\n\t\t\t\t\tdistances[j] = dist;\n\t\t\t\t}\n\n\t\t\t\ttotalDist += distances[j];\n\t\t\t}\n\t\t}\n\n\t\tfor (int j = 0; j < this->n; j ++){\n\t\t\tdistances[j] /= totalDist; \n\t\t\tif (j > 0){\n\t\t\t\tdistances[j] += distances[j-1]; // we create a cumulative sum\n\t\t\t}\n\t\t}\n\n\t\t// we find the next center from a uniform distribution\n\t\tdouble randomUnif = (double)rand() / RAND_MAX;\n\n\t\tfor (int j = 0; j < this->n; j ++){\n\t\t\tif ( (j == 0 && distances[j] > randomUnif && randomUnif >= 0) \\\n\t\t\t\t\t|| (j > 0 && distances[j] > randomUnif && randomUnif >= distances[j-1]) ){\n\t\t\t\tchosenPoints[i] = j;\n\t\t\t}\n\t\t}\n\t}\n\n\t// we just have to build our centers now!\n\tPoint c;\n\tfor (int i = 0; i < this->k; i ++){\n\t\tc.label = i;\n\n\t\tfor (int j = 0; j < this->d; j ++){\n\t\t\tc.coords[j] = points[chosenPoints[i]].coords[j];\n\t\t}\n\n\t\tthis->set_center(c, i);\n\t}\t\t\n\n\tdelete[] chosenPoints;\n\tchosenPoints = NULL;\n\tdelete[] distances;\n\tdistances = NULL;\n}\n\nvoid Cloud::init_random_partition()\n{\n /*!\n * @brief Initializes the centers of this cloud using random partitions,\n\t * ie builds centers as the centers of random partitions of the data points\n */\n\n\t// random labelization of data points\n\tint randomCenter = 0;\n\tfor (int i = 0; i < this->n; i ++){\n\t\trandomCenter = rand() % this->k;\n\t\tpoints[i].label = randomCenter;\n\t}\n\n\t// centers will be centroids of this partition\n\tthis->set_centroid_centers();\n}\n\ndouble Cloud::silhouette()\n{\n /*!\n * @brief Computes the silhouette of this cloud \n * \n * @return The silhouette of this cloud\n */\n\n\tdouble silhouette = 0.0;\n\n\t// we compute a and b which are respectively the \n\t// average distance to points of the same cluster \n\t// and min of average distances to foreign clusters\n\tdouble* a = new double [n] ();\n\tdouble* b = new double [n] ();\n\tdouble* s = new double [n] ();\n\n\tfor (int i = 0; i < this->n; i ++){\n\t\tdouble* distPointClusters = new double [k] ();\n\t\tint* clustersSize = new int [k] ();\n\t\t\n\t\tfor (int j = 0; j < this->n; j ++){\n\t\t\tif (j != i){\n\t\t\t\tint pointLabel = points[j].label;\n\t\t\t\tclustersSize[pointLabel] += 1;\n\n\t\t\t\tdistPointClusters[pointLabel] += std::sqrt(points[i].distance(&(points[j])));\n\t\t\t}\n\t\t}\n\n\t\tdouble minDist = DBL_MAX;\n\t\tfor (int j = 0; j < this->k; j ++){\n\t\t\tif (clustersSize[j] > 0){\n\t\t\t\tdistPointClusters[j] /= clustersSize[j];\n\n\t\t\t\tif (j != points[i].label && distPointClusters[j] < minDist) {\n\t\t\t\t\tminDist = distPointClusters[j];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ta[i] = distPointClusters[points[i].label];\n\t\tb[i] = 0.0;\n\t\tif (minDist < DBL_MAX){\n\t\t\tb[i] = minDist;\n\t\t}\n\n\t\tdelete[] distPointClusters;\n\t\tdistPointClusters = NULL;\n\t\tdelete[] clustersSize;\n\t\tclustersSize = NULL;\n\n\t\ts[i] = (b[i] - a[i]) / (std::max(a[i], b[i]));\n\t}\n\n\tfor (int i = 0; i < this->n; i ++){\n\t\tsilhouette += s[i];\n\t}\n\tsilhouette /= this->n;\n\n\treturn 
silhouette;\n}\n"
},
{
"alpha_fraction": 0.5265102386474609,
"alphanum_fraction": 0.53194260597229,
"avg_line_length": 33.8636360168457,
"blob_id": "0862f6669fc94041d6d21040401e32c227f2fc6c",
"content_id": "7068c4bf4c24e605d532b2f6f1c08ec2a515035f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4603,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 132,
"path": "/src/mst_prim.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_prim.hpp\"\n\n#include <iostream>\n\nPrimAlgorithm::PrimAlgorithm(Graph* graph) : MSTAlgorithm(graph) {}\n\nvoid PrimAlgorithm::compute_mst() \n{\n /*!\n * @brief Computes the Minimum Spanning Tree of the initial graph using \n * Prim's algorithm. More specifically, initialize a priority queue to \n * sort the edges by their weight. At each iteration, it adds the edge with \n * minimal weight to the MST and checks for new edges to discover. \n * \n */\n\n this->mst_weight = 0.0;\n std::cout << \"\\nComputing the MST using Prim's algorithm...\";\n std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();\n\n int n = this->initial_graph->get_number_of_nodes();\n std::vector<Node*> nodes = this->initial_graph->get_nodes();\n\n auto cmp = [](const Edge* e1, const Edge* e2) \n { \n double w1 = e1->weight;\n double w2 = e2->weight;\n\n if (w1 < w2) {\n return true;\n } else if (w1 > w2) {\n return false;\n } else { // Allows multiple keys for same weight values\n intptr_t p1 = reinterpret_cast<intptr_t>(e1);\n intptr_t p2 = reinterpret_cast<intptr_t>(e2);\n\n return p1 < p2;\n }\n };\n\n // initialization of the priority queue (ie set), best edge per vertex and beginning vertex\n std::unordered_map<Node*, Edge*> min_edge;\n std::set<Edge*, decltype(cmp)> pq(cmp);\n\n std::unordered_set<Node*> visited_nodes;\n\n Node* current_node = this->initial_graph->get_any_node();\n visited_nodes.insert(current_node);\n this->mst_graph.add_node(current_node); // just to have the source node in the MST\n\n std::unordered_set<Edge*> connected_edges = this->initial_graph->connected_edges(current_node);\n\n for (Edge* e : connected_edges) {\n Node* other_node = e->other_node(current_node);\n\n if (visited_nodes.find(other_node) == visited_nodes.end()) {\n if (min_edge.find(other_node) != min_edge.end()) \n {\n if (*e < *min_edge.at(other_node)) {\n pq.erase(min_edge.at(other_node));\n min_edge.erase(other_node);\n\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n else \n {\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n }\n\n // filling the MST with new nodes until it forms a tree \n while (this->mst_graph.get_number_of_nodes() < n) {\n\n // testing whether if the set is empty\n if (pq.empty()) {\n throw std::invalid_argument(\"No MST can be built !\");\n }\n\n // adding the new edge and node to the MST and the visited nodes\n Edge* new_edge = *pq.begin();\n\n Node* p1 = new_edge->p1;\n Node* p2 = new_edge->p2;\n Node* new_node = (visited_nodes.find(p1) != visited_nodes.end()) ? 
p2 : p1;\n\n visited_nodes.insert(new_node);\n\n this->mst_graph.add_edge(new_edge);\n this->mst_weight += new_edge->weight;\n\n // we won't need the best edge for this node anymore\n min_edge.erase(new_node);\n pq.erase(new_edge);\n\n // updating the external nodes best edges\n connected_edges = this->initial_graph->connected_edges(new_node);\n\n for (Edge* e : connected_edges) {\n Node* other_node = e->other_node(new_node);\n\n if (visited_nodes.find(other_node) == visited_nodes.end()) {\n if (min_edge.find(other_node) != min_edge.end()) \n {\n if (*e < *min_edge.at(other_node)) {\n pq.erase(min_edge.at(other_node));\n min_edge.erase(other_node);\n\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n else \n {\n min_edge.insert(std::pair<Node*, Edge*>(other_node, e));\n pq.insert(e);\n }\n }\n }\n }\n \n std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();\n std::cout << \"[OK]\" << std::endl;\n this->treatment_done();\n\n std::cout << \"Total weight of the MST: \" << this->mst_weight << std::endl;\n\n std::cout << \"Time spent by the algorithm: \" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << \" µs\" << std::endl;\n}\n"
},
{
"alpha_fraction": 0.7172995805740356,
"alphanum_fraction": 0.7172995805740356,
"avg_line_length": 13.875,
"blob_id": "6ad58dcfc27a5252b5f7cb3c9c35230606c36dfc",
"content_id": "a4337433fad993e5ac501fbaba12e868f1909958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 16,
"path": "/src/mst_prim.hpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include \"mst_algorithm.hpp\"\n\n#include <set>\n#include <cfloat>\n#include <limits>\n#include <stdexcept>\n\nclass PrimAlgorithm : public MSTAlgorithm\n{\npublic:\n PrimAlgorithm(Graph* graph);\n\n virtual void compute_mst();\n};"
},
{
"alpha_fraction": 0.4708428680896759,
"alphanum_fraction": 0.4929971992969513,
"avg_line_length": 22.242603302001953,
"blob_id": "64bbb7ec4633fba722ce1825abe7cbc207fb3d61",
"content_id": "c64e7a93213d0345dac92c3f95cd487cf374922a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3927,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 169,
"path": "/test/test_graph.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"catch.hpp\" \n\n#include \"../src/graph.hpp\"\n#include <unordered_set>\n\nTEST_CASE(\"Simple graph construction\", \"[graph:init]\") \n{\n std::vector<Node> my_nodes;\n std::vector<Edge> my_edges;\n\n // Creating nodes \"i\"\n for(int i = 0; i < 10; i++){\n my_nodes.push_back(Node(i));\n }\n\n // Edges between \"i\" and \"9-i\" with weight 2*i\n for(int i = 0; i < 10; i++){\n my_edges.push_back(Edge(\n &my_nodes[i],\n &my_nodes[9-i],\n 2*i\n ));\n }\n\n Graph my_graph;\n\n // Adding edges\n for(int i = 0; i < 10; i++){\n my_graph.add_edge(&my_edges[i]);\n }\n\n SECTION(\"Tests of nodes\") \n {\n\n Node not_in_graph = Node(10);\n Node duplicate = Node(5);\n\n REQUIRE(my_graph.has_node(&my_nodes[5]) == true);\n REQUIRE(my_graph.has_node(¬_in_graph) == false);\n REQUIRE(my_graph.has_node(&duplicate) == false); // Pointer test\n\n }\n\n SECTION(\"Tests of edges- Will not expand the iterator of the set but works\") \n {\n\n std::unordered_set<Edge*> set1 = my_graph.connected_edges(&my_nodes[5]);\n Edge not_in_graph = Edge(&my_nodes[1], &my_nodes[5]);\n Edge duplicate = Edge(&my_nodes[0], &my_nodes[9]);\n\n REQUIRE(set1.find(&my_edges[5]) != set1.end());\n REQUIRE(set1.find(&my_edges[0]) == set1.end());\n REQUIRE(set1.find(¬_in_graph) == set1.end());\n REQUIRE(set1.find(&duplicate) == set1.end()); // Pointer test\n\n }\n\n SECTION(\"Tests to see if all edges are in memory\")\n {\n std::unordered_set<Edge*> all_edges = my_graph.get_edges();\n\n for (int i = 0; i < 10; i ++) {\n REQUIRE(all_edges.find(&my_edges[i]) != all_edges.end());\n }\n\n REQUIRE(all_edges.size() == 10);\n }\n}\n\nTEST_CASE(\"Tests of complex getters\", \"[graph:getters]\")\n{\n std::vector<Node> my_nodes;\n std::vector<Edge> my_edges;\n\n // Creating nodes \"i\"\n for(int i = 0; i < 10; i++){\n my_nodes.push_back(Node(i));\n }\n\n // Edges between \"i\" and \"9-i\" with weight 2*i\n for(int i = 0; i < 10; i++){\n my_edges.push_back(Edge(\n &my_nodes[i],\n &my_nodes[9-i],\n 2*i\n ));\n }\n\n Graph my_graph;\n\n // Adding edges\n for(int i = 0; i < 10; i++){\n my_graph.add_edge(&my_edges[i]);\n }\n\n SECTION(\"Number of nodes getter\") \n {\n int n = my_graph.get_number_of_nodes();\n\n REQUIRE(n == 10);\n }\n\n SECTION(\"Getter of any node\")\n {\n // We will just test whether the Node is in the graph\n\n Node* node = my_graph.get_any_node();\n\n bool node_in_graph = false; \n\n for (int i = 0; i < 10; i ++) {\n if (node == &my_nodes[i]) {\n node_in_graph = true;\n }\n }\n\n REQUIRE(node_in_graph == true);\n }\n\n SECTION(\"Nodes getter\")\n {\n std::vector<Node*> nodes = my_graph.get_nodes();\n\n REQUIRE(nodes.size() == 10);\n\n int nodes_in_graph = 0;\n\n for (int i = 0; i < 10; i ++) {\n for (int j = 0; j < 10; j ++) {\n if (nodes[i] == &my_nodes[j]) {\n nodes_in_graph += 1;\n }\n }\n }\n\n REQUIRE(nodes_in_graph == 10);\n }\n}\n\nTEST_CASE(\"Tests for the total weight computation\", \"[graph:weight]\")\n{\n std::vector<Node> my_nodes;\n std::vector<Edge> my_edges;\n\n // Creating nodes \"i\"\n for(int i = 0; i < 10; i++){\n my_nodes.push_back(Node(i));\n }\n\n // Edges between \"i\" and \"9-i\" with weight 2*i\n for(int i = 0; i < 10; i++){\n my_edges.push_back(Edge(\n &my_nodes[i],\n &my_nodes[9-i],\n 2*i\n ));\n }\n\n Graph my_graph;\n\n // Adding edges\n for(int i = 0; i < 10; i++){\n my_graph.add_edge(&my_edges[i]);\n }\n\n double weight = my_graph.total_weight();\n\n REQUIRE(std::abs(weight - 90) < 0.001);\n}"
},
{
"alpha_fraction": 0.6206758618354797,
"alphanum_fraction": 0.6226792931556702,
"avg_line_length": 32.57847595214844,
"blob_id": "61425e7efae94d593081cc3d9560466157be3dac",
"content_id": "913cee2c9ba4af458e5ca702c70caef9b30da1e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7495,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 223,
"path": "/src/compare_clustering.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"compare_clustering.hpp\"\n\nint nb_columns(const std::string &line)\n{\n /*!\n * @brief Enables to count the number of columns from a header\n * \n * @param line The header line of the considered file\n * \n * @return The number of columns \n */\n\n\treturn std::count_if(line.begin(), line.end(),\n [](unsigned char c){ return std::isspace(c); }) + 2;\n}\n\nvoid compare_clustering(std::string file_to_read, int k)\n{\n /*!\n * @brief From a file given as entry and an integer k for the number of \n * clusters to draw, establishes a comparison between kmeans and MST clustering\n * \n * @param file_to_read Path to the data\n * @param k Number of clusters to draw\n * \n */\n\n\t// get points from datafile\n\n\tstd::ifstream is(file_to_read);\n\tassert(is.is_open());\n\n\tstd::string header_line;\n\tstd::getline(is, header_line);\n\n\tconst int d = nb_columns(header_line) - 1;\n Point::d = d;\n\n std::vector<Point*> points = Point::read_points_from_file(file_to_read, d);\n int n = points.size();\n\n // run MST clustering \n\n std::chrono::steady_clock::time_point graph_creation = std::chrono::steady_clock::now();\n\n std::cout << \"\\nCreating graph...\";\n Graph* g = new Graph(points);\n std::cout << \"[OK]\" << std::endl;\n\n std::chrono::steady_clock::time_point kruskal_algo = std::chrono::steady_clock::now();\n\n KruskalAlgorithm kruskal = KruskalAlgorithm(g);\n kruskal.compute_mst();\n\n std::chrono::steady_clock::time_point kruskal_clustering = std::chrono::steady_clock::now();\n\n std::unordered_map<Node*, Node*> clusters = kruskal.compute_clustering(k);\n\n std::chrono::steady_clock::time_point mst_end = std::chrono::steady_clock::now();\n\n // run kmeans \n\n std::chrono::steady_clock::time_point cloud_creation = std::chrono::steady_clock::now();\n\n std::cout << \"\\nCreating cloud...\";\n\n Cloud cloud = Cloud(d, n, k);\n\n int label = 0;\n for (int i = 0; i < n; i ++) {\n cloud.add_point(*points[i], label);\n label = (label+1) % k;\n }\n\n std::cout << \"[OK]\" << std::endl;\n\n std::chrono::steady_clock::time_point kmeans_algo = std::chrono::steady_clock::now();\n\n cloud.kmeans();\n\n std::chrono::steady_clock::time_point kmeans_end = std::chrono::steady_clock::now();\n\n // comparison\n\n std::cout << \"\\nTotal time spent by MST clustering including graph creation: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(mst_end - graph_creation).count() << \" µs\" << std::endl;\n\n std::cout << \"Total time spent by MST clustering: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(mst_end - kruskal_algo).count() << \" µs\" << std::endl;\n\n double mst_variance = intracluster_variance(clusters);\n std::cout << \"Intracluster variance in MST: \";\n std::cout << mst_variance << std::endl;\n\n double mst_silhouette = silhouette(clusters);\n std::cout << \"Silhouette in MST: \";\n std::cout << mst_silhouette << std::endl;\n\n std::cout << \"\\nTotal time spent by kmeans including cloud creation: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(kmeans_end - cloud_creation).count() << \" µs\" << std::endl;\n\n std::cout << \"Total time spent by kmeans: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(kmeans_end - kmeans_algo).count() << \" µs\" << std::endl;\n\n double kmeans_variance = cloud.intracluster_variance();\n std::cout << \"Intracluster variance in kmeans: \";\n std::cout << kmeans_variance << std::endl;\n\n double kmeans_silhouette = cloud.silhouette();\n std::cout << 
\"Silhouette in kmeans: \";\n std::cout << kmeans_silhouette << std::endl;\n\n return;\n}\n\nvoid compare_inconsistency(std::string file_to_read, double cutoff)\n{\n /*!\n * @brief From a file given as entry and an integer k for the number of \n * clusters to draw, establishes a comparison between kmeans and MST clustering\n * \n * @param file_to_read Path to the data\n * @param k Number of clusters to draw\n * \n */\n\n\t// get points from datafile\n\n\tstd::ifstream is(file_to_read);\n\tassert(is.is_open());\n\n\tstd::string header_line;\n\tstd::getline(is, header_line);\n\n\tconst int d = nb_columns(header_line) - 1;\n Point::d = d;\n\n std::vector<Point*> points = Point::read_points_from_file(file_to_read, d);\n int n = points.size();\n\n // run Inconsistency method \n\n std::chrono::steady_clock::time_point graph_creation = std::chrono::steady_clock::now();\n\n std::cout << \"\\nCreating graph...\";\n Graph* g = new Graph(points);\n std::cout << \"[OK]\" << std::endl;\n\n std::chrono::steady_clock::time_point kruskal_algo = std::chrono::steady_clock::now();\n\n KruskalAlgorithm kruskal = KruskalAlgorithm(g);\n kruskal.compute_mst();\n\n std::chrono::steady_clock::time_point inc_clustering = std::chrono::steady_clock::now();\n\n std::unordered_map<Node*, Node*> clusters = kruskal.compute_clustering(cutoff);\n\n std::unordered_map<Node*, int> clusters_size;\n for (auto kv : clusters) {\n if(clusters_size.find(kv.second) != clusters_size.end()) {\n clusters_size.at(kv.second) += 1;\n } else {\n clusters_size.insert(std::pair<Node*, int>(kv.second, 1));\n }\n }\n std::cout << \"In total, there are \" << clusters_size.size() << \" clusters when cutoff = \" << cutoff << std::endl;\n\n int big_clusters = 0;\n for (auto kv : clusters_size) {\n if (kv.second > 1) {\n big_clusters += 1;\n }\n }\n std::cout << \"There are \" << big_clusters << \" clusters of size at least 2\" << std::endl;\n\n std::chrono::steady_clock::time_point inc_end = std::chrono::steady_clock::now();\n\n // run kmeans \n\n std::chrono::steady_clock::time_point cloud_creation = std::chrono::steady_clock::now();\n\n std::cout << \"\\nCreating cloud...\";\n\n Cloud cloud = Cloud(d, n, clusters.size());\n\n int label = 0;\n for (int i = 0; i < n; i ++) {\n cloud.add_point(*points[i], label);\n label = (label+1) % clusters.size();\n }\n\n std::cout << \"[OK]\" << std::endl;\n\n std::chrono::steady_clock::time_point kmeans_algo = std::chrono::steady_clock::now();\n\n cloud.kmeans();\n\n std::chrono::steady_clock::time_point kmeans_end = std::chrono::steady_clock::now();\n\n // comparison\n\n std::cout << \"\\nTotal time spent by Inconsistency clustering including graph creation: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(inc_end - graph_creation).count() << \" µs\" << std::endl;\n\n std::cout << \"Total time spent by Inconsistency clustering: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(inc_end - kruskal_algo).count() << \" µs\" << std::endl;\n\n double inc_variance = intracluster_variance(clusters);\n std::cout << \"Intracluster variance with Inconsistency clustering: \";\n std::cout << inc_variance << std::endl;\n\n std::cout << \"\\nTotal time spent by kmeans including cloud creation: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(kmeans_end - cloud_creation).count() << \" µs\" << std::endl;\n\n std::cout << \"Total time spent by kmeans: \";\n std::cout << std::chrono::duration_cast<std::chrono::microseconds>(kmeans_end - kmeans_algo).count() 
<< \" µs\" << std::endl;\n\n double kmeans_variance = cloud.intracluster_variance();\n std::cout << \"Intracluster variance in kmeans: \";\n std::cout << kmeans_variance << std::endl;\n\n return;\n}"
},
{
"alpha_fraction": 0.5682527422904968,
"alphanum_fraction": 0.5787831544876099,
"avg_line_length": 33.20000076293945,
"blob_id": "f4551744315a95cca4ba977aa48e157b1ba8782e",
"content_id": "b2d1f2cdd513aad3a6f7d1bb9f26047f50af110d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2565,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 75,
"path": "/src/mst_kruskal.cpp",
"repo_name": "sebastienmeyer2/mst-clustering",
"src_encoding": "UTF-8",
"text": "#include \"mst_kruskal.hpp\"\n\nKruskalAlgorithm::KruskalAlgorithm(Graph* graph) : MSTAlgorithm(graph) {}\n\nvoid KruskalAlgorithm::compute_mst()\n{\n /*!\n * @brief Computes the Minimum Spanning Tree of the graph associated to \n * this instance of MSTAlgorithm, using Kruskal's algorithm. More specifically,\n * sorts all the edges by their weight, and uses an Union-Find data structure\n * to select which components to union at each iteration, until there is \n * only one left, which is the MST.\n * \n */\n\n this->mst_weight = 0.0;\n std::cout << \"\\nComputing the MST using Kruskal's algorithm...\";\n std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();\n\n int n = this->initial_graph->get_number_of_nodes();\n std::vector<Node*> nodes = this->initial_graph->get_nodes();\n std::unordered_set<Edge*> edges = this->initial_graph->get_edges();\n\n auto cmp = [](const Edge* e1, const Edge* e2) \n { \n double w1 = e1->weight;\n double w2 = e2->weight;\n\n if (w1 < w2) {\n return false; // reversed in order to follow priority queue's implementation\n } else if (w1 > w2) {\n return true;\n } else { // allows multiple keys for same weight values (arbitrary comp)\n intptr_t p1 = reinterpret_cast<intptr_t>(e1);\n intptr_t p2 = reinterpret_cast<intptr_t>(e2);\n\n return p1 < p2;\n }\n };\n\n // initialization of the algorithm\n std::priority_queue<Edge*, std::vector<Edge*>, decltype(cmp)> pq(cmp);\n for (Edge* e : edges) {\n pq.push(e);\n }\n\n UnionFind uf = UnionFind(nodes);\n\n // while there is more than one component, we add edges that are shared \n int k = uf.get_num_classes();\n while (k > 1) {\n\n Edge* min_edge = pq.top();\n pq.pop();\n\n Node* rep1 = uf.Find(min_edge->p1);\n Node* rep2 = uf.Find(min_edge->p2);\n\n if (rep1 != rep2) {\n this->mst_graph.add_edge(min_edge);\n this->mst_weight += min_edge->weight;\n uf.Union(rep1, rep2);\n }\n\n k = uf.get_num_classes();\n }\n\n std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();\n std::cout << \"[OK]\" << std::endl;\n this->treatment_done();\n\n std::cout << \"Total weight of the MST: \" << this->mst_weight << std::endl;\n\n std::cout << \"Time spent by the algorithm: \" << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << \" µs\" << std::endl;\n}"
}
] | 37 |
PTyneu/Raif
|
https://github.com/PTyneu/Raif
|
e37a8d951301076dbb27eba38604f2ab7bae770d
|
d6d53a334ddec8a46506715b49b6955f1ebc0d79
|
f71a49b2dd42641f00a82a12f302c90f308d45e9
|
refs/heads/main
| 2023-08-29T07:14:57.398756 | 2021-10-06T06:24:13 | 2021-10-06T06:24:13 | 411,401,940 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.453125,
"alphanum_fraction": 0.671875,
"avg_line_length": 14.75,
"blob_id": "e6998e6c19ee766cc46f562475928f8d5dff1fde",
"content_id": "9d1fb55a31510ed60b775ee11ec05c2271b125a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "PTyneu/Raif",
"src_encoding": "UTF-8",
"text": "catboost==0.26.1\nmatplotlib==3.4.3\nnumpy==1.21.2\npandas==1.3.3\n\n"
},
{
"alpha_fraction": 0.5685049295425415,
"alphanum_fraction": 0.6267770528793335,
"avg_line_length": 41.11184310913086,
"blob_id": "bdfafd28ee01b3df5c391015054e9521bd9aecfa",
"content_id": "619148399a6e7e4f3b86e9439575e07c536b50b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6540,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 152,
"path": "/Raif_finale.py",
"repo_name": "PTyneu/Raif",
"src_encoding": "UTF-8",
"text": "\nimport pandas as pd\nimport matplotlib as plt\nfrom catboost import CatBoostRegressor\nimport numpy as np\n\n\ntrain=pd.read_csv('/Users/maksimvejnbender/Downloads/raif/raif_train.csv')\ntest=pd.read_csv('/Users/maksimvejnbender/Downloads/raif/test.csv')\n\n\ntrain['split']=1\ntest['split']=0\n\n\ntrain.plot(kind=\"scatter\", x=\"lng\", y=\"lat\", alpha=0.05)\ntrain.plot(kind=\"scatter\", x=\"lng\", y=\"lat\", alpha=0.05)\n\ndf=pd.concat([train, test]).query('price_type == 1')\n\ndf['floor']=df['floor'].fillna(1.0)\ndf['street'].fillna('NAN', inplace=True)\ndf['per_square_meter_price'].fillna(0, inplace=True)\ndef antipropusk(column):\n mean=df[column].mean()\n df[column]=df[column].fillna(mean)\n\nantipropusk('reform_mean_floor_count_500')\nantipropusk('reform_mean_year_building_500')\nantipropusk('reform_house_population_500')\nantipropusk('reform_mean_floor_count_1000')\nantipropusk('reform_mean_year_building_1000')\nantipropusk('reform_house_population_1000')\nantipropusk('osm_city_nearest_population')\n\nlist1 = [0.001, 0.005, 0.0075, 0.001]\nlist2 = ['osm_amenity_points_in_', 'osm_building_points_in_', 'osm_catering_points_in_', 'osm_crossing_points_in_', 'osm_culture_points_in_',\n 'osm_finance_points_in_', 'osm_offices_points_in_', 'osm_shops_points_in_']\ncol_names=[]\nfor i in range(len(list2)):\n for j in list1:\n print(list2[i] + '{}'.format(j))\n col_names.append(list2[i] + '{}'.format(j))\n\nmean_names = []\nfor i in range(0,len(col_names), 4):\n print(col_names[i][4:-16] + '_mean')\n mean_names.append(col_names[i][4:-16] + '_mean')\n\nmean_names.append('per_square_meter_price')\nfor i in range(0,len(col_names), 4):\n df[col_names[i][4:-16] + '_mean'] = (df[col_names[i]] * 1\n + df[col_names[i+1]] * 0.2\n + df[col_names[i+2]] * 0.1333\n + df[col_names[i+3]] * 0.1)/(df.shape[0]-1)\n\nlist1 = [0.005, 0.0075, 0.01]\nlist2 = ['osm_healthcare_points_in_', 'osm_historic_points_in_', 'osm_hotels_points_in_', 'osm_leisure_points_in_',\n 'osm_train_stop_points_in_', 'osm_transport_stop_points_in_']\ncol_names=[]\n\nfor i in range(len(list2)):\n for j in list1:\n print(list2[i] + '{}'.format(j))\n col_names.append(list2[i] + '{}'.format(j))\n\nmean_names = []\nfor i in range(0,len(col_names), 3):\n print(col_names[i][4:-16] + '_mean')\n mean_names.append(col_names[i][4:-16] + '_mean')\n\nmean_names.append('per_square_meter_price')\nfor i in range(0,len(col_names), 3):\n df[col_names[i][4:-16] + '_mean'] = (df[col_names[i]] * 1\n + df[col_names[i+1]] * 0.67\n + df[col_names[i+2]] * 0.5)/(df.shape[0]-1)\n\ndf.drop(columns=['osm_amenity_points_in_0.001',\n 'osm_amenity_points_in_0.005', 'osm_amenity_points_in_0.0075',\n 'osm_amenity_points_in_0.01', 'osm_building_points_in_0.001',\n 'osm_building_points_in_0.005', 'osm_building_points_in_0.0075',\n 'osm_building_points_in_0.01', 'osm_catering_points_in_0.001',\n 'osm_catering_points_in_0.005', 'osm_catering_points_in_0.0075',\n 'osm_catering_points_in_0.01', 'osm_crossing_points_in_0.001',\n 'osm_crossing_points_in_0.005', 'osm_crossing_points_in_0.0075',\n 'osm_crossing_points_in_0.01', 'osm_culture_points_in_0.001',\n 'osm_culture_points_in_0.005', 'osm_culture_points_in_0.0075',\n 'osm_culture_points_in_0.01', 'osm_finance_points_in_0.001',\n 'osm_finance_points_in_0.005', 'osm_finance_points_in_0.0075',\n 'osm_finance_points_in_0.01', 'osm_healthcare_points_in_0.005',\n 'osm_healthcare_points_in_0.0075', 'osm_healthcare_points_in_0.01',\n 'osm_historic_points_in_0.005', 'osm_historic_points_in_0.0075',\n 
'osm_historic_points_in_0.01', 'osm_hotels_points_in_0.005',\n           'osm_hotels_points_in_0.0075', 'osm_hotels_points_in_0.01',\n           'osm_leisure_points_in_0.005', 'osm_leisure_points_in_0.0075',\n           'osm_leisure_points_in_0.01', 'osm_offices_points_in_0.001',\n           'osm_offices_points_in_0.005', 'osm_offices_points_in_0.0075',\n           'osm_offices_points_in_0.01', 'osm_shops_points_in_0.001',\n           'osm_shops_points_in_0.005', 'osm_shops_points_in_0.0075',\n           'osm_shops_points_in_0.01', 'osm_train_stop_points_in_0.005',\n           'osm_train_stop_points_in_0.0075', 'osm_train_stop_points_in_0.01', \n           'osm_transport_stop_points_in_0.005','osm_transport_stop_points_in_0.0075',\n           'osm_transport_stop_points_in_0.01','reform_mean_floor_count_500', \n           'reform_mean_year_building_500', 'reform_house_population_500', \n           'reform_count_of_houses_500','city', \n           'lat', 'lng', 'osm_city_nearest_name', \n           'building_mean', 'floor'], inplace=True)\n\ndf['date'] = pd.to_datetime(df['date'])\n\ndef deviation_metric_one_sample(y_true, y_pred):\n    deviation = (y_pred - y_true) / np.maximum(1e-8, y_true)\n    if np.abs(deviation) <= 0.15: \n        return 0\n    elif deviation <= -0.6:\n        return 9.9\n    elif deviation < -0.15:\n        return 1.1 * (deviation / 0.15 + 1) ** 2\n    elif deviation < 0.6:\n        return (deviation / 0.15 - 1) ** 2\n    return 9\n\ndef deviation_metric(y_true, y_pred):\n    return np.array([deviation_metric_one_sample(y_true[n], y_pred[n]) for n in range(len(y_true))]).mean()\n\ntest=df.query('split==0')\ntrain=df.query('split==1')\nod=test['id']\n\ntest_features=test.drop(['id','per_square_meter_price', 'split', 'price_type'], axis=1)\ntrain_features=train.drop(['id','per_square_meter_price', 'split', 'price_type'], axis=1)\ntrain_target=train['per_square_meter_price']\ncat_cols = ['street', 'region']\n\nmodel = CatBoostRegressor(loss_function='MAE', verbose=0)\nmodel = model.fit(train_features, train_target, cat_features=cat_cols)\n\npred=model.predict(test_features)\npredf=pd.DataFrame(pred)\na=pd.Series(pred)\n\nresult=pd.DataFrame({'id':od, 'per_square_meter_price':a})\n\nresult.to_csv('sub1.csv', index=False)\n\n##To analyze the obtained results,\n##load the vector of actual prices per square meter and name the variable test_target\nmodel.get_feature_importance(prettified=True)\nscores = []\nmetric = deviation_metric(test_target, pred)\nscores.append(metric)\nprint(f'Mean metric over bins: {np.mean(scores):.3f}')\nprint(f'Metric deviation over bins: {np.std(scores):.3f}')"
},
{
"alpha_fraction": 0.7812685370445251,
"alphanum_fraction": 0.8203912377357483,
"avg_line_length": 104.4375,
"blob_id": "6b439be91722278bc5aaf02ad64d40ababecc703",
"content_id": "45af2c3a5b97a9e8a0ba41d0fb2bf9416cbc3617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2867,
"license_type": "no_license",
"max_line_length": 784,
"num_lines": 16,
"path": "/README.md",
"repo_name": "PTyneu/Raif",
"src_encoding": "UTF-8",
"text": "В данном репозитории представлено решение задачи хакатона от Райффайзенбанка.\n\nЗадача предсталяла собой предсказание цен на коммерческую недвижимость в России (полное условие задачи по ссылке https://apply.raifhack.ru/competition). \n\nРешение представлено в файле с расширением .py, в .ipynb - некоторое дополнение и промежуточные результаты <img width=\"867\" alt=\"Снимок экрана 2021-09-28 в 21 53 56\" src=\"https://user-images.githubusercontent.com/90149954/135148439-0fae78d0-91de-4d50-8691-8c44e1ca2c11.png\"> \n\nИспользованные библиотеки указаны в файле requirements.txt.\n\n\nНаша команда заняла почетное 107 место (некоторые ребята уже после хакатона сабмитили новые решения, поэтому на скрине мы ниже).\n\nРешение представляет собой много-много фич инженеринга, оценку корреляций а также небольшую предобработку данных.\n\nВ обозримом будущем постараюсь улучшить проект дабы уже после соревнования залезть в топ-50. Как это можно сделать? В условиях неограниченного времени довольно просто: добить фичу floor и вероятно разбить ее при помощи ohe, попытаться стандартизировать данные, убрать фичи, корреляция которых будет ниже рандомного значения. Ну и конечно же модельная часть: провести подбор гиперпараметров, сравнить результаты с градиентным бустингом, попробовать в нейросети (хочу полностью обучить перцептрон для регрессии). Также можно попытаться обучить модель классификации на обучающей выборке с типом realty_class = 1 (оценка была произведена оценщиком) и сделать предикт с высоким порогом (+- 0.75, но надо будет смотреть по факту разумеется) дабы получить больше данных для обучения модели. \n\nP.S. среди топ-200 только у нас один сабмит был, нам просто не хватило времени (:\n"
}
] | 3 |
didadadida93/t5apy
|
https://github.com/didadadida93/t5apy
|
a16e575d1c07290eb453f3daccbd1ddb41517127
|
74f9f7f85b69a01a23b78ab1b1a89c4c2e2b07e9
|
74dc4d5c785889b5443bb3938197882b00487d9a
|
refs/heads/master
| 2020-07-08T02:39:34.019378 | 2019-08-29T01:09:07 | 2019-08-29T01:09:07 | 203,542,243 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6590313911437988,
"alphanum_fraction": 0.6832460761070251,
"avg_line_length": 35.380950927734375,
"blob_id": "aa18cc4d8b72e711947f540d6bffed0aed9beaad",
"content_id": "517c9488ae969d1ffb097ada5cd9f9b98a977392",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1528,
"license_type": "permissive",
"max_line_length": 356,
"num_lines": 42,
"path": "/README.md",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "# t5apy\n\nRequest public map data from Travian 5 API more convenient with `t5apy`. \nThis [[official thread](https://forum.kingdoms.com/index.php?thread/4099-api-for-external-tools/)] explain on how to use Travian 5 API.\n\n[](https://travis-ci.org/didadadida93/t5apy) [](https://codecov.io/gh/didadadida93/t5apy) [](https://choosealicense.com/licenses/mit/)\n\n# Installation\n\n> it's recommended to use [virtualenv](https://docs.python-guide.org/dev/virtualenvs/).\n\n1. git clone this repo \n`(venv)$ git clone https://github.com/didadadida93/t5apy.git`\n2. change directory to t5apy. \n`(venv)$ cd t5apy`\n3. install it \n`(venv)$ pip install .`\n\n# Usage\n\nBased on [[official thread](https://forum.kingdoms.com/index.php?thread/4099-api-for-external-tools/)] first we need API key. \nAfter that we can request public map data.\n```python\n>>> import t5apy\n>>>\n>>> api_key = t5apy.request_api_key(\n... email='[email protected]',\n... site_name='your-tools',\n... site_url='https://example.com',\n... public='false',\n... gameworld='com1'\n... )\n>>>\n>>> # once we get api key, we can request public map data\n... map_data = t5apy.request_map_data(\n... private_api_key=api_key['privateApiKey'],\n... gameworld='com1'\n... )\n>>> map_data.keys()\ndict_keys(['gameworld', 'players', 'kingdoms', 'map'])\n>>>\n```\n"
},
{
"alpha_fraction": 0.5294825434684753,
"alphanum_fraction": 0.5367027521133423,
"avg_line_length": 22.08333396911621,
"blob_id": "e327c5047f141963224847bc965af18565a81656",
"content_id": "7872aa25822780dc85b18274ccca5b0c64bfded7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 831,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 36,
"path": "/t5apy/urllib_parse.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "\"\"\" Module `urllib_parse.py` only imported when `t5apy` being used on\npython 2. If this module is imported with python 3, it will raise\n`AttributeError`.\n\"\"\"\n\nimport urllib\n\n\ndef urlencode(params, safe=''):\n \"\"\" `urlencode` try to substitute missing `urllib.parse.urlencode`\n from python 3 if `t5apy` running on python 2.\n\n ---\n\n Parameters:\n params: `dict` data that want to be encoded into URL query\n string.\n safe: `string` string that didn't want to be encoded.\n\n return: `string`\n\n ---\n\n Usage:\n >>> urlencode({'a':'a', 'b':'b', 'c':'c'})\n 'a=a&b=b&c=c'\n \"\"\"\n \n return '&'.join(\n [\n '='.join(\n [urllib.quote_plus(k, safe), urllib.quote_plus(v, safe)]\n )\n for k, v in params.items()\n ]\n )\n"
},
{
"alpha_fraction": 0.5255449414253235,
"alphanum_fraction": 0.5316757559776306,
"avg_line_length": 26.961904525756836,
"blob_id": "eeee854ba7591f998cdec63d6ad29e660f35cf61",
"content_id": "6e6d382bf95b95cdcfd200394e830a2a35e1283e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2936,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 105,
"path": "/t5apy/__init__.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "__name__ = 't5apy'\n__version__ = '0.0.1'\n__author__ = 'didadadida93'\n__author_email__ = '[email protected]'\n__description__ = 'Request public map data from Travian 5 API easily'\n__license__ = 'MIT'\n\n\nimport json\n\nfrom . import http\nfrom .fixtures import BASE_URL\n\n\ndef request_api_key(email, site_name, site_url, public, gameworld):\n \"\"\" Request API key to Travian 5 API endpoints.\n Based on official thread, it need to get API key first before\n request public map data from Travian 5 API endpoints.\n\n ---\n\n Parameters:\n email: `string` valid email.\n site_name: `string` name of the tool.\n site_url: `string` url of the tool - needs to be a valid url.\n public: `string` it is either 'true' or 'false'.\n gameworld: `string` which gameworld that you want to request\n the api key.\n\n return: `dict`\n\n ---\n\n Usage:\n >>> import t5apy\n >>> api_key = t5apy.request_api_key(\n ... email='[email protected]',\n ... site_name='your-tools',\n ... site_url='https://example.com',\n ... public='false',\n ... gameworld='com1',\n ... )\n >>> api_key\n {'privateApiKey': '...', 'publicSiteKey': '...'}\n \"\"\"\n\n params = {\n 'action': 'requestApiKey',\n 'email': email,\n 'siteName': site_name,\n 'siteUrl': site_url,\n 'public': public,\n }\n\n raw_response = http.request(\n method='GET',\n url=BASE_URL % gameworld,\n params=params,\n )\n\n return json.loads(raw_response)['response']\n\n\ndef request_map_data(private_api_key, gameworld):\n \"\"\" Request public map data from Travian 5 API endpoints.\n Before use this function, make sure you have private api key from\n Travian 5. You can use :func:`request_api_key` for get the private\n api key.\n\n ---\n\n Parameters:\n private_api_key: `string` private api key from Travian 5.\n This private api key should be related\n to gameworld.\n gameworld: `string` which gameworld that you want to request\n public map data. This gameworld should be related\n with private api key.\n\n return: `dict`\n\n ---\n\n Usage:\n >>> # using api_key from t5apy.request_api_key() above\n ... r = t5apy.request_map_data(\n ... private_api_key=api_key['privateApiKey'],\n ... gameworld='com1',\n ... )\n >>> r.keys()\n dict_keys(['gameworld', 'players', 'kingdoms', 'map'])\n \"\"\"\n\n params = {\n 'action': 'getMapData',\n 'privateApiKey': private_api_key,\n }\n\n raw_response = http.request(\n method='GET',\n url=BASE_URL % gameworld,\n params=params,\n )\n\n return json.loads(raw_response)['response']\n"
},
{
"alpha_fraction": 0.5304396748542786,
"alphanum_fraction": 0.5349492430686951,
"avg_line_length": 27.612903594970703,
"blob_id": "374df6f8bf14dbde250dae2ce3b2d778561802d6",
"content_id": "2d253e8a8ec3e3b781fd122f88348ad7041d9bb3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1774,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 62,
"path": "/tests/test_t5apy.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "from t5apy import request_api_key\nfrom t5apy import request_map_data\nfrom t5apy.fixtures import BASE_URL\n\nimport unittest\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\n\nclass Testt5apy(unittest.TestCase):\n\n @patch('t5apy.http')\n @patch('t5apy.json')\n def testing_request_api_key(self, mock_json, mock_http):\n mock_json.loads.return_value = {'response': 'mocked'}\n r = request_api_key(\n email='[email protected]',\n site_name='unittest',\n site_url='https://example.com',\n public='false',\n gameworld='test',\n )\n\n mock_http.request.assert_called_with(\n method='GET',\n url=BASE_URL % 'test',\n params={\n 'action': 'requestApiKey',\n 'email': '[email protected]',\n 'siteName': 'unittest',\n 'siteUrl': 'https://example.com',\n 'public': 'false',\n },\n )\n mock_json.loads.assert_called_with(mock_http.request())\n self.assertEqual(r, 'mocked')\n\n @patch('t5apy.http')\n @patch('t5apy.json')\n def testing_request_map_data(self, mock_json, mock_http):\n mock_json.loads.return_value = {'response': 'mocked'}\n r = request_map_data(\n private_api_key='private api key',\n gameworld='test',\n )\n\n mock_http.request.assert_called_with(\n method='GET',\n url=BASE_URL % 'test',\n params={\n 'action': 'getMapData',\n 'privateApiKey': 'private api key',\n },\n )\n mock_json.loads.assert_called_with(mock_http.request())\n self.assertEqual(r, 'mocked')\n\n\nif __name__ == '__main__':\n unittest.run()\n"
},
{
"alpha_fraction": 0.5487214922904968,
"alphanum_fraction": 0.5556323528289795,
"avg_line_length": 25.309091567993164,
"blob_id": "b9809287e243ca0852f812acc6475cd8564c3a88",
"content_id": "e5bce8ba1f5455b73ffd7ba89be8607dfa435c4b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1447,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 55,
"path": "/tests/test_http.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "import unittest\nimport collections\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nfrom t5apy import __name__\nfrom t5apy import __version__\nfrom t5apy.http import request\n\n\nclass TestHTTP(unittest.TestCase):\n\n def setUp(self):\n self.headers = {\n 'User-Agent': 'Python-%s/%s' % (__name__, __version__),\n }\n self.url = 'https://httpbin.org/get'\n\n @patch('t5apy.http.urllib_request')\n def test_request(self, mock_request):\n mock_request.urlopen().__enter__().read.return_value = 'mocked'\n\n r = request('GET', self.url)\n\n mock_request.Request.assert_called_with(\n url=self.url,\n method='GET',\n headers=self.headers,\n )\n mock_request.urlopen.assert_called_with(\n mock_request.Request(),\n timeout=60.0,\n )\n self.assertEqual(r, 'mocked')\n\n # testing again with params\n params = collections.OrderedDict([('a', 'a'), ('b', 'b'), ('c', 'c')])\n r = request('GET', self.url, params)\n\n mock_request.Request.assert_called_with(\n url=self.url + '?a=a&b=b&c=c',\n method='GET',\n headers=self.headers\n )\n mock_request.urlopen.assert_called_with(\n mock_request.Request(),\n timeout=60.0,\n )\n self.assertEqual(r, 'mocked')\n\n\nif __name__ == '__main__':\n unittest.run()\n"
},
{
"alpha_fraction": 0.6089850068092346,
"alphanum_fraction": 0.6272878646850586,
"avg_line_length": 21.259260177612305,
"blob_id": "8451e04c794ae9b498e4d7da79986218488fbf1d",
"content_id": "cfcfee8315a6325bda439c803510187f97e80db8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 601,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 27,
"path": "/setup.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "import setuptools\n\nimport t5apy\n\n\nwith open('README.md', 'r') as f:\n long_description = f.read()\n\n\nsetuptools.setup(\n name=t5apy.__name__,\n version=t5apy.__version__,\n description=t5apy.__description__,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/didadadida93/t5apy',\n author=t5apy.__author__,\n author_email=t5apy.__author_email__,\n packages=['t5apy'],\n license=t5apy.__license__,\n install_requires=[\n 'mock',\n ]\n classifiers=[\n 'License :: OIS Approved :: MIT License',\n ],\n)\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 9.199999809265137,
"blob_id": "a9a12a4ebfbc16afb9f492cca8edcab4ef5f4b17",
"content_id": "a9d612bd2f2ca99c6a4923410137607b25376461",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 51,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 5,
"path": "/.coveragerc",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "[run]\nsource = t5apy\n\n[report]\nshow_missing = True\n"
},
{
"alpha_fraction": 0.5900552272796631,
"alphanum_fraction": 0.5966851115226746,
"avg_line_length": 22.205127716064453,
"blob_id": "5f9606ba5f40a714056a2f3bf91598dd60f009f3",
"content_id": "35bcbb108f3f5d9a7da9b68ffe86120604320080",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 39,
"path": "/t5apy/http.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "\"\"\" Module `http.py` is for `t5apy` make HTTP request to Travian 5 API\nend points.\n\"\"\"\n\ntry:\n import urllib.request as urllib_request\n import urllib.parse as urllib_parse\nexcept ImportError:\n import urllib2 as urllib_request\n from . import urllib_parse\n\nfrom . import __version__\nfrom . import __name__\n\n\ndef request(method, url, params={}):\n \"\"\" Make HTTP request to URL.\n\n Parameters:\n method: `string` HTTP method.\n url: `string` URL.\n params: `dict` passing data to URL's query strings.\n \"\"\"\n\n headers = {\n 'User-Agent': 'Python-%s/%s' % (__name__, __version__),\n }\n\n if params:\n url = '?'.join([url, urllib_parse.urlencode(params, safe='@ : /')])\n\n req = urllib_request.Request(\n url=url,\n method=method,\n headers=headers,\n )\n\n with urllib_request.urlopen(req, timeout=60.0) as r:\n return r.read()\n"
},
{
"alpha_fraction": 0.6859503984451294,
"alphanum_fraction": 0.6859503984451294,
"avg_line_length": 19.16666603088379,
"blob_id": "87a7185732d133db4f3e03a337d2c4ac8882e1d5",
"content_id": "471f9e640ffd0ee0961752f4a50bc1c0b5e07025",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 6,
"path": "/t5apy/fixtures.py",
"repo_name": "didadadida93/t5apy",
"src_encoding": "UTF-8",
"text": "import os\n\n\nBASE_URL = 'https://%s.kingdoms.com/api/external.php'\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n"
}
] | 9 |
athola/PythonTheHardWay
|
https://github.com/athola/PythonTheHardWay
|
591fc67f6e0665b1c96d4b4cde05a9193677658d
|
a0b48c880b746a9a5c74b05347c2cfea7bb5c682
|
d9d397dbcfafd770ea8e48ac4e6619454651ee07
|
refs/heads/master
| 2021-01-21T11:30:20.824033 | 2020-01-16T06:07:32 | 2020-01-16T06:07:32 | 91,742,902 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5058479309082031,
"alphanum_fraction": 0.5263158082962036,
"avg_line_length": 21.799999237060547,
"blob_id": "dc7aab0b4b6085900cd4d43168a4c1802965ca88",
"content_id": "21b386788d84c68f7300e6c9a830f308ddd737ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 15,
"path": "/CommaCode.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "def separateWithComma(listItem):\n newStr = ''\n for item in listItem:\n if item == listItem[-1]:\n newStr += ('and ' + str(item))\n else:\n newStr += (str(item) + ', ')\n return newStr\n\ndef main():\n theList = [2, '4', 6, 8, 10, True]\n theStr = separateWithComma(theList)\n print(theStr)\n\nmain()\n"
},
{
"alpha_fraction": 0.5723905563354492,
"alphanum_fraction": 0.5757575631141663,
"avg_line_length": 28.13793182373047,
"blob_id": "9f1a4349a27a691037a94b6c06602bcc3a503ebb",
"content_id": "a93f4213b8ed5f0284a5884c8eda9049c2d41692",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 891,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 29,
"path": "/RegexStrip.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import re, sys\n\ndef stripString(string, char=\"\"):\n newStr = \"\"\n if (char==\"\"):\n frontStrippedStr = \"\"\n stripFrontSpaceRegex = re.compile(r'^\\s+')\n frontStrippedStr = stripFrontSpaceRegex.sub('', string)\n stripEndSpaceRegex = re.compile(r'\\s+$')\n newStr = stripEndSpaceRegex.sub('', frontStrippedStr)\n else:\n stripCharRegex = re.compile(char)\n newStr = stripCharRegex.sub('', string)\n return newStr\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n if (useCommandLine):\n string = sys.argv[1]\n char = sys.argv[2]\n else:\n string = input(\"Please enter a string: \")\n char = input(\"Please enter a character to strip from the string: \")\n newString = stripString(string, char)\n print(newString)\n\nmain()\n \n \n"
},
{
"alpha_fraction": 0.5888158082962036,
"alphanum_fraction": 0.6019737124443054,
"avg_line_length": 15,
"blob_id": "c9e002e67d4ea066b8c11c18a85ebf08777ab5ad",
"content_id": "8b011561e5afa0363d4e8a678dd9a8ae669bd7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 19,
"path": "/SpamSalutations.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import sys\n\nprint('Please enter a value for spam: ')\n\nuseCommandLine = False\nif (len(sys.argv) > 1):\n useCommandLine = True\n\nif (useCommandLine):\n spam = sys.argv[1]\nelse:\n spam = input()\n \nif spam == '1':\n print('Hello')\nelif spam == '2':\n print('Howdy')\nelse:\n print('Greetings!')\n"
},
{
"alpha_fraction": 0.5708289742469788,
"alphanum_fraction": 0.5739769339561462,
"avg_line_length": 26.22857093811035,
"blob_id": "2e89460a3657dd5bd6bb46368bfce09eef74acb2",
"content_id": "9ad0b316bc0d94a5d4aaee9507a9695a219e3a3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 35,
"path": "/CommandLineEmailer.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import re, sys\nfrom selenium import webdriver\n\ndef enterArgument(argument):\n while True:\n print(argument.txt)\n if (argument.flg):\n entry = sys.argv[argument.num]\n print(entry)\n else:\n entry = input()\n if re.match(argument.reg, entry):\n return (entry)\n else:\n argument.flg = False\n \nclass Argument:\n def __init__(self, text, flag, number, regex_string):\n self.txt = text\n self.flg = flag\n self.num = number\n self.reg = regex_string\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n\n email = enterArgument(Argument('Enter your email: ', useCommandLine, 1, '(.*)@(.*)'))\n searchText = enterArgument(Argument('Enter text to search for: ', useCommandLine, 1, '.*'))\n browser = webdriver.Chrome()\n broswer.get(\"http://gmail.com\")\n print(type(browser))\n\nmain()\n"
},
{
"alpha_fraction": 0.5941828489303589,
"alphanum_fraction": 0.6080332398414612,
"avg_line_length": 30.39130401611328,
"blob_id": "8799702e3359ec0fda307227718ee467731e6c36",
"content_id": "2e3abfd9cc722a42a7f3015b60c18c7f1d4c7654",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 722,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 23,
"path": "/FantasyGameInventory.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "def displayInventory(inventory):\n print('Inventory:')\n inventorySum = 0\n for key in inventory.keys():\n itemCount = inventory[key]\n print(str(itemCount) + ' ' + key)\n inventorySum += itemCount\n print('Total number of items: ' + str(inventorySum))\n\ndef addToInventory(inventory, addedItems):\n for item in addedItems:\n if item in inventory.keys():\n inventory[item] += 1\n else:\n inventory.setdefault(item, 1) \n\ndef main():\n stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\n dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\n addToInventory(stuff, dragonLoot)\n displayInventory(stuff)\n\nmain()\n"
},
{
"alpha_fraction": 0.6098238825798035,
"alphanum_fraction": 0.6126042604446411,
"avg_line_length": 32.20000076293945,
"blob_id": "685cf4a728fa9e857126909414ac669606cac712",
"content_id": "20c1fbc1fe8a37899fd165f847e58f85a647391a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2158,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 65,
"path": "/RegexSearch.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import os, re, sys\n\ndef checkFolderPath(folderPath):\n return (os.path.exists(folderPath))\n \ndef checkIfTxt(fileName):\n fileSplit = fileName.split('.')\n return (fileSplit[-1] == 'txt')\n\ndef searchFile(fullFilePath):\n fileContent = ''\n if (os.path.isfile(fullFilePath)):\n theFile = open(fullFilePath, 'r')\n fileContent = theFile.read()\n return fileContent\n\ndef findNumberOf(searchPhrase, fileContent):\n searchRegex = re.compile(searchPhrase, re.IGNORECASE)\n contentList = searchRegex.findall(fileContent)\n if (len(contentList) > 0):\n print(searchPhrase + ' found!')\n return len(contentList)\n else:\n return 0\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n useDefaultValid = False\n usedDefault = True\n while (not useDefaultValid):\n if (useCommandLine and usedDefault):\n useDefault = sys.argv[1]\n else:\n useDefault = input('Use default filepath? (y/n): \\n')\n if (useDefault.lower() == 'y'):\n folderPath = str(os.path.expanduser('~/Documents'))\n useDefaultValid = True\n elif (useDefault.lower() == 'n'):\n useDefaultValid = True\n else:\n print('Could not recognize input. Please enter \\neither y for yes, or n for no\\n')\n usedDefault = False\n if (useDefaultValid):\n folderPathValid = True\n else:\n folderPathValid = False\n while (not folderPathValid): \n folderPath = input('Please enter valid folder path: \\n')\n folderPathValid = checkFolderPath(folderPath)\n if (not folderPathValid):\n print('Folder path invalid!\\n')\n if (useCommandLine):\n phrase = sys.argv[2]\n else:\n phrase = input('Enter a phrase to search text files for: \\n')\n for file in os.listdir(folderPath):\n if (checkIfTxt(file)):\n fullPath = folderPath + '/' + file\n content = searchFile(fullPath)\n searchContentOccurences = findNumberOf(phrase, content)\n print(str(searchContentOccurences) + ' occurences in ' + file)\n\nmain()\n"
},
{
"alpha_fraction": 0.5374220609664917,
"alphanum_fraction": 0.5457380414009094,
"avg_line_length": 20.953489303588867,
"blob_id": "e8f250ad8c5dc59aa605adb9abc1fc9d0aa59df0",
"content_id": "321049cf72b94545d98c5001dbd8f23e65bc3045",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 43,
"path": "/Collatz.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import sys\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n startingNumber = enterNumber(useCommandLine)\n print()\n checkIfOne(startingNumber)\n \n\ndef collatz(number):\n if number % 2 == 0:\n expression = number // 2\n print(expression)\n return (expression)\n else:\n expression = 3 * number + 1\n print(expression)\n return (expression)\n\ndef enterNumber(commandLineFlag):\n while True:\n print('Enter a number: ')\n if (commandLineFlag):\n number = sys.argv[1]\n print(number)\n else:\n number = input()\n try:\n return (int(number))\n except ValueError:\n print('That is not a valid integer')\n\ndef checkIfOne(number):\n notOne = True\n value = number\n while notOne:\n value = collatz(value)\n if value == 1:\n notOne = False\n\nmain()\n \n \n"
},
{
"alpha_fraction": 0.5681381821632385,
"alphanum_fraction": 0.5786948204040527,
"avg_line_length": 32.6129035949707,
"blob_id": "43bbee259dafb31260755b348821808268852011",
"content_id": "857989368cd0cc3e79774a562cf81317eab24536",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1042,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 31,
"path": "/TablePrinter.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "def printTable(table):\n longestStringInRow = []\n longestStringLen = 0\n for row in table:\n for item in row:\n if (len(item) > longestStringLen):\n longestStringLen = len(item)\n longestStringInRow.append(longestStringLen)\n longestStringLen = 0\n rowIndex = 0\n itemIndex = 0\n countedItems = 0\n totalItems = len(table) * len(table[0])\n while (countedItems < totalItems):\n justifyLen = longestStringInRow[rowIndex] - len(table[rowIndex][itemIndex])\n if (rowIndex < len(table) - 1):\n print((\" \" * justifyLen) + table[rowIndex][itemIndex].rjust(justifyLen) + \" \", end=\"\")\n rowIndex += 1\n else:\n print((\" \" * justifyLen) + table[rowIndex][itemIndex].rjust(justifyLen))\n rowIndex = 0\n itemIndex += 1\n countedItems += 1\n\ndef main():\n tableData = [['apples', 'oranges', 'cherries', 'banana'],\n['Alice', 'Bob', 'Carol', 'David'],\n['dogs', 'cats', 'moose', 'goose']]\n printTable(tableData)\n\nmain()\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5791738629341125,
"avg_line_length": 29.578947067260742,
"blob_id": "662317fcafb9008dfc7a019e5e9cfcbc97c47bcd",
"content_id": "9d1658fec6416c9d086f8171294203d366211c33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 38,
"path": "/DeletingUnneededFiles.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import shutil, os, sys\n\ndef findAndDeleteFiles(folderpath, size):\n for folderName, subfolders, filenames in os.walk(folderpath):\n path = \"\"\n for filepath in filenames:\n path = folderName + \"/\" + filepath\n if (os.path.getsize(path) > size):\n print(path)\n #os.unlink(path)\n\ndef enterFolderPath(commandLineFlag, prompt):\n invalidFolderPath = True\n folderPath = \"\"\n while (invalidFolderPath):\n print(prompt)\n if (commandLineFlag):\n folderPath = sys.argv[1]\n if (os.path.exists(folderPath)):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n else:\n folderPath = input()\n if os.path.exists(folderPath):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n return folderPath\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n folderPath = enterFolderPath(useCommandLine, \"Enter a folder to search through: \")\n findAndDeleteFiles(folderPath, 1000000)\n\nmain()\n"
},
{
"alpha_fraction": 0.40549829602241516,
"alphanum_fraction": 0.5292096138000488,
"avg_line_length": 19,
"blob_id": "b6f2298181a6a93427b2e9371fa4a7aea004c958",
"content_id": "a34d783795b081d63c7893a404079a73e8b78fcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/RollHighGame.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import random\n\ntruetotal = 0\nputIn = (125)\nfor j in range(0, 50):\n total = 0\n for i in range(0, 10000):\n i = random.randint(0, 10000);\n if i == 8888:\n total += 1000000\n total -= 125\n print(total)\n truetotal += total\nprint(truetotal/50)\n\n \n \n"
},
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.5690396428108215,
"avg_line_length": 31.79032325744629,
"blob_id": "ea95334b767a2dc01e0475493742f2c2807206b4",
"content_id": "2fb01001af3f6a4ebbd3ccb819e87bb1f426c446",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2093,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 62,
"path": "/SelectiveCopy.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "# python3\n# SelectiveCopy.py\n\nimport shutil, os, sys\n \ndef findAndMoveFiles(folderpath, extension, newfolderpath):\n for folderName, subfolders, filenames in os.walk(folderpath):\n path = \"\"\n for filepath in filenames:\n file_name, file_extension = os.path.splitext(filepath)\n if file_extension == extension:\n path = folderName + \"/\" + filepath\n shutil.move(path, newfolderpath)\n\ndef enterFolderPath(commandLineFlag, prompt):\n invalidFolderPath = True\n folderPath = \"\"\n while (invalidFolderPath):\n print(prompt)\n if (commandLineFlag):\n folderPath = sys.argv[1]\n if (os.path.exists(folderPath)):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n else:\n folderPath = input()\n if os.path.exists(folderPath):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n return folderPath\n\ndef enterExtension(commandLineFlag):\n invalidExtension = True\n extension = \"\"\n while (invalidExtension):\n print(\"Enter an extension: \")\n if (commandLineFlag):\n extension = sys.argv[2]\n if (extension[0] == '.' and len(extension) > 1):\n invalidExtension = False\n else:\n print(\"Invalid extension!\")\n else:\n extension = input()\n if (extension[0] == '.' and len(extension) > 1):\n invalidExtension = False\n else:\n print(\"Invalid extension!\")\n return extension\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n folderPath = enterFolderPath(useCommandLine, \"Enter a folder to search through: \")\n extension = enterExtension(useCommandLine)\n newFolderPath = enterFolderPath(useCommandLine, \"Enter a folder to move to: \")\n findAndMoveFiles(folderPath, extension, newFolderPath)\n\nmain()\n \n \n \n\n\n \n \n \n"
},
{
"alpha_fraction": 0.6481069326400757,
"alphanum_fraction": 0.6503340601921082,
"avg_line_length": 31.071428298950195,
"blob_id": "5ef0f65ea0d23299ec04b0fee5dab2c21cd68b50",
"content_id": "c82c6de29cb1cd5a4651fa5536eb5a45603a5a4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1347,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 42,
"path": "/StrongPasswordDetection.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import re, sys\n\ndef getPassword(commandLineFlag):\n if (commandLineFlag):\n password = sys.argv[1]\n else:\n password = input(\"Please enter a valid password: \")\n return password\n\ndef validatePassword(password):\n validPassword = False\n uppercasePasswordRegex = re.compile(r'[A-Z]')\n lowercasePasswordRegex = re.compile(r'[a-z]')\n digitPasswordRegex = re.compile(r'\\d')\n eightCharsPasswordRegex = re.compile(r'(.*){8,}')\n if (uppercasePasswordRegex.search(password) != None and\n lowercasePasswordRegex.search(password) != None and\n digitPasswordRegex.search(password) != None and\n eightCharsPasswordRegex != None):\n\n validPassword = True\n return validPassword\n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n print(\"A strong password has at least eight characters,\")\n print(\"contains both uppercase and lowercase characters,\")\n print(\"and has at least one digit.\")\n passwordValidated = False\n while (not passwordValidated):\n thePw = getPassword(useCommandLine)\n if (validatePassword(thePw)):\n passwordValidated = True\n print(\"Valid password!\")\n else:\n print(\"Password invalid! Please try again.\")\n print(\"\")\n useCommandLine = False\n\nmain()\n"
},
{
"alpha_fraction": 0.6346153616905212,
"alphanum_fraction": 0.6431623697280884,
"avg_line_length": 21.238094329833984,
"blob_id": "aeee422d226c4cb412e7ad8f2d52674715243b2e",
"content_id": "e3aaf1628c1adb70ed04702b6e7324d74ae2d4d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 21,
"path": "/HelloWorld.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import sys\n\nuseCommandLine = False\nif (len(sys.argv) > 1):\n useCommandLine = True\n\nprint('Hello world!')\nprint('What is your name?')\nif (useCommandLine):\n myName = sys.argv[1]\nelse:\n myName = input()\nprint('It is good to meet you, ' + myName)\nprint('The length of your name is:')\nprint(len(myName))\nprint('What is your age?')\nif (useCommandLine):\n myName = sys.argv[2]\nelse:\n myAge = input()\nprint('You will be ' + str(int(myAge) + 1) + ' in a year.')\n\n"
},
{
"alpha_fraction": 0.628558337688446,
"alphanum_fraction": 0.6303948760032654,
"avg_line_length": 33.935482025146484,
"blob_id": "d7bbecd6436d9a860173ac761c3a53cd3abda5b6",
"content_id": "c63315cab0f82d2517db5c2a767389a727072878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2178,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 62,
"path": "/MadLibs.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import os, re, string\n\ndef getFilePath():\n return str(os.path.expanduser('~/Documents/madlibs.txt'))\n\ndef stripChars(string, characters):\n exclude = set(characters)\n return ''.join(ch for ch in string if ch not in exclude)\n\ndef findWordTypes(content):\n acceptedTypes = ['ADJECTIVE', 'NOUN', 'ADVERB', 'VERB']\n wordTypesList = []\n\n wordTypes = content.split()\n for wordType in wordTypes:\n wordType = wordType.upper()\n wordTypeStrip = string.punctuation + \"\\n\"\n wordType = stripChars(wordType, wordTypeStrip)\n if (wordType in acceptedTypes):\n if (wordType in wordTypesList):\n count = 0\n for words in wordTypesList:\n words = stripChars(words, string.digits)\n if (wordType == words):\n count+=1\n wordTypesList.append(wordType + str(count))\n else:\n wordTypesList.append(wordType)\n return wordTypesList \n\ndef createWordDictionary(wordTypes, words):\n wordDictionary = dict(zip(wordTypes, words))\n return wordDictionary\n \ndef replaceWords(fileContent, wordDictionary):\n for wordType in wordDictionary.keys():\n strippedWordType = stripChars(wordType, string.digits)\n fileContentRegex = re.compile(strippedWordType)\n fileContent = fileContentRegex.sub(wordDictionary[wordType], fileContent, count=1)\n return fileContent \n\ndef main():\n theFilePath = getFilePath()\n theFile = open(theFilePath, 'r')\n fileContent = theFile.read()\n wordTypesList = findWordTypes(fileContent)\n wordList = []\n word = ''\n for wordType in wordTypesList:\n wordType = stripChars(wordType, string.digits)\n if (wordType[0] == 'a'):\n word = input('Enter an %s:\\n' % (wordType))\n else:\n word = input('Enter a %s:\\n' % (wordType))\n wordList.append(word)\n wordDictionary = createWordDictionary(wordTypesList, wordList)\n newContent = replaceWords(fileContent, wordDictionary)\n theFile = open(theFilePath, 'w')\n theFile.write(newContent)\n theFile.close()\n print(newContent)\nmain()\n \n \n\n\n"
},
{
"alpha_fraction": 0.6565144062042236,
"alphanum_fraction": 0.6683587431907654,
"avg_line_length": 33.764705657958984,
"blob_id": "b0351dcfd14a6f5d8c6da13efa911e1fe9932eb2",
"content_id": "e9af72b0da0f16ca41b88f1030196eaee3a1e7d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 34,
"path": "/multiplicationTable.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import sys, openpyxl\n\ndef getInteger():\n if (len(sys.argv) > 1):\n tableSize = sys.argv[1]\n else:\n print('Please enter a positive integer!')\n tableSize = input()\n while not (tableSize.isdigit):\n print('Please enter a positive integer!')\n tableSize = input()\n print(tableSize)\n return int(tableSize)\n\ndef createWorkbook():\n multiplicationWorkbook = openpyxl.Workbook()\n activeSheet = multiplicationWorkbook.active\n activeSheet.title = 'Multiplication Table'\n return multiplicationWorkbook\n\ndef addValuesToSheet(workbook, tableSize):\n sheet = workbook['Multiplication Table']\n for i in range(2, tableSize + 2):\n sheet.cell(row=i, column=1).value = i-1\n sheet.cell(row=i, column=1).font = openpyxl.styles.Font(bold=True)\n sheet.cell(row=1, column=i).value = i-1\n sheet.cell(row=1, column=i).font = openpyxl.styles.Font(bold=True)\n for j in range(1, tableSize+1):\n sheet.cell(row=i, column=j+1).value = (i-1)*(j)\n workbook.save('multiplicationTableSize{}.xlsx'.format(tableSize))\n\nmultiSize = getInteger()\nmultiWB = createWorkbook()\naddValuesToSheet(multiWB, multiSize)\n"
},
{
"alpha_fraction": 0.57485032081604,
"alphanum_fraction": 0.5798403024673462,
"avg_line_length": 30.809524536132812,
"blob_id": "47374e3691a6039c42145b169b230f9209457e96",
"content_id": "81f6ff5d215c58237d1bc397210a83cc45d8e282",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2004,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 63,
"path": "/FillingGaps.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import shutil, os, sys\n\ndef findGap(folderpath, prefix):\n for folderName, subfolders, filenames in os.walk(folderpath):\n path = \"\"\n temp = 0\n prefixSize = len(prefix)\n for filepath in filenames:\n filename, file_extension = os.path.split(filepath)\n if len(filename) > prefixSize:\n if filename[0:prefixSize] == prefix:\n path = folderName + \"/\" + filepath\n if (filename[-1] > temp + 1):\n addFile(filename[0:-1], file_extension, folderName, temp)\n temp += 1\n\ndef addFile(fileprefix, extension, folder, number):\n filename = fileprefix + str(number) + extension\n filepath = os.path.join(folder, filename)\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n else:\n print(\"File not added! Please check for an issue.\")\n \n\ndef enterFolderPath(commandLineFlag, prompt):\n invalidFolderPath = True\n folderPath = \"\"\n while (invalidFolderPath):\n print(prompt)\n if (commandLineFlag):\n folderPath = sys.argv[1]\n if (os.path.exists(folderPath)):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n else:\n folderPath = input()\n if os.path.exists(folderPath):\n invalidFolderPath = False\n else:\n print(\"Invalid folder path!\")\n return folderPath\n\ndef enterPrefix(commandLineFlag, prompt):\n prefix = \"\"\n print(prompt)\n if (commandLineFlag):\n prefix = sys.argv[2]\n else:\n prefix = input()\n return prefix\n \n\ndef main():\n useCommandLine = False\n if (len(sys.argv) > 1):\n useCommandLine = True\n folderPath = enterFolderPath(useCommandLine, \"Enter a folder to search through: \")\n thePrefix = enterFolderPath(useCommandLine, \"Enter a file prefix for gap location: \")\n findGap(folderPath, thePrefix)\n\nmain()\n"
},
{
"alpha_fraction": 0.5831533670425415,
"alphanum_fraction": 0.5950323939323425,
"avg_line_length": 24.72222137451172,
"blob_id": "5f3776a02057734382d5d893192bab962d2e1a47",
"content_id": "eb8e4a3466451b1d98a71d54e125b837d2179dfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 36,
"path": "/GuessTheNumber.py",
"repo_name": "athola/PythonTheHardWay",
"src_encoding": "UTF-8",
"text": "import random\n\ndef main():\n print('I am thinking of a number between 1 and 20')\n guessingNumber()\n \ndef inputGuess():\n while True:\n print('Take a guess.')\n guess = input()\n if guess.isdigit():\n if int(guess) <= 20 and int(guess) >= 1:\n return int(guess)\n\ndef checkGuess(guess, theNumber):\n if guess < theNumber:\n print('Your guess is too low')\n return True\n elif guess > theNumber:\n print('Your guess is too high')\n return True\n else:\n return False\n\ndef guessingNumber():\n number = random.randint(1, 20)\n incorrect = True\n numberOfGuesses = 0\n while incorrect:\n yourGuess = inputGuess()\n guessIncorrect = checkGuess(yourGuess, number)\n incorrect = guessIncorrect\n numberOfGuesses += 1\n print('Good job! You guessed my number in ' + str(numberOfGuesses) + ' guesses!')\n \nmain()\n"
}
] | 17 |
HassankSalim/Learning_Tensorflow
|
https://github.com/HassankSalim/Learning_Tensorflow
|
d33aa5253b4c29b23a4f1d900400959e75acf19c
|
29710f92c398572344ca477448e5bcdc8aa3086a
|
169668c9db0cf28d3cca0273731d572d714dde68
|
refs/heads/master
| 2021-01-20T09:00:44.531410 | 2017-08-08T03:15:47 | 2017-08-08T03:15:47 | 90,211,175 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.64361971616745,
"alphanum_fraction": 0.6685307621955872,
"avg_line_length": 33.50877380371094,
"blob_id": "4d40f7f67e7d5fc22630adfd24c658d67648abf6",
"content_id": "1f343406f8aa135a7d6669023e582b2a1a8e3d1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1967,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 57,
"path": "/nerual_net.py",
"repo_name": "HassankSalim/Learning_Tensorflow",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot = True)\n\n\ninput_layer = 784\nhidden_layer = 164\noutput_layer = 10\nlearning_rate = 0.1\nepochs = 25\nbatch_size = 100\n\nx = tf.placeholder(tf.float32, [None, input_layer])\ny = tf.placeholder(tf.float32, [None, output_layer])\n\nWeights = {\n 'w1' : tf.Variable(tf.random_normal([input_layer, hidden_layer])),\n 'w2' : tf.Variable(tf.random_normal([hidden_layer, output_layer]))\n}\nbias = {\n 'b1' : tf.Variable(tf.random_normal([hidden_layer])),\n 'b2' : tf.Variable(tf.random_normal([output_layer]))\n}\n\nlayer_1 = tf.add(tf.matmul(x, Weights['w1']), bias['b1'])\nlayer_1_activated = tf.nn.sigmoid(layer_1)\nlayer_output = tf.add(tf.matmul(layer_1_activated, Weights['w2']), bias['b2'])\nlayer_output_activated = tf.nn.sigmoid(layer_output)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=layer_output_activated, labels=y))\nopt = tf.train.AdamOptimizer().minimize(cost)\ninit = tf.global_variables_initializer()\n\nprint 'Starting traing'\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(epochs):\n iteration = int(mnist.train.num_examples/batch_size)\n average_cost = 0.0\n for j in range(iteration):\n x_batch, y_batch = mnist.train.next_batch(batch_size)\n _, mCost = sess.run([opt, cost], feed_dict={x:x_batch, y:y_batch})\n\n average_cost += mCost\n average_cost /= iteration\n print('Iteration %02d'%i, 'Cost {:9}'.format(average_cost))\n\n saver = tf.train.Saver()\n save_path = saver.save(sess, \"./model.ckpt\")\n print('Traing Complete and model saved')\n\n correct_pred = tf.equal(tf.argmax(layer_output_activated, 1), tf.argmax(y, 1))\n acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n print \"Accuracy: {:.9f}\".format(acc.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]}))\n"
},
{
"alpha_fraction": 0.6095617413520813,
"alphanum_fraction": 0.6407702565193176,
"avg_line_length": 30.375,
"blob_id": "dc71a0fcd9a1d1f988b878802520e27ceee2f025",
"content_id": "79abab058da662445f38a61f0d6bec8f3e32a808",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1506,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 48,
"path": "/log_reg.py",
"repo_name": "HassankSalim/Learning_Tensorflow",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\nmnist = input_data.read_data_sets('/home/hassan/python/MNIST_data/', one_hot=True)\n\nlearning_rate = 0.0001\nepochs = 35\nbatch_size = 100\ndisplay_time = 1\n\nx = tf.placeholder(tf.float32, [None, 784])\ny = tf.placeholder(tf.float32, [None, 10])\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\npred = tf.nn.softmax(tf.add(tf.matmul(x, W), b))\n\ncost = tf.reduce_mean(tf.reduce_sum(-y*tf.log(pred), reduction_indices=1))\nopt = tf.train.AdamOptimizer().minimize(cost)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for epoche in range(epochs):\n avg_cost = 0\n total_batch = int(mnist.train.num_examples/batch_size)\n for j in range(total_batch):\n x_batch, y_batch = mnist.train.next_batch(batch_size)\n _, c = sess.run([opt, cost], feed_dict={x:x_batch, y:y_batch})\n\n avg_cost += c\n\n avg_cost /= total_batch\n\n if not (epoche+1) % display_time:\n print 'Epoch: %04d '%(epoche+1), 'Cost {:.9f}'.format(avg_cost)\n print('Optimization Finished')\n\n saver = tf.train.Saver()\n save_path = saver.save(sess, \"./model.ckpt\")\n print('Saved')\n\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n print \"Accuracy: {:.9f}\".format(accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]}))\n"
}
] | 2 |
rogojagad/kompresi-data
|
https://github.com/rogojagad/kompresi-data
|
f70b3a947fabc9154973d7a8c1a6deb49ffe0e6b
|
caf82d9068ededb23e0e2af6ce4df0fbede5c86e
|
9b144edec3c73ce3b468523494ee934bd6fdc86d
|
refs/heads/master
| 2022-05-23T07:26:34.986324 | 2020-05-02T12:54:09 | 2020-05-02T12:54:09 | 260,686,612 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5962145328521729,
"alphanum_fraction": 0.5962145328521729,
"avg_line_length": 17.705883026123047,
"blob_id": "b623518b7f8fd84118decef80826229e64cc7be2",
"content_id": "bf9b646cb76ec6ddff938d3c31ff1a625888dee3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 17,
"path": "/shannon-fano/shannon-fano.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "from encoder import Encoder\nfrom decoder import Decoder\n\nclass ShannonFano:\n def __init__(self):\n self.encoder = Encoder()\n self.decoder = Decoder()\n\n def run(self):\n self.encoder.runEncode()\n\n self.decoder.runDecode()\n\nif __name__ == \"__main__\":\n sf = ShannonFano()\n\n sf.run()"
},
{
"alpha_fraction": 0.5759162306785583,
"alphanum_fraction": 0.5811518430709839,
"avg_line_length": 18.85714340209961,
"blob_id": "da6d1d929e70f9d49555b8f90aebaeb6e2c89e73",
"content_id": "6674baaaccc7b7822a8a6a71492fd0f7df21649d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1528,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 77,
"path": "/tunstall/main.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "from collections import Counter\nimport json\n\ndef getCountChar(text):\n data = dict(Counter(text))\n\n return data\n\ndef getCharProb(charCount, strLen):\n charProb = dict()\n\n for char, count in charCount.items():\n charProb[char] = count / strLen\n\n return charProb\n\ndef getCharProbList(charProb):\n probLst = list()\n charLst = list()\n\n for char, prob in charProb.items():\n charLst.append(char)\n probLst.append(prob)\n\n return charLst, probLst\n\ndef tunstall(alphabet, dist, n):\n size = len(alphabet)\n iterations = (2 ** n - size) // (size - 1)\n \n t = []\n for i, s in enumerate(alphabet):\n t.append( [s, dist[i]] )\n \n for _ in range(iterations):\n d = max(t, key=lambda p:p[1])\n ind = t.index(d)\n seq, seqProb = d\n \n for i, s in enumerate(alphabet):\n t.append( [seq + s, seqProb * dist[i]] )\n del t[ind]\n \n for i, entry in enumerate(t):\n entry[1] = '{:03b}'.format(i)\n \n return t\n\ndef toJson(result):\n temp = dict()\n\n for data in result:\n temp[data[0]] = data[1]\n\n return temp\n\nif __name__==\"__main__\":\n text = input()\n\n charCount = getCountChar(text)\n \n charProb = getCharProb(charCount, len(text))\n\n alphabet, prob = getCharProbList(charProb)\n\n print(alphabet)\n\n print(prob)\n\n n = len(alphabet)\n\n result = tunstall(alphabet, prob, n)\n\n resultInJson = toJson(result)\n\n with open(\"result.json\", \"w\") as result:\n json.dump(resultInJson, result)"
},
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.5258620977401733,
"avg_line_length": 20.10909080505371,
"blob_id": "32eb10dd689eed063e5c986b2f5bf375f5da75e3",
"content_id": "9c05bf00dd152bbe45a03fdd331113782e07d97e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 55,
"path": "/shannon-fano/decoder.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "# Python 3\n\nimport json\nimport time\n\ndebug = \"\"\n\nclass Decoder:\n def __init__(self):\n self.encodedText = \"\"\n\n def runDecode(self): \n with open('tree.json', 'r') as tree:\n self.tree = json.load(tree)\n\n self.readDecoded()\n\n codeList = self.encodedText.split(' ')\n\n self.decodeAndPrint(codeList)\n\n self.performanceMeasure()\n\n def readDecoded(self):\n source = open(\"output-encoded.txt\", \"r\")\n self.encodedText = source.read()\n\n def decodeAndPrint(self, codeList):\n result = \"\"\n \n for char in codeList:\n # print(char)\n alphabet = self.getAlphabet(char)\n debug = char\n\n try:\n result += alphabet\n except TypeError:\n print(debug)\n # print(e)\n\n output = open(\"output-decoded.txt\", \"w\")\n output.write(result)\n\n def getAlphabet(self, target):\n for char, code in self.tree.items():\n if target == code: \n return char\n \n def performanceMeasure(self): pass\n\nif __name__ == \"__main__\":\n dc = Decoder()\n\n dc.runDecode()"
},
{
"alpha_fraction": 0.6078914999961853,
"alphanum_fraction": 0.6097410321235657,
"avg_line_length": 27.98214340209961,
"blob_id": "01806205f2f26e5e2d8d8ed9860ae247915ce24a",
"content_id": "1ec697bdc83a6f4ab083fc765c9db78e30314731",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 1624,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 56,
"path": "/kodingan joel/Site.Master.cs",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "using System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\nusing System.Web;\nusing System.Web.UI;\nusing System.Web.UI.WebControls;\n\nnamespace Adaptive_Huffman\n{\n public partial class SiteMaster : MasterPage\n {\n HuffmanTree huffmanTree = new HuffmanTree();\n protected void Page_Load(object sender, EventArgs e)\n {\n\n }\n\n protected void ButtonCompress_Click(object sender, EventArgs e)\n {\n string DataToCompress = File.ReadAllText(@\"/Input.txt\");\n \n huffmanTree.Build(DataToCompress);\n\n BitArray encoded = huffmanTree.Encode(DataToCompress);\n\n StreamWriter file = new StreamWriter(@\"/Compressed_Input.txt\");\n foreach (bool bit in encoded)\n {\n file.Write((bit ? 1 : 0));\n }\n file.Close();\n\n string decoded = huffmanTree.Decode(encoded);\n\n StreamWriter fileuncompressed = new StreamWriter(@\"/Uncompressed_Input.txt\");\n fileuncompressed.Write(decoded);\n fileuncompressed.Close();\n\n }\n\n protected void ButtonUncompress_Click(object sender, EventArgs e)\n {\n string DataToUncompress = File.ReadAllText(@\"/Compressed_Input.txt\");\n\n BitArray bitarray = new BitArray(DataToUncompress.Select(c => c == '1').ToArray());\n\n string decoded = huffmanTree.Decode(bitarray);\n\n StreamWriter file = new StreamWriter(@\"/Uncompressed_Input.txt\");\n file.Write(decoded);\n file.Close();\n }\n }\n}"
},
{
"alpha_fraction": 0.6015037298202515,
"alphanum_fraction": 0.6045997142791748,
"avg_line_length": 26.25301170349121,
"blob_id": "3901f54d2d56629634463da28629fc4a2cc647ec",
"content_id": "e5b6f23804e99714127dd96aaf6aa0a258ef8f42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2261,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 83,
"path": "/arithmetic_coding/decoder.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "# Python 3\nimport operator\nimport json\nimport time\nimport random\nfrom decimal import *\nfrom collections import Counter\n\nclass Decoder:\n def __init__(self):\n self.inputNum = self.readInput()\n self.charProbability = self.readProbability()\n self.srcLength = self.readSrcLength()\n self.charRange = dict()\n self.resultStr = \"\"\n\n def readSrcLength(self):\n length = open(\"source-length.txt\", \"r\")\n return int(length.read())\n\n def readProbability(self):\n with open('probability-dict.json') as data:\n return json.load(data)\n\n def readInput(self):\n source = open(\"encode-result.txt\", \"r\")\n return float(source.read())\n\n def run(self):\n self.buildRange(0,1)\n\n self.decode(self.srcLength)\n\n self.dumpResultData()\n\n def dumpResultData(self):\n output = open(\"decode-result.txt\", \"w\")\n output.write(self.resultStr)\n\n with open(\"decoder-range-dict.json\", \"w\") as tree:\n json.dump(self.charRange, tree)\n\n def decode(self, length):\n print(\"\\nMemulai Proses Decoding\\n\")\n\n startTime = time.clock()\n\n for i in range(length):\n keyChar, selectedRange = self.getRangeAndChar()\n\n print(\"Range yang dipilih : \",end=' ')\n print(selectedRange)\n print(\"Karakter didapatkan : \" + keyChar + \"\\n\")\n\n self.resultStr += keyChar\n\n self.buildRange(selectedRange[0], selectedRange[1])\n\n endTime = time.clock() - startTime\n\n print(\"Waktu yang dibutuhkan untuk encoding : \" + str(endTime))\n\n def buildRange(self, mostLower, mostUpper):\n currentLower = mostLower\n rangeDist = mostUpper - mostLower\n\n for char, prob in self.charProbability.items():\n currentUpper = (rangeDist * prob) + currentLower\n\n currentRange = [currentLower, currentUpper]\n\n self.charRange[char] = currentRange\n\n currentLower = currentUpper\n\n def getRangeAndChar(self):\n for char, probRange in self.charRange.items():\n if self.inputNum >= probRange[0] and self.inputNum < probRange[1]:\n return char, probRange\n\nif __name__ == \"__main__\":\n dc = Decoder()\n dc.run()"
},
{
"alpha_fraction": 0.552595317363739,
"alphanum_fraction": 0.5643087029457092,
"avg_line_length": 28.828767776489258,
"blob_id": "c4ed2982ecae24ff7ebb25fbbd8e34faec1311ed",
"content_id": "5979dcc9ffb3a720c75abaf6466c209a10cdde9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4354,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 146,
"path": "/shannon-fano/encoder.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "# Python 3\nimport operator\nimport json\nimport time\nfrom math import log1p\nfrom collections import Counter\n\nclass Encoder:\n def __init__(self):\n self.charDict = dict()\n self.srcText = \"\"\n self.sortedCharByOccurence = list()\n self.tree = dict()\n \n def runEncode(self):\n self.readInput()\n\n # Start time\n\n startTime = time.clock()\n\n self.makeCount()\n\n splitIndex = self.getSplitterIndex(self.sortedCharByOccurence)\n\n left = self.sortedCharByOccurence[:splitIndex]\n right = self.sortedCharByOccurence[splitIndex:]\n\n self.updateTree(left, 1, \"0\")\n self.updateTree(right, 1, \"1\")\n\n self.buildTree(left, 1, \"0\")\n self.buildTree(right, 1, \"1\")\n\n procTime = time.clock() - startTime\n\n print(\"\\nCompression time : \" + str(procTime) + \" seconds\")\n \n # print(self.charDict)\n # print(json.dumps(self.tree, indent=1))\n\n self.performanceMeasure(procTime)\n\n self.writeEncoded()\n\n self.makeTreeBackup()\n\n # print(self.sortedCharByOccurence)\n\n def readInput(self):\n source = open(\"input.txt\", \"r\")\n self.srcText = source.read()\n\n def makeCount(self):\n self.charDict = dict(Counter(self.srcText))\n\n charList = sorted(self.charDict.items(), key=operator.itemgetter(1), reverse=True)\n\n for char in charList:\n self.sortedCharByOccurence.append(char[0])\n\n def getSplitterIndex(self, charList):\n total = 0\n\n for char in charList:\n total += self.charDict[char]\n\n count = 0\n splitterIndex = 0\n\n for i in range(len(charList) // 2):\n char = charList[i]\n count += self.charDict[char]\n\n if (count - (total/2) >= 0):\n splitterIndex = i + 1\n break\n \n return splitterIndex\n\n def buildTree(self, chrList, itrCount, bit):\n # print(chrList)\n if len(chrList) == 1:\n self.updateTree(chrList[0], itrCount + 1, \"0\")\n elif len(chrList) == 2:\n self.updateTree(chrList[0], itrCount + 1, \"0\")\n self.updateTree(chrList[1], itrCount + 1, \"1\")\n else:\n splitIndex = self.getSplitterIndex(chrList)\n\n self.updateTree(chrList[:splitIndex+1], itrCount + 1, \"0\")\n self.updateTree(chrList[splitIndex+1:], itrCount + 1, \"1\")\n \n self.buildTree(chrList[:splitIndex+1], itrCount + 1, \"0\")\n self.buildTree(chrList[splitIndex+1:], itrCount + 1, \"1\")\n\n def updateTree(self, chrList, itrCount, bit):\n for char in chrList:\n if char not in self.tree:\n # self.tree[char] = [itrCount, bit, self.charDict[char]]\n self.tree[char] = {\n 'Code length' : itrCount,\n 'Code' : bit,\n 'Frequency' : self.charDict[char],\n }\n else:\n self.tree[char]['Code length'] += 1\n self.tree[char]['Code'] += bit\n\n def performanceMeasure(self, procTime):\n # output = open(\"output-encoded.txt\", \"w\")\n compressedBitsCount = 0\n\n for char, data in self.tree.items():\n compressedBitsCount += data['Code length'] * data['Frequency']\n\n originalBitCounts = len(self.srcText) * 8\n\n print(\"\\nOriginal bits count : \" + str(originalBitCounts))\n print(\"Bits count after compression : \" + str(compressedBitsCount))\n print(\"Compression ratio : \" + str( originalBitCounts /compressedBitsCount))\n print(\"Compression Speed :\" + str(originalBitCounts/procTime) + \" bit/sec\")\n print(\"Space Savings : \" + str( 1 - (compressedBitsCount / originalBitCounts) ))\n print(\"Compression Gain : \" + str( 100 * log1p( originalBitCounts /compressedBitsCount) ))\n\n def writeEncoded(self): \n output = open(\"output-encoded.txt\", \"w\")\n\n for char in self.srcText:\n output.write(self.tree[char]['Code'] + ' ')\n\n def makeTreeBackup(self):\n backup = dict()\n\n for char, data in 
self.tree.items():\n backup[char] = data[\"Code\"]\n\n with open(\"tree.json\", \"w\") as tree:\n json.dump(backup, tree)\n\nif __name__ == \"__main__\":\n sf = Encoder()\n\n sf.runEncode()\n\n # sf.runDecode()"
},
{
"alpha_fraction": 0.5601374506950378,
"alphanum_fraction": 0.5601374506950378,
"avg_line_length": 16.176469802856445,
"blob_id": "7329f7bfb1dbc3299b9a75effd00b12a106f9971",
"content_id": "030b5584f12627c18de5491ba340b78a7d22d9bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 17,
"path": "/arithmetic_coding/app.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "from encoder import Encoder\nfrom decoder import Decoder\n\nclass App:\n def __init__(self):\n self.encoder = Encoder()\n self.decoder = Decoder()\n\n def run(self):\n self.encoder.run()\n\n self.decoder.run()\n\nif __name__ == \"__main__\":\n app = App()\n\n app.run()"
},
{
"alpha_fraction": 0.5916174054145813,
"alphanum_fraction": 0.5983342528343201,
"avg_line_length": 26.992481231689453,
"blob_id": "d033161c3aac2a2ff88c98cd1d169ec23f70523f",
"content_id": "2c234a87a5828c18ebc616cf06177cbdf69928f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3722,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 133,
"path": "/arithmetic_coding/encoder.py",
"repo_name": "rogojagad/kompresi-data",
"src_encoding": "UTF-8",
"text": "# Python 3\nimport operator\nimport json\nimport time\nimport random\nfrom floatToBin import *\nfrom decimal import *\nfrom collections import Counter\nfrom math import log1p\n\n# Input generated at http://www.unit-conversion.info/texttools/random-string-generator/\n\nclass Encoder:\n def __init__(self):\n self.probabilityDict = dict()\n self.charRange = dict()\n self.srcText = \"\"\n self.finalResult = 0\n getcontext().prec = 2\n\n def run(self):\n self.prepare()\n \n self.buildRange(0, 1)\n\n self.encode()\n\n self.performanceMeasure()\n\n self.dumpLookupData()\n\n def prepare(self):\n self.readSource()\n\n self.setCharProb()\n\n self.checkProbTruth()\n\n def dumpLookupData(self):\n with open(\"probability-dict.json\", \"w\") as tree:\n json.dump(self.probabilityDict, tree)\n\n with open(\"encoder-range-dict.json\", \"w\") as tree:\n json.dump(self.charRange, tree)\n\n length = open(\"source-length.txt\", \"w\")\n length.write(str(len(self.srcText)))\n\n def buildRange(self, mostLower, mostUpper):\n currentLower = mostLower\n for char, prob in self.probabilityDict.items():\n currentUpper = ((mostUpper - mostLower) * prob) + currentLower\n\n currentRange = [currentLower, currentUpper]\n\n self.charRange[char] = currentRange\n\n currentLower = currentUpper\n # print(self.charRange)\n\n def readSource(self):\n source = open(\"raw_src.txt\", \"r\")\n self.srcText = source.read().strip()\n\n def setCharProb(self):\n charNum = int(input(\"Berapa banyak karakter yang ingin dimasukkan?\\n\"))\n\n for i in range(charNum):\n charAndProb = input(\"Masukkan karakter dan probability nomor \" + str(i + 1) + \"\\n\")\n \n self.assignCharProb(charAndProb.split(' ')[0], float(charAndProb.split(' ')[1]))\n\n def assignCharProb(self, key, prob):\n self.probabilityDict[key] = prob\n\n def checkProbTruth(self):\n sum = 0\n\n for char, prob in self.probabilityDict.items():\n sum += prob\n print(sum)\n if sum != 1:\n print(\"\\nJumlah total probability tidak sama dengan 1 !!!!\")\n exit()\n\n def encode(self):\n print(\"\\nMemulai Proses Encoding\\n\")\n\n startTime = time.clock()\n\n selectedRange = []\n\n for char in self.srcText:\n selectedRange = self.charRange[char]\n\n print(\"Karakter saat ini adalah : \" + char)\n print(self.charRange)\n print(\"Selected range : \", end='')\n print(selectedRange, end=\"\\n\\n\")\n\n self.buildRange(selectedRange[0], selectedRange[1])\n\n randomPicked = random.uniform(selectedRange[0],selectedRange[1])\n\n self.finalResult = randomPicked\n\n print(\"Angka yang diambil : \" + str(randomPicked))\n\n print(\"Waktu yang dibutuhkan untuk encoding : \" + str(time.clock() - startTime))\n\n result = open(\"encode-result.txt\", \"w\")\n result.write(str(self.finalResult))\n\n def performanceMeasure(self):\n resultSize = self.getResultSize()\n\n sourceSize = len(self.srcText) * 8\n\n print(\"Compression ratio : \" + str(sourceSize / resultSize))\n print(\"Space Savings : \" + str( 1 - (resultSize / sourceSize) ))\n print(\"Compression Gain : \" + str( 100 * log1p( sourceSize / resultSize) ))\n\n def getResultSize(self):\n converted = float_bin(self.finalResult, places = 7)\n\n decimalLen = len(converted.split(\".\")[0])\n floatingLen = len(converted.split(\".\")[1])\n return decimalLen + floatingLen\n\nif __name__ == \"__main__\":\n enc = Encoder()\n\n enc.run()"
}
] | 8 |
batiquinianchris/function-based
|
https://github.com/batiquinianchris/function-based
|
c8ffba7b9339a740d378f7c1c732c623fda5da19
|
739e9d78798d43384e8ba5fd240e40900236b694
|
284196d3617e1b4740d6d4c59a3286119a157004
|
refs/heads/master
| 2021-01-01T17:13:44.424791 | 2017-07-22T12:03:57 | 2017-07-22T12:03:57 | 98,028,621 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 10,
"blob_id": "c1dab42593a812e981e38969aa04dca6b0469cdd",
"content_id": "5755904f3de97e0bce3cf5ece4d684028c6764e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/README.md",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "# function-based\nwang\n"
},
{
"alpha_fraction": 0.6533032059669495,
"alphanum_fraction": 0.6583850979804993,
"avg_line_length": 39.20454406738281,
"blob_id": "d15a2c4bbe59ceeb2b72c243324c2bd7a903ae91",
"content_id": "328e93e1fdbf3a520c9f6717a8325f09181b9598",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1771,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 44,
"path": "/FirstProj/shop/views.py",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.forms import ModelForm\nfrom shop.models import Job, Supplier, Customer, Parts, Technician, Vehicle\nfrom django.views import generic\n\n\nclass IndexView(generic.TemplateView):\n template_name = \"shop/index.html\"\n\nclass ProfileView(generic.TemplateView):\n template_name = \"shop/profile.html\"\n\nclass Technician(ModelForm):\n class Meta:\n model = Technician\n fields = ['last_name', 'first_name', 'mid_name', 'gender', 'birth_date', 'address', 'spec_area']\n\n def technician_list(request, template_name='shop/technician.html'):\n all_tech = Technician.objects.all()\n data = {}\n data['object_list'] = all_tech\n return render(request, template_name, {'all_tech': all_tech})\n\n def technician_create(request, template_name='servers/technician_form.html'):\n form = Technician(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('technician_list')\n return render(request, template_name, {'form': form})\n\n def technician_update(request, pk, template_name='shop/technician_form.html'):\n technician = get_object_or_404(Technician, pk=pk)\n form = Technician(request.POST or None, instance=technician)\n if form.is_valid():\n form.save()\n return redirect('server_list')\n return render(request, template_name, {'form': form})\n\n def technician_delete(request, pk, template_name='shop/technician.html'):\n technician = get_object_or_404(Technician, pk=pk)\n if request.method == 'POST':\n technician.delete()\n return redirect('tech_list')\n return render(request, template_name, {'object': technician})\n\n\n"
},
{
"alpha_fraction": 0.6892884373664856,
"alphanum_fraction": 0.7049885988235474,
"avg_line_length": 33.79294967651367,
"blob_id": "59237b011e444eda755e992934e6a178f71853fd",
"content_id": "10e445e83e19ed481b31ee681102683e4bfbf2ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7898,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 227,
"path": "/FirstProj/shop/models.py",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.urlresolvers import reverse\n\nclass JobType(models.Model):\n remarks = models.CharField(max_length=100)\n\n def __str__(self):\n return self.remarks\n\nclass Customer(models.Model):\n cust_name = models.CharField(max_length=100)\n address = models.CharField(max_length=200)\n tel_no = models.CharField(max_length=50)\n allow_ar = models.BooleanField(default=False)\n bad_acc = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n return reverse('shop:customer')\n\n def __str__(self):\n return self.cust_name\n\nclass Vehicle(models.Model):\n plate_no = models.CharField(max_length=15, primary_key=True)\n make = models.CharField(max_length=15)\n model = models.CharField(max_length=15)\n engine_no = models.CharField(max_length=50)\n chasis_no = models.CharField(max_length=50)\n color = models.CharField(max_length=50)\n type = models.CharField(max_length=15)\n fkid_cust = models.ForeignKey(Customer, on_delete=models.CASCADE, default=0)\n\n def get_absolute_url(self):\n return reverse('shop:vehicle')\n\n def __str__(self):\n return self.plate_no\n\n\nclass Technician(models.Model):\n last_name = models.CharField(max_length=30)\n first_name = models.CharField(max_length=20)\n mid_name = models.CharField(max_length=20)\n gender = models.CharField(max_length=6)\n birth_date = models.DateField(null=True, blank=True)\n address = models.CharField(max_length=200)\n spec_area = models.CharField(max_length=15)\n\n def get_absolute_url(self):\n return reverse('tech_edit', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.last_name\n\n\nclass Supplier(models.Model):\n supplier_name = models.CharField(max_length=100)\n address = models.CharField(max_length=200)\n tel_no = models.CharField(max_length=50)\n\n def get_absolute_url(self):\n return reverse('shop:supplier')\n\n def __str__(self):\n return self.supplier_name\n\n\nclass Parts(models.Model):\n part_no = models.CharField(max_length=20)\n part_desc = models.CharField(max_length=50)\n srp = models.FloatField(default=0)\n latest_rr = models.CharField(max_length=45)\n latest_rr_date = models.DateField(null=True, blank=True)\n latest_supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE, default=0)\n uom = models.CharField(max_length=5)\n\n def get_absolute_url(self):\n return reverse('shop:parts')\n def __str__(self):\n return self.part_desc\n\n\nclass Job(models.Model):\n job_desc = models.CharField(max_length=50)\n service_fee = models.FloatField(default=0)\n fkid_jobType = models.ForeignKey(JobType, on_delete=models.CASCADE, default=0)\n service_time = models.FloatField(default=0)\n service_uom = models.CharField(max_length=15)\n\n def get_absolute_url(self):\n return reverse('shop:jobs')\n\n def __str__(self):\n return self.job_desc\n\n\nclass JobOrders(models.Model):\n job_date = models.DateTimeField(null=True, blank=True)\n target_date = models.DateTimeField(null=True, blank=True)\n stat = models.CharField(max_length=10)\n remarks = models.CharField(max_length=200)\n fkid_cust = models.ForeignKey(Customer, on_delete=models.CASCADE, default=0)\n fkplate_no = models.ForeignKey(Vehicle, on_delete=models.CASCADE, default=0)\n balance_acc = models.FloatField(default=0)\n dtlast_updated = models.DateTimeField(null=True, blank=True)\n id_user = models.IntegerField(default=0)\n\n def __str__(self):\n return self.job_date\n\n\nclass JobOrderDetail(models.Model):\n fkid_jobOrder = models.ForeignKey(JobOrders, on_delete=models.CASCADE, default=0)\n fkid_job = 
models.ForeignKey(Job, on_delete=models.CASCADE, default=0)\n id_qty = models.FloatField(default=0)\n service_fee = models.FloatField(default=0)\n fkid_technician = models.ForeignKey(Technician, on_delete=models.CASCADE, default=0)\n\n def __str__(self):\n return self.fkid_jobOrder\n\n\nclass JobPartUsage(models.Model):\n fkid_jobOrder = models.ForeignKey(JobOrders, on_delete=models.CASCADE, default=0)\n fkid_parts = models.ForeignKey(Parts, on_delete=models.CASCADE, default=0)\n qty = models.FloatField(default=0)\n srp = models.FloatField(default=0)\n unit_cost = models.FloatField(default=0)\n balance_qty = models.FloatField(default=0)\n balance_cost = models.FloatField(default=0)\n\n def __str__(self):\n return self.fkid_jobOrder\n\n\nclass PartsOrder(models.Model):\n order_date = models.DateTimeField(null=True, blank=True)\n fkid_supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE, default=0)\n status = models.CharField(max_length=12)\n processed = models.DateTimeField(null=True, blank=True)\n dtlast_updated = models.DateTimeField(null=True, blank=True)\n id_user = models.IntegerField(default=0)\n\n def __str__(self):\n return self.order_date\n\n\nclass PartsOrderDetail(models.Model):\n fkid_parts = models.ForeignKey(Parts, on_delete=models.CASCADE, default=0)\n qty = models.FloatField(default=0)\n unit_cost = models.FloatField(default=0)\n balance_qty = models.FloatField(default=0)\n balance_cost = models.FloatField(default=0)\n\n def __str__(self):\n return self.fkid_parts\n\n\nclass PartsReceived(models.Model):\n receipt_dt = models.DateField(null=True, blank=True)\n fkid_supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE, default=0)\n status = models.CharField(max_length=10)\n fkid_partsOrder = models.ForeignKey(PartsOrder, on_delete=models.CASCADE, default=0)\n processed = models.DateTimeField(null=True, blank=True)\n balance_acc = models.FloatField(default=0)\n\n def __str__(self):\n return self.receipt_dt\n\n\nclass PartsRecievedDetail(models.Model):\n id_parts = models.IntegerField(null=True)\n qty = models.FloatField(default=0)\n unit_cost = models.FloatField(default=0)\n balance_qty = models.FloatField(default=0)\n balance_cost = models.FloatField(default=0)\n dtlast_upd = models.DateTimeField(null=True, blank=True)\n id_user = models.IntegerField(default=0)\n fkidParts_received = models.ForeignKey(PartsReceived, on_delete=models.CASCADE, default=0)\n\n def __str__(self):\n return self.id_parts\n\n\nclass Collections(models.Model):\n collection_date = models.DateField(null=True, blank=True)\n fkid_cust = models.ForeignKey(Customer, on_delete=models.CASCADE, default=0)\n status = models.CharField(max_length=10)\n remarks = models.CharField(max_length=200)\n processed = models.DateTimeField(null=True, blank=True)\n balance_acc = models.FloatField(default=0)\n dtlast_upd = models.DateTimeField(null=True, blank=True)\n\n def __str__(self):\n return self.fkid_cust\n\n\nclass CollectionDetail(models.Model):\n fkid_collections = models.ForeignKey(Collections, on_delete=models.CASCADE, default=0)\n fkid_jobOrders = models.ForeignKey(JobOrders, on_delete=models.CASCADE, default=0)\n amount = models.FloatField(default=0)\n\n def __str__(self):\n return self.fkid_collections\n\n\nclass Payments(models.Model):\n payment_dt = models.DateField(null=True, blank=True)\n id_supplier = models.IntegerField(default=0)\n remarks = models.CharField(max_length=100)\n status = models.CharField(max_length=10)\n processed = models.DateTimeField(null=True, blank=True)\n 
balance_acc = models.FloatField(default=0)\n dtLast_upd = models.DateField(null=True, blank=True)\n id_user = models.IntegerField(default=0)\n\n def __str__(self):\n return self.payment_dt\n\n\nclass PaymentsDetail(models.Model):\n fkid_payment = models.ForeignKey(Payments, on_delete=models.CASCADE, default=0)\n fkidParts_received = models.ForeignKey(PartsReceived, on_delete=models.CASCADE, default=0)\n amount = models.FloatField(default=0)\n\n def __str__(self):\n return self.fkid_payment\n"
},
{
"alpha_fraction": 0.5444377064704895,
"alphanum_fraction": 0.5550937056541443,
"avg_line_length": 48.37313461303711,
"blob_id": "6372651e28db51914c2639ca42055fab864b45ee",
"content_id": "574358f41c18a2b214998b372815ae0fd79ffb55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13232,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 268,
"path": "/FirstProj/shop/migrations/0001_initial.py",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2017-07-15 03:01\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CollectionDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('amount', models.FloatField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Collections',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('collection_date', models.DateField(blank=True, null=True)),\n ('status', models.CharField(max_length=10)),\n ('remarks', models.CharField(max_length=200)),\n ('processed', models.DateTimeField(blank=True, null=True)),\n ('balance_acc', models.FloatField(default=0)),\n ('dtlast_upd', models.DateTimeField(blank=True, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('cust_name', models.CharField(max_length=100)),\n ('address', models.CharField(max_length=200)),\n ('tel_no', models.CharField(max_length=50)),\n ('allow_ar', models.BooleanField(default=False)),\n ('bad_acc', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Job',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('job_desc', models.CharField(max_length=50)),\n ('service_fee', models.FloatField(default=0)),\n ('service_time', models.FloatField(default=0)),\n ('service_uom', models.CharField(max_length=15)),\n ],\n ),\n migrations.CreateModel(\n name='JobOrderDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('id_qty', models.FloatField(default=0)),\n ('service_fee', models.FloatField(default=0)),\n ('fkid_job', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Job')),\n ],\n ),\n migrations.CreateModel(\n name='JobOrders',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('job_date', models.DateTimeField(blank=True, null=True)),\n ('target_date', models.DateTimeField(blank=True, null=True)),\n ('stat', models.CharField(max_length=10)),\n ('remarks', models.CharField(max_length=200)),\n ('balance_acc', models.FloatField(default=0)),\n ('dtlast_updated', models.DateTimeField(blank=True, null=True)),\n ('id_user', models.IntegerField(default=0)),\n ('fkid_cust', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Customer')),\n ],\n ),\n migrations.CreateModel(\n name='JobPartUsage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('qty', models.FloatField(default=0)),\n ('srp', models.FloatField(default=0)),\n ('unit_cost', models.FloatField(default=0)),\n ('balance_qty', models.FloatField(default=0)),\n ('balance_cost', models.FloatField(default=0)),\n ('fkid_jobOrder', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.JobOrders')),\n ],\n ),\n migrations.CreateModel(\n name='JobType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('remarks', 
models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Parts',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('part_no', models.CharField(max_length=20)),\n ('part_desc', models.CharField(max_length=50)),\n ('srp', models.FloatField(default=0)),\n ('latest_rr', models.CharField(max_length=45)),\n ('latest_rr_date', models.DateField(blank=True, null=True)),\n ('latest_supplier', models.IntegerField(default=0)),\n ('uom', models.CharField(max_length=5)),\n ],\n ),\n migrations.CreateModel(\n name='PartsOrder',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('order_date', models.DateTimeField(blank=True, null=True)),\n ('status', models.CharField(max_length=12)),\n ('processed', models.DateTimeField(blank=True, null=True)),\n ('dtlast_updated', models.DateTimeField(blank=True, null=True)),\n ('id_user', models.IntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='PartsOrderDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('qty', models.FloatField(default=0)),\n ('unit_cost', models.FloatField(default=0)),\n ('balance_qty', models.FloatField(default=0)),\n ('balance_cost', models.FloatField(default=0)),\n ('fkid_parts', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Parts')),\n ],\n ),\n migrations.CreateModel(\n name='PartsReceived',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('receipt_dt', models.DateField(blank=True, null=True)),\n ('status', models.CharField(max_length=10)),\n ('processed', models.DateTimeField(blank=True, null=True)),\n ('balance_acc', models.FloatField(default=0)),\n ('fkid_partsOrder', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.PartsOrder')),\n ],\n ),\n migrations.CreateModel(\n name='PartsRecievedDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('id_parts', models.IntegerField(null=True)),\n ('qty', models.FloatField(default=0)),\n ('unit_cost', models.FloatField(default=0)),\n ('balance_qty', models.FloatField(default=0)),\n ('balance_cost', models.FloatField(default=0)),\n ('dtlast_upd', models.DateTimeField(blank=True, null=True)),\n ('id_user', models.IntegerField(default=0)),\n ('fkidParts_received', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.PartsReceived')),\n ],\n ),\n migrations.CreateModel(\n name='Payments',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('payment_dt', models.DateField(blank=True, null=True)),\n ('id_supplier', models.IntegerField(default=0)),\n ('remarks', models.CharField(max_length=100)),\n ('status', models.CharField(max_length=10)),\n ('processed', models.DateTimeField(blank=True, null=True)),\n ('balance_acc', models.FloatField(default=0)),\n ('dtLast_upd', models.DateField(blank=True, null=True)),\n ('id_user', models.IntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='PaymentsDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('amount', models.FloatField(default=0)),\n ('fkidParts_received', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, 
to='shop.PartsReceived')),\n ('fkid_payment', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Payments')),\n ],\n ),\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('supplier_name', models.CharField(max_length=100)),\n ('address', models.CharField(max_length=200)),\n ('tel_no', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Technician',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('last_name', models.CharField(max_length=30)),\n ('first_name', models.CharField(max_length=20)),\n ('mid_name', models.CharField(max_length=20)),\n ('gender', models.CharField(max_length=6)),\n ('birth_date', models.DateField(blank=True, null=True)),\n ('address', models.CharField(max_length=200)),\n ('spec_area', models.CharField(max_length=15)),\n ],\n ),\n migrations.CreateModel(\n name='Vehicle',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('plate_no', models.CharField(max_length=15)),\n ('make', models.CharField(max_length=15)),\n ('model', models.CharField(max_length=15)),\n ('engine_no', models.CharField(max_length=50)),\n ('chasis_no', models.CharField(max_length=50)),\n ('color', models.CharField(max_length=50)),\n ('type', models.CharField(max_length=15)),\n ('fkid_cust', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Customer')),\n ],\n ),\n migrations.AddField(\n model_name='partsreceived',\n name='fkid_supplier',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Supplier'),\n ),\n migrations.AddField(\n model_name='partsorder',\n name='fkid_supplier',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Supplier'),\n ),\n migrations.AddField(\n model_name='jobpartusage',\n name='fkid_parts',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Parts'),\n ),\n migrations.AddField(\n model_name='joborders',\n name='fkplate_no',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Vehicle'),\n ),\n migrations.AddField(\n model_name='joborderdetail',\n name='fkid_jobOrder',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.JobOrders'),\n ),\n migrations.AddField(\n model_name='joborderdetail',\n name='fkid_technician',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Technician'),\n ),\n migrations.AddField(\n model_name='job',\n name='fkid_jobType',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.JobType'),\n ),\n migrations.AddField(\n model_name='collections',\n name='fkid_cust',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Customer'),\n ),\n migrations.AddField(\n model_name='collectiondetail',\n name='fkid_collections',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.Collections'),\n ),\n migrations.AddField(\n model_name='collectiondetail',\n name='fkid_jobOrders',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='shop.JobOrders'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.8398133516311646,
"alphanum_fraction": 0.8398133516311646,
"avg_line_length": 29.571428298950195,
"blob_id": "a862637e6ac1c156976d76440876c203ec54958e",
"content_id": "d47ea1da39a25c8855c64ead933ef6f3ef7a270a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 21,
"path": "/FirstProj/shop/admin.py",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import *\n\nadmin.site.register(JobType)\nadmin.site.register(Customer)\nadmin.site.register(Vehicle)\nadmin.site.register(Technician)\nadmin.site.register(Supplier)\nadmin.site.register(Job)\nadmin.site.register(JobOrders)\nadmin.site.register(JobOrderDetail)\nadmin.site.register(JobPartUsage)\nadmin.site.register(PartsOrder)\nadmin.site.register(PartsOrderDetail)\nadmin.site.register(PartsReceived)\nadmin.site.register(PartsRecievedDetail)\nadmin.site.register(Parts)\nadmin.site.register(Collections)\nadmin.site.register(CollectionDetail)\nadmin.site.register(Payments)\nadmin.site.register(PaymentsDetail)\n\n"
},
{
"alpha_fraction": 0.6285046935081482,
"alphanum_fraction": 0.6285046935081482,
"avg_line_length": 27.53333282470703,
"blob_id": "1e1113df45ca051ad8d2bb8d0e91a1cfc8568ec0",
"content_id": "7e9f23aeb514fb64d1de2fce88213f26c548d86e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/FirstProj/shop/urls.py",
"repo_name": "batiquinianchris/function-based",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\n\napp_name = 'shop'\n\nurlpatterns = (\n\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^list$', views.technician_list, name='tech_list'),\n url(r'^new$$', views.technician_create, name='tech_new'),\n url(r'^edit/(?P<pk>\\d+)$', views.technician_update, name='tech_edit'),\n url(r'^delete/(?P<pk>\\d+)$', views.technician_delete, name='tech_delete'),\n\n)\n"
}
] | 6 |
Sluwayu/whiskey-cronjob
|
https://github.com/Sluwayu/whiskey-cronjob
|
28717a15dc988745fe3306a1c11a04194e947a15
|
30050bbfae205b0d9d3c7a98839e49a0b5d9b877
|
abb779c12ac4f88f0fee4940e11e5c2bb64959dd
|
refs/heads/main
| 2023-01-21T12:10:26.565136 | 2020-11-20T04:58:30 | 2020-11-20T04:58:30 | 314,444,813 | 0 | 0 | null | 2020-11-20T04:18:49 | 2020-11-17T18:30:26 | 2020-11-17T18:30:24 | null |
[
{
"alpha_fraction": 0.8152173757553101,
"alphanum_fraction": 0.8152173757553101,
"avg_line_length": 29.66666603088379,
"blob_id": "a9cb6163ab6c3b07d65150b2b5dbaa2fb0208d0a",
"content_id": "96385891ae11f78a83ffd32bf28d51adedf94501",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Sluwayu/whiskey-cronjob",
"src_encoding": "UTF-8",
"text": "# whiskey-cronjob\n\nNew whisky deals scraped from website, to demo cronjob running operation\n"
},
{
"alpha_fraction": 0.6183784008026123,
"alphanum_fraction": 0.638378381729126,
"avg_line_length": 30.355932235717773,
"blob_id": "fb61ffff77d00bafeddaf3bc3e31d2b9f986f2ed",
"content_id": "50a9eece57ce0e1eacb35ef875fb68bb2d33c5d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1850,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 59,
"path": "/new-whisky.py",
"repo_name": "Sluwayu/whiskey-cronjob",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\nimport smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\nimport sys\nimport creds\nimport pandas as pd\n\ndef get_whisky():\n newlist = []\n url = 'https://www.thewhiskyexchange.com/new-products/standard-whisky'\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n new_whisky = soup.find('li', {'class': 'np-postlist__item'}).find_all('li', {'class': 'product-list-item'})\n\n for item in new_whisky:\n new = {\n 'name': item.find('p', {'class': 'name'}).text,\n 'spec': item.find('p', {'class': 'spec'}).text,\n 'desc': item.find('p', {'class': 'description'}).text.strip(),\n 'price': item.find('p', {'class': 'price'}).text,\n }\n newlist.append(new)\n \n df = pd.DataFrame(newlist)\n return df\n\n\ndef emailnew(df):\n sender_email = \"[email protected]\"\n receiver_email = \"[email protected]\"\n password = creds.password\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = \"New Whisky Today\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n\n text = \"\"\"\\\n New stuff in today!\"\"\"\n html = df.to_html(index=False)\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n message.attach(part1)\n message.attach(part2)\n\n\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string()\n )\n\nemailnew(get_whisky())\n"
}
] | 2 |
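The per-item extraction inside get_whisky() above can be exercised offline. A self-contained sketch, assuming only the class names the scraper targets; the HTML snippet itself is invented for illustration.

from bs4 import BeautifulSoup

html = """
<ul>
  <li class="product-list-item">
    <p class="name">Example Dram 12 Year Old</p>
    <p class="spec">70cl / 43%</p>
    <p class="description"> A made-up tasting note. </p>
    <p class="price">£39.95</p>
  </li>
</ul>
"""

soup = BeautifulSoup(html, 'html.parser')
rows = [{
    'name': item.find('p', {'class': 'name'}).text,
    'spec': item.find('p', {'class': 'spec'}).text,
    'desc': item.find('p', {'class': 'description'}).text.strip(),
    'price': item.find('p', {'class': 'price'}).text,
} for item in soup.find_all('li', {'class': 'product-list-item'})]
print(rows)  # one dict per product, mirroring what get_whisky() appends
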
andrewsen/remote-input-server
|
https://github.com/andrewsen/remote-input-server
|
4567eafc7af8c91faae2ad1cf4f07fd092a12fbb
|
32f4042d6193f83b09f1cba76204a049aa5de95a
|
a0d97cbd601e097277d59765bc47569b0f56d9d7
|
refs/heads/master
| 2023-01-21T06:59:55.591052 | 2020-11-26T18:37:10 | 2020-11-26T18:37:10 | 316,310,498 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.660479724407196,
"alphanum_fraction": 0.6713306903839111,
"avg_line_length": 32.67307662963867,
"blob_id": "a42c340bea95e2d46a109b37cf4da3f499f3714e",
"content_id": "8db9b06da3c746bc30c4f21185153795c57a52b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3502,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 104,
"path": "/server.py",
"repo_name": "andrewsen/remote-input-server",
"src_encoding": "UTF-8",
"text": "import socket\nimport grpc\nimport netifaces\nimport uinput\n\nfrom zeroconf import ServiceInfo, IPVersion, Zeroconf\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom typing import Tuple, Optional\n\nimport service_pb2_grpc\nfrom service_pb2 import ConnectDataMsg, ConnectResponseMsg, Empty, ScrollDataMsg, MouseDataMsg, ButtonDataMsg\n\n\nclass RemoteInputServer(service_pb2_grpc.RemoteInputServiceServicer):\n def __init__(self, device: uinput.Device):\n self._device = device\n\n def SendConnectData(self, request: ConnectDataMsg, context: grpc.ServicerContext):\n print(f\"Connect data received, code: {request.check}\")\n return ConnectResponseMsg(check=request.check)\n\n def SendScrollData(self, request: ScrollDataMsg, context: grpc.ServicerContext):\n if request.valueX != 0:\n self._device.emit(uinput.REL_HWHEEL, request.valueX)\n if request.valueY != 0:\n self._device.emit(uinput.REL_WHEEL, request.valueY)\n return Empty()\n\n def SendMouseData(self, request: MouseDataMsg, context: grpc.ServicerContext):\n self._device.emit(uinput.REL_X, request.deltaX, syn=False)\n self._device.emit(uinput.REL_Y, request.deltaY)\n return Empty()\n\n def SendButtonData(self, request: ButtonDataMsg, context: grpc.ServicerContext):\n print(f\"Button data received, id: {request.button}, pressed: {request.pressed}\")\n\n pressed = 1 if request.pressed else 0\n\n if request.button == 1:\n self._device.emit(uinput.BTN_LEFT, pressed)\n elif request.button == 2:\n self._device.emit(uinput.BTN_RIGHT, pressed)\n elif request.button == 3:\n self._device.emit(uinput.BTN_MIDDLE, pressed)\n elif request.button == 201:\n self._device.emit(uinput.KEY_VOLUMEUP, pressed)\n elif request.button == 202:\n self._device.emit(uinput.KEY_VOLUMEDOWN, pressed)\n return Empty()\n\n\ndef serve(device: uinput.Device):\n server = grpc.server(ThreadPoolExecutor(max_workers=10))\n service_pb2_grpc.add_RemoteInputServiceServicer_to_server(RemoteInputServer(device), server)\n server.add_insecure_port(\"[::]:17863\")\n server.start()\n server.wait_for_termination()\n\n\ndef get_ip() -> Optional[str]:\n for iface in netifaces.interfaces():\n for record in netifaces.ifaddresses(iface).values():\n if len(record) > 0 and record[0][\"addr\"].startswith(\"192.168.\"):\n return record[0][\"addr\"]\n return None\n\n\ndef register_zeroconf_service() -> Tuple[Zeroconf, ServiceInfo]:\n ip = get_ip()\n\n if not ip:\n raise ValueError(\"Can't obtain IP address\")\n\n info = ServiceInfo(\n \"_grpc._tcp.local.\",\n f\"Input Server ({socket.gethostname()})._grpc._tcp.local.\",\n addresses=[socket.inet_aton(ip)],\n port=17863\n )\n\n zeroconf = Zeroconf(ip_version=IPVersion.All)\n zeroconf.register_service(info)\n return zeroconf, info\n\n\ndef unregister_zeroconf_service(zeroconf: Zeroconf, info: ServiceInfo):\n zeroconf.unregister_service(info)\n zeroconf.close()\n\n\ndef main():\n with uinput.Device([\n uinput.REL_WHEEL, uinput.REL_HWHEEL,\n uinput.REL_X, uinput.REL_Y,\n uinput.BTN_LEFT, uinput.BTN_MIDDLE, uinput.BTN_RIGHT,\n uinput.KEY_VOLUMEUP, uinput.KEY_VOLUMEDOWN\n ]) as device:\n zeroconf, info = register_zeroconf_service()\n serve(device)\n unregister_zeroconf_service(zeroconf, info)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4521739184856415,
"alphanum_fraction": 0.686956524848938,
"avg_line_length": 15.428571701049805,
"blob_id": "1f639d692d5353619552a7566fdff8a0d6ab318c",
"content_id": "16130790f83f86499b18cd969336fcb13d6b8221",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "andrewsen/remote-input-server",
"src_encoding": "UTF-8",
"text": "grpcio==1.33.2\nifaddr==0.1.7\nnetifaces==0.10.9\nprotobuf==3.14.0\npython-uinput==0.11.2\nsix==1.15.0\nzeroconf==0.28.6\n"
},
{
"alpha_fraction": 0.6949999928474426,
"alphanum_fraction": 0.7049999833106995,
"avg_line_length": 18.899999618530273,
"blob_id": "0e1d6ef5f923ab37f2d3dd769f8687d76d76685d",
"content_id": "7280f09f9c5b3e3fddafca54977d68d7e87ecc3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 10,
"path": "/Makefile",
"repo_name": "andrewsen/remote-input-server",
"src_encoding": "UTF-8",
"text": "PYTHON=python\n\n.PHONY: all\nall: protobuf\n\nprotobuf:\n\t$(PYTHON) -m grpc_tools.protoc -I./proto/ --python_out=. --grpc_python_out=. ./proto/service.proto\n\nclean:\n\trm service_pb2_grpc.py service_pb2.py\n\n"
}
] | 3 |
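server.py above advertises itself over zeroconf, so a client on the same network can locate it with the same package's ServiceBrowser. A minimal discovery sketch; the listener class is illustrative and not part of the repo.

import socket
from zeroconf import ServiceBrowser, Zeroconf

class GrpcListener:
    def add_service(self, zc, type_, name):
        # Resolve the advertisement that register_zeroconf_service() published.
        info = zc.get_service_info(type_, name)
        if info:
            print(f"Found {name} at {socket.inet_ntoa(info.addresses[0])}:{info.port}")

    def remove_service(self, zc, type_, name):
        print(f"{name} went away")

    def update_service(self, zc, type_, name):
        pass  # newer zeroconf releases expect this hook to exist

zc = Zeroconf()
browser = ServiceBrowser(zc, "_grpc._tcp.local.", GrpcListener())
input("Browsing for input servers; press Enter to stop...\n")
zc.close()
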
Sayoojk221/tic-tac-toe-scratch
|
https://github.com/Sayoojk221/tic-tac-toe-scratch
|
ea814bac95c3902df17717fd1d3ee12b9f8bea8f
|
04cfd200890b91024cf79639471e4b787b6ca9a2
|
26a7ba3f5a196e897302d659c66424f2ae5d1b4e
|
refs/heads/master
| 2022-12-21T14:43:48.475538 | 2020-09-02T02:04:49 | 2020-09-02T02:04:49 | 292,152,714 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4029933512210846,
"alphanum_fraction": 0.4298780560493469,
"avg_line_length": 27.871999740600586,
"blob_id": "50cab19928846a7657028299bd94954f17ed96dc",
"content_id": "c9b8fba38d4eddd1310aa93b442e0128e6ea17ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3608,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 125,
"path": "/main_file.py",
"repo_name": "Sayoojk221/tic-tac-toe-scratch",
"src_encoding": "UTF-8",
"text": "#Tic Tac Toe Game Design\nboard =[\n ['_','_','_'],\n ['_','_','_'],\n ['_','_','_']\n]\n\nboard_number = {'1':[0,0],'2':[0,1],'3':[0,2],'4':[1,0],'5':[1,1],'6':[1,2],\n '7':[2,0],'8':[2,1],'9':[2,2]}\n\nfor i in range(3):\n for j in range(3):\n print(board[i][j],end=' ')\n print('\\n')\n print('')\n\n#player value add(X,O)\ndef data_insertion(item,Value):\n \n if Value == 'X':\n list = board_number[item]\n board[list[0]][list[1]]=Value\n elif Value == 'O':\n list = board_number[item]\n board[list[0]][list[1]]=Value\n for i in range(3):\n for j in range(3):\n print(board[i][j],end=' ')\n print('\\n')\n print('')\n game_stop = winner_check()\n return game_stop\n \n\n#To check Winner\ndef winner_check():\n for i in range(3):\n j = 0\n if board[i][j] == 'X' and board[i][j+1] == 'X' and board[i][j+2] == 'X':\n print('Winner A')\n flag=1\n return flag\n elif board[i][j] == 'O' and board[i][j+1] == 'O' and board[i][j+2] == 'O':\n print('Winner B')\n flag=1\n return flag\n for j in range(3):\n i = 0 \n if board[i][j] == 'X' and board[i+1][j] == 'X' and board[i+2][j] == 'X':\n print('Winner A')\n flag=1\n return flag\n elif board[i][j] == 'O' and board[i+1][j] == 'O' and board[i+2][j] == 'O':\n print('Winner B')\n flag=1\n return flag\n if True:\n if board[0][0] == 'X' and board[1][1] == 'X' and board[2][2] == 'X':\n print('Winner A')\n flag=1\n return flag\n elif board[0][2] == 'X' and board[1][1] == 'X' and board[2][0] == 'X':\n print('Winner A')\n flag=1\n return flag\n elif board[0][0] == 'O' and board[1][1] == 'O' and board[2][2] == 'O':\n print('Winner B')\n flag=1\n return flag\n elif board[0][2] == 'O' and board[1][1] == 'O' and board[2][0] == 'O':\n print('Winner B')\n flag=1\n return flag\n \n \n#Draw checking function\ndef game_draw():\n count = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == 'X' or board[i][j] == 'O' :\n count += 1\n return count \n\n\n\n#Players Option(A,B)\n\ndef game_start():\n Player_A = True\n Player_B = False\n for Player_Selection in range(100):\n draw_value = game_draw()\n if draw_value == 9:\n print('A & B Draw!')\n break\n elif Player_A == True:\n print('Player A')\n value_A = 'X'\n user_entered_value = input('choose (1-9): ')\n list = board_number[user_entered_value]\n if board[list[0]][list[1]] == '_':\n game_stop = data_insertion(user_entered_value,value_A)\n if game_stop == 1:\n break\n Player_A = False\n Player_B = True\n else:\n print('location have value')\n\n elif Player_B == True:\n print('Player B')\n value_B = 'O'\n user_entered_value = input('choose (1-9): ')\n list = board_number[user_entered_value]\n if board[list[0]][list[1]] == '_':\n game_stop = data_insertion(user_entered_value,value_B)\n if game_stop == 1:\n break\n Player_A = True\n Player_B = False\n else:\n print('location have value')\n\ngame_start()"
}
] | 1 |
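winner_check() above spells out each winning line by hand. The same test can be driven from a single list of the eight lines; a sketch, assuming the repo's 3x3 board-of-lists representation.

def winner(board, mark):
    lines = (
        [[(r, c) for c in range(3)] for r in range(3)] +              # rows
        [[(r, c) for r in range(3)] for c in range(3)] +              # columns
        [[(i, i) for i in range(3)], [(i, 2 - i) for i in range(3)]]  # diagonals
    )
    return any(all(board[r][c] == mark for r, c in line) for line in lines)

board = [['X', 'X', 'X'], ['_', 'O', '_'], ['O', '_', '_']]
print(winner(board, 'X'))  # True
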
BunnyBu/New_repo
|
https://github.com/BunnyBu/New_repo
|
3ca296ddc88265e25b7927bf5fea06305d193497
|
4478eedf55c5814d8515f88fd8f290fcc707602d
|
6a2ac753e86cc53b8bba1df83d45d47dad9d86c8
|
refs/heads/main
| 2023-03-05T01:47:34.375592 | 2021-02-09T11:53:34 | 2021-02-09T11:53:34 | 337,064,981 | 0 | 0 | null | 2021-02-08T12:13:36 | 2021-02-09T11:53:53 | 2021-02-09T13:45:21 |
Python
|
[
{
"alpha_fraction": 0.6772475838661194,
"alphanum_fraction": 0.6866680383682251,
"avg_line_length": 37.15625,
"blob_id": "f527c6c8fc24cfc9a9506d17af2f2985792fd0d8",
"content_id": "d6f20e00c9784ca49b714208ef197a91240ad724",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6960,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 128,
"path": "/Lesson 03/03_Lesson.py",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "#Denis Trushin\n\n# В теле программы есть вызовы всех процедур\n\n\"\"\"1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.\n Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.\"\"\"\n\ndef divide(dividend,denominator):\n try:\n return dividend/denominator\n except ZeroDivisionError:\n return \"Неопределённость\"\n except TypeError:\n return \"Неправильный тип аргументов\"\n except Exception as ex:\n return ex\n\n\"\"\"2. Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя: \n имя, фамилия, год рождения, город проживания, email, телефон. \n Функция должна принимать параметры как именованные аргументы. \n Реализовать вывод данных о пользователе одной строкой.\"\"\"\n\ndef personal_data(first_name:str, last_name:str, year_of_birth:int, place:str, email:str=None, mobile:int=None):\n print(f'Товарищ {last_name} {first_name} {year_of_birth} года рождения,'\n f'проживающий в {place}. E-mail: {email}, телефон: {mobile}')\n\n\"\"\"3. Реализовать функцию my_func(), которая принимает три позиционных аргумента, \n и возвращает сумму наибольших двух аргументов.\"\"\"\n\ndef sum_two_out_three(a, b, c):\n try:\n temp = sorted([a, b, c], reverse=True)\n return temp[0] + temp[1]\n except TypeError:\n return \"нельзя сравнивать строки и числа\"\n\n\"\"\"4. Программа принимает действительное положительное число x и целое отрицательное число y. \nНеобходимо выполнить возведение числа x в степень y. Задание необходимо реализовать в виде функции my_func(x, y). \nПри решении задания необходимо обойтись без встроенной функции возведения числа в степень.\nПодсказка: попробуйте решить задачу двумя способами. Первый — возведение в степень с помощью оператора **. \nВторой — более сложная реализация без оператора **, предусматривающая использование цикла.\"\"\"\n\ndef power(a, b):\n result = a\n for index in range(1,abs(b)):\n result *= a\n if b < 0:\n try:\n return 1/result\n except Exception as ex:\n return ex\n elif b > 0:\n return result\n else:\n return 1 # n^0\n\n\"\"\"5. Программа запрашивает у пользователя строку чисел, разделенных пробелом. \nПри нажатии Enter должна выводиться сумма чисел. Пользователь может продолжить ввод чисел, \nразделенных пробелом и снова нажать Enter. Сумма вновь введенных чисел будет добавляться к уже подсчитанной сумме. \nНо если вместо числа вводится специальный символ, выполнение программы завершается. \nЕсли специальный символ введен после нескольких чисел, то вначале нужно добавить сумму этих чисел к полученной \nранее сумме и после этого завершить программу.\"\"\"\n\ndef typewriter():\n summ = 0 #Набегающая сумма\n while True:\n #получение обрезка, разбиение строки\n input_string = input(\"Введите строку из чисел через пробел.\\n \"\n \"Для завершения введите любой символ или слово: \").strip().split(' ')\n try: #попытка привести к числу и сложить\n input_string = list(map(int, input_string)) #приведение к числу\n summ += sum(input_string)\n print(f'Промежуточная сумма: {summ}. \\n')\n except ValueError:\n break\n #если в последовательности есть строки, то сичтаем сумму до них\n for item in input_string:\n try:\n summ += int(item)\n except ValueError:\n print(f'Конечная сумма: {summ}. \\nРабота завершена')\n except Exception as ex:\n print(ex)\n\n\"\"\"6. Реализовать функцию int_func(), принимающую слово из маленьких латинских букв и возвращающую его же,\n но с прописной первой буквой. 
Например, print(int_func(‘text’)) -> Text.\"\"\"\n\ndef capital_leter(text:str):\n return text.capitalize() #smile\n\ndef capital_leter_v2(text:str):\n return text[0].upper()+text[1:]\n\ndef capital_leter_v3(text:str):\n #ord('A') = 65, ord('a') = 97 -- 97-65=32\n first_letter = ord(text[0])\n if first_letter >= 97:\n return chr(first_letter-32)+text[1:]\n return text\n\n\"\"\"Продолжить работу над заданием. В программу должна попадать строка из слов, разделенных пробелом. \nКаждое слово состоит из латинских букв в нижнем регистре. Сделать вывод исходной строки, \nно каждое слово должно начинаться с заглавной буквы. \nНеобходимо использовать написанную ранее функцию int_func().\"\"\"\n\ndef capitaliser(text:str):\n return ' '.join(list(map(capital_leter,text.strip().split(' '))))\n\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n print(divide(3,5))\n\n# personal_data(\"Denis\", \"Trushin\", 1981, \"г. Балашиха\")\n\n# print(sum_two_out_three(\"a\",7,\"c\"))\n\n# print(power(3,-3))\n\n# typewriter()\n\n# print(capital_leter('vasily'))\n# print(capital_leter_v2('vasily'))\n# print(capital_leter_v3('vasily'))\n\n# print(capitaliser(\"Как тебе такое илон маск?\"))"
},
{
"alpha_fraction": 0.7091673016548157,
"alphanum_fraction": 0.7309117913246155,
"avg_line_length": 40.73196029663086,
"blob_id": "6a2f4a5692d8a03c983c3b0fce4c2b2a7dc6b753",
"content_id": "61a6cc5875ab67cc5c97c6c69222244e63ffeb4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5946,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 97,
"path": "/Lesson 01/Lesson 01.py",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "#Трушин Денис\n#Задание к первому уроку\n\n#1. Поработайте с переменными, создайте несколько, выведите на экран, запросите у пользователя\n# несколько чисел и строк и сохраните в переменные, выведите на экран.\n\none = 'Привет!'\nonemore = 2.76\ngreeting = one\nuser_data = input(greeting+\" Введите строку: \")\nprint(f'Вы ввели \"{user_data}\"')\n\n\n# 2. Пользователь вводит время в секундах. Переведите время в часы, минуты и секунды и выведите в формате чч:мм:сс.\n# Используйте форматирование строк.\n\nuser_data = int(input(\"Введите время в секундах: \"))\nseconds = user_data%60\nminuts = (user_data//60)%60\nours = (user_data)//3600\nprint(f'{user_data} секунд - это {ours} часов {minuts} минут {seconds} секунд')\n\n\n# 3. Узнайте у пользователя число n. Найдите сумму чисел n + nn + nnn.\n# Например, пользователь ввёл число 3. Считаем 3 + 33 + 333 = 369.\n\nuser_data = input(\"Введите целое число: \")\n\nif user_data[0] != \"-\":\n # умножаем строку, приводим к числу. Работает для любых натуральных чисел\n print(int(user_data*3)+int(user_data*2)+int(user_data))\nelse: #для отрицательных\n user_data = str(-1 * int(user_data)) #приводим к положительному\n print(-1*(int(user_data * 3) + int(user_data * 2) + int(user_data))) #домножаем на -1\n\n#если на входе именно число, а не строка\nuser_data = int(input(\"Введите целое число: \"))\nMultiplicator = 10\ncopy_user_data = user_data # сначала используем как делимое в цикле определения порядков, потом для расчёта конечного числа\n\nwhile copy_user_data // 10 > 0: # считаем сколько порядков в числе. Готовим мультипликатор.\n Multiplicator *= 10\n copy_user_data //= 10\n\n# nn = user_data * Multiplicator + user_data\n# nnn = nn * Multiplicator + user_data\n# copy_user_data = (user_data * Multiplicator + user_data) * Multiplicator + user_data + (user_data * Multiplicator + user_data) + user_data\ncopy_user_data = user_data * Multiplicator * Multiplicator + 2 * user_data * Multiplicator + 3 * user_data # раскрыты скобки, упрощено выражение\nprint(copy_user_data)\n\n\n#4. Пользователь вводит целое положительное число. Найдите самую большую цифру в числе.\n# Для решения используйте цикл while и арифметические операции.\n\nuser_data = int(input(\"Введите натуральное число: \")) # 67324765345321\nmax_digital = user_data % 10\n\nwhile user_data // 10 > 0:\n if (user_data // 10) % 10 > max_digital:\n max_digital = (user_data // 10) % 10\n user_data //= 10\n\nprint(f'Максимальная цифра {max_digital}')\n\n# 5. Запросите у пользователя значения выручки и издержек фирмы.\n# Определите, с каким финансовым результатом работает фирма (прибыль — выручка больше издержек,\n# или убыток — издержки больше выручки). Выведите соответствующее сообщение.\n# Если фирма отработала с прибылью, вычислите рентабельность выручки (соотношение прибыли к выручке).\n# Далее запросите численность сотрудников фирмы и определите прибыль фирмы в расчете на одного сотрудника.\n\nrevenue = int(input(\"Введите размер выручки: \"))\nloss = int(input(\"Введите размер издержек: \"))\ngain = revenue - loss\n\nif gain > 0:\n print(f'Ваша компания прибыльна.\\nДоходность {round(gain/revenue * 100,2)}%')\n workers_count = int(input(\"Введите количество сотрудников: \"))\n print(f'Доходность на одного сотрудника {round(gain/workers_count,2)}')\nelse:\n print(\"Ваша компания убыточна.\")\n\n\n#Спортсмен занимается ежедневными пробежками. 
В первый день его результат составил a километров.\n# Каждый день спортсмен увеличивал результат на 10 % относительно предыдущего.\n# Требуется определить номер дня, на который общий результат спортсмена составить не менее b километров.\n# Программа должна принимать значения параметров a и b и выводить одно натуральное число — номер дня.\n\nodometer = int(input(\"Введите результат первого дня: \"))\ntarget = int(input(\"Введите целевой результат: \"))\nprogress = odometer ##прогресс тренеровок\ndays = 1 ##требуемые для достижения дни\n\nwhile progress < target:\n days += 1\n progress += progress / 10\n\nprint(f'Для достижения результата {target} км с начального результата {odometer}, потребуется {days} дней')"
},
{
"alpha_fraction": 0.6050968170166016,
"alphanum_fraction": 0.6254444718360901,
"avg_line_length": 39.49599838256836,
"blob_id": "dd7c5ee4b3bac82a012ce91fb5675ecdca2ede51",
"content_id": "73694c93247111050d1a9a8bc5fac7846777b939",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7331,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 125,
"path": "/Lesson 02/Lesson 02.py",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "#Трушин Денис\n\nif __name__ == '__main__':\n\n#1. Создать список и заполнить его элементами различных типов данных.\n# Реализовать скрипт проверки типа данных каждого элемента.\n# Использовать функцию type() для проверки типа.\n# Элементы списка можно не запрашивать у пользователя, а указать явно, в программе.\n\n any_list = [None,\n 2,\n 'n',\n 8.34,\n ('к', 'о', 'р', 'т', 'э', 'ж'),\n {'пуля':'дура', 'штык':'молодец'},\n ['ещё один список']]\n for item in any_list:\n print(f'переменная {item} имеет тип {type(item)}')\n\n#2. Для списка реализовать обмен значений соседних элементов, т.е. Значениями обмениваются элементы\n# с индексами 0 и 1, 2 и 3 и т.д. При нечетном количестве элементов последний сохранить на своем месте.\n# Для заполнения списка элементов необходимо использовать функцию input().\n\n next = None\n any_list = []\n\n while True:\n next = input(\"Введите список поэлементно (пусто для завершения ввода): \")\n if next != '':\n any_list.append(next)\n else:\n break\n\n print(any_list)\n\n for index in range(len(any_list) // 2): #опорным индексом будет range(len // 2) * 2 --0,2,4,6...\n any_list[index * 2], any_list[index * 2 + 1] = any_list[index * 2 + 1], any_list[index * 2]\n\n print(any_list)\n\n#3. Пользователь вводит месяц в виде целого числа от 1 до 12.\n# Сообщить к какому времени года относится месяц (зима, весна, лето, осень).\n# Напишите решения через list и через dict.\n\n sezons_list = ['Зима', 'Весна', 'Лето', 'Осень']\n sezons_dict = {1:'Зима', 2:'Зима', 3:'Весна', 4:'Весна', 5:'Весна', 6:'Лето', 7:'Лето',\n 8:'Лето', 9:'Осень', 10:'Осень', 11:'Осень', 12:'Зима',}\n\n month = int(input(\"Введите номер месяца: \"))\n\n print(\"\\nЧерез лист\")\n\n if month == 1 or month == 2 or month == 12:\n print(sezons_list[0])\n elif month == 3 or month == 4 or month == 5:\n print(sezons_list[1])\n elif month == 6 or month == 7 or month == 8:\n print(sezons_list[2])\n elif month == 9 or month == 10 or month == 11:\n print(sezons_list[3])\n else:\n print(\"Вы ввели неверный номер\")\n\n print(\"\\nЧерез словарь\")\n\n print(sezons_dict.get(month, \"Такого месяца нет\"))\n\n#4. Пользователь вводит строку из нескольких слов, разделённых пробелами.\n# Вывести каждое слово с новой строки. Строки необходимо пронумеровать.\n# Если в слово длинное, выводить только первые 10 букв в слове.\n\n input_string = input(\"Введите произвольную строку: \") #' Пример моей строки с пробелами '\n index = 1\n\n for item in input_string.strip().split(' '): #тримаем пробелы с концов строки перед разбиением\n print(f'{index} {item[:10]}')\n index += 1\n\n#5. Реализовать структуру «Рейтинг», представляющую собой не возрастающий набор натуральных чисел.\n# У пользователя необходимо запрашивать новый элемент рейтинга.\n# Если в рейтинге существуют элементы с одинаковыми значениями,\n# то новый элемент с тем же значением должен разместиться после них.\n\n my_list = [7, 5, 3, 3, 2]\n number = int(input(\"введите натуральное число: \"))\n#\"должен разместиться после них\" не отличим от результата \"перед ними\" или \"между ними\" пока мы не используем\n# ссылочные типы. \"+ my_list.count(number)\" добавлен именно из-за этого условия задачи.\n my_list.insert(my_list.index(number) + my_list.count(number), number)\n print(my_list)\n\n#6. 
Реализовать структуру данных «Товары».\n# Она должна представлять собой список кортежей.\n# Каждый кортеж хранит информацию об отдельном товаре.\n# В кортеже должно быть два элемента — номер товара и словарь с параметрами\n# (характеристиками товара: название, цена, количество, единица измерения).\n# Структуру нужно сформировать программно, т.е. запрашивать все данные у пользователя.\n#Пример готовой структуры:\n#[\n#(1, {“название”: “компьютер”, “цена”: 20000, “количество”: 5, “eд”: “шт.”}),\n#(2, {“название”: “принтер”, “цена”: 6000, “количество”: 2, “eд”: “шт.”}),\n#(3, {“название”: “сканер”, “цена”: 2000, “количество”: 7, “eд”: “шт.”})\n#]\n\n\n my_merch = [\n (1, {\"цена\":\"большая\", \"производитель\":\"нормальный\", \"приблуды\":True}),\n (2, {\"цена\":\"средняя\", \"производитель\":\"такое себе\", \"приблуды\":False}),\n (3, {\"цена\":\"малая\", \"производитель\":\"no name\", \"приблуды\":False})\n ]\n next_pice = None\n index = len(my_merch) #\n\n while True:\n next_pice = input(\"\\nВведите цену (пусто для завершения): \")\n if next_pice != '':\n my_merch.append((index+1, {\"цена\": next_pice, \"производитель\": None, \"приблуды\": None}))\n my_merch[index][1]['производитель'] = input(\"Введите название производителя: \")\n my_merch[index][1]['приблуды'] = bool(input(\"Укажите наличие приблуд (True or False): \"))\n else:\n break\n\n for item in my_merch:\n print(f'цена {item[1].get(\"цена\")}, '\n f'производитель \"{item[1].get(\"производитель\")}\", '\n f'приблуды {item[1].get(\"приблуды\")};')\n"
},
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 8,
"blob_id": "571cbff124a72431910a1c79d50994d819016ce2",
"content_id": "5c01909bcd0f36b8f6b51137bbdde50dccb5cf3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 18,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/README.md",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "# New_repo\nPython\n"
},
{
"alpha_fraction": 0.6439436674118042,
"alphanum_fraction": 0.7233802676200867,
"avg_line_length": 44.53845977783203,
"blob_id": "586d7d65dd6f839e212eba4bb046fd97982f1ee8",
"content_id": "a24ae23eebc93704228315b232b3e89bf54758e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2573,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 39,
"path": "/Lesson 04/other_tasks.py",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "#Denis Trushin\n\n\"\"\"Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.\nПодсказка: элементы, удовлетворяющие условию, оформить в виде списка. Для его формирования используйте генератор.\nПример исходного списка: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].\nРезультат: [12, 44, 4, 10, 78, 123].\"\"\"\n\n\norigin_list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]\ntarget_list = [origin_list[index] for index in range(1,len(origin_list)) if (origin_list[index] > origin_list[index-1])]\n\nprint(origin_list)\nprint(target_list)\n\n\"\"\"Для чисел в пределах от 20 до 240 найти числа, кратные 20 или 21. Необходимо решить задание в одну строку.\nПодсказка: использовать функцию range() и генератор.\"\"\"\n\ntarget_list= [el for el in range(20,241) if el % 20 == 0 or el % 21 ==0]\nprint(target_list)\n\n\"\"\"Представлен список чисел. Определите элементы списка, не имеющие повторений. \nСформируйте итоговый массив чисел, соответствующих требованию. \nЭлементы выведите в порядке их следования в исходном списке. \nДля выполнения задания обязательно используйте генератор.\nПример исходного списка: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].\nРезультат: [23, 1, 3, 10, 4, 11]\"\"\"\n\norigin_list = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]\ntarget_list = [el for el in origin_list if origin_list.count(el) == 1]\nprint(target_list)\n\n\"\"\"Реализовать формирование списка, используя функцию range() и возможности генератора. \nВ список должны войти чётные числа от 100 до 1000 (включая границы). \nНужно получить результат вычисления произведения всех элементов списка.\nПодсказка: использовать функцию reduce().\"\"\"\nfrom functools import reduce\n\ntarget_list = [el for el in range(100, 1001, 2)]\nprint(reduce(lambda x, y: x * y, target_list))"
},
{
"alpha_fraction": 0.6751739978790283,
"alphanum_fraction": 0.685614824295044,
"avg_line_length": 46.88888931274414,
"blob_id": "6fa592be548deaa9c080bbde5ad18da16752736b",
"content_id": "8588505665b86c5d1a7b4772a69b4dbc10af33ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1271,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 18,
"path": "/Lesson 04/01_task.py",
"repo_name": "BunnyBu/New_repo",
"src_encoding": "UTF-8",
"text": "#Denis Trushin\n\n\"\"\"Реализовать скрипт, в котором должна быть предусмотрена функция расчёта заработной платы сотрудника.\nИспользуйте в нём формулу: (выработка в часах*ставка в час) + премия.\nВо время выполнения расчёта для конкретных значений необходимо запускать скрипт с параметрами.\"\"\"\n\nfrom sys import argv\n\nif len(argv) == 4:\n if argv[1].replace('.', '').isdigit() and argv[2].replace('.', '').isdigit() and argv[3].replace('.', '').isdigit():\n print(f'Заработная плата сотрудника составит: {float(argv[1]) * float(argv[2]) + float(argv[3])}')\n else:\n print(\"Неверный тип аргументов\")\nelif argv[1] == '/?' or argv[1].lower() == '/help':\n print(\"Скрипт ожидает три параметра. \\nВведите через пробел: выработку в часах, ставку в час и премию.\\n\"\n \"Дробные значения отделяйте точкой.\")\nelse:\n print(\"Неверное количество параметров\")\n"
}
] | 6 |
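Lesson 01 above solves the n + nn + nnn task twice, once by string repetition and once arithmetically. A compact sketch combining the two ideas: string repetition builds nn and nnn for any digit count, and the sign is factored out so negative input needs no separate branch.

def triple_sum(n):
    digits = str(abs(n))
    total = sum(int(digits * k) for k in (1, 2, 3))
    return total if n >= 0 else -total

print(triple_sum(3))    # 3 + 33 + 333 = 369
print(triple_sum(-12))  # -(12 + 1212 + 121212) = -122436
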
lrhunt/LCBGS_IN_CLUSTERS
|
https://github.com/lrhunt/LCBGS_IN_CLUSTERS
|
cbfbd3667cedf71481e56ba05097bff8354ab5e8
|
0ffebaf8eef6b8b76414a3fdd95d0b0204aa5384
|
c32e5cfae8023407bfecbe9b0cbdcfc83dae0637
|
refs/heads/master
| 2021-07-14T11:49:07.897648 | 2019-02-27T15:01:17 | 2019-02-27T15:01:17 | 141,153,478 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6932547092437744,
"alphanum_fraction": 0.7627596855163574,
"avg_line_length": 53.887325286865234,
"blob_id": "75391e9de24776953a9c28a36c85020dbe608b0c",
"content_id": "10141ec1296384c2e0b41ed15e30044419b57ac4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3899,
"license_type": "no_license",
"max_line_length": 744,
"num_lines": 71,
"path": "/Calculate_kcorrections.py",
"repo_name": "lrhunt/LCBGS_IN_CLUSTERS",
"src_encoding": "UTF-8",
"text": "\n# Trying to get k-corrections for LCBG in cluster data. \n# Testing with Molino et al. catalog 2017 for cluster \n# MACS2129.\n#\n#Editor\tDate\t\t\tChange\n#\n# LH\t\t10/12/2018\t\tFirst write\n\nimport numpy as np\nimport pandas as pd\nimport kcorrect \nimport kcorrect.utils as ut\n\ninfile='/home/lrhunt/Documents/LCBG_CLUSTER_GROUP/hlsp_clash_hst_ir_macs2129_cat-molino.txt'\n\ndataset=pd.read_table(infile,delim_whitespace=True,header=133)\n\n# Example of how to select rows in pandas\n\ncluster_member=dataset[(dataset.zb_1<dataset.clusterz+0.2)&(dataset.zb_1>dataset.clusterz-0.2)]\n\n# Example of how to select columns in pandas\n\ncluster_member_photometry=cluster_member[['CLASHID','F435W_ACS_MASS','dF435W_ACS_MASS','F475W_ACS_MASS','dF475W_ACS_MASS','F606W_ACS_MASS','dF606W_ACS_MASS','F625W_ACS_MASS','dF625W_ACS_MASS','F775W_ACS_MASS','dF775W_ACS_MASS','F814W_ACS_MASS','dF814W_ACS_MASS','F850LP_ACS_MASS','dF850LP_ACS_MASS','F105W_WFC3_MASS','dF105W_WFC3_MASS','F110W_WFC3_MASS','dF110W_WFC3_MASS','F125W_WFC3_MASS','dF125W_WFC3_MASS','F140W_WFC3_MASS','dF140W_WFC3_MASS','F160W_WFC3_MASS','dF160W_WFC3_MASS','F225W_WFC3_MASS','dF225W_WFC3_MASS','F275W_WFC3_MASS','dF275W_WFC3_MASS','F336W_WFC3_MASS','dF336W_WFC3_MASS','F390W_WFC3_MASS','dF390W_WFC3_MASS','zb_1']]\n\ncluster_member_maggies=cluster_member_photometry.copy(deep=True)\n\ncluster_member_maggies=cluster_member_maggies[(cluster_member_maggies.F435W_ACS_MASS<80)&(cluster_member_maggies.F475W_ACS_MASS<80)&(cluster_member_maggies.F606W_ACS_MASS<80)&(cluster_member_maggies.F625W_ACS_MASS<80)&(cluster_member_maggies.F775W_ACS_MASS<80)&(cluster_member_maggies.F814W_ACS_MASS<80)&(cluster_member_maggies.F850LP_ACS_MASS<80)&(cluster_member_maggies.F105W_WFC3_MASS<80)&(cluster_member_maggies.F110W_WFC3_MASS<80)&(cluster_member_maggies.F125W_WFC3_MASS<80)&(cluster_member_maggies.F140W_WFC3_MASS<80)&(cluster_member_maggies.F160W_WFC3_MASS<80)&(cluster_member_maggies.F225W_WFC3_MASS<80)&(cluster_member_maggies.F275W_WFC3_MASS<80)&(cluster_member_maggies.F336W_WFC3_MASS<80)&(cluster_member_maggies.F390W_WFC3_MASS<80)]\n\ncluster_member_maggies.iloc[:,np.arange(1,33,2)]=ut.mag2maggies(cluster_member_maggies.iloc[:,np.arange(1,33,2)])\n\nfor var in np.arange(2,33,2):\n\tcluster_member_maggies.iloc[:,var]=ut.invariance(cluster_member_maggies.iloc[:,var-1],cluster_member_maggies.iloc[:,var])\n\ncluster_member_maggies[\"c1\"]=np.nan\ncluster_member_maggies[\"c2\"]=np.nan\ncluster_member_maggies[\"c3\"]=np.nan\ncluster_member_maggies[\"c4\"]=np.nan\ncluster_member_maggies[\"c5\"]=np.nan\ncluster_member_maggies[\"c6\"]=np.nan\n\n\ncluster_member_maggies_from_kcorr=cluster_member_maggies.iloc[:,np.arange(1,31,2)]\ncluster_member_maggies_from_kcorr.insert(loc=0,column='redshift',value=np.nan)\n\ncmm_ind=cluster_member_maggies.index.values\n\n# May be able to create a new filter list based on the column names\n# that are already loadable from the text file. If that is possible,\n# don't have to make a bunch of different templates. 
May need to\n# write a new file.\n\nkcorrect.load_templates()\nkcorrect.load_filters('/home/lrhunt/programs/kcorrect/data/templates/LCBG_CLUSTER_FLITS.dat')\n\nfor i in cmm_ind:\n\t#cluster_member_maggies.loc[i,'c1':'c6']=kcorrect.fit_nonneg(np.array(cluster_member_maggies.loc[i,'zb_1'],dtype=float),np.array(cluster_member_maggies.loc[i, np.array(list(cluster_member_maggies))[np.arange(1,31,2)]],dtype=float),np.array(cluster_member_maggies.loc[i,np.array(list(cluster_member_maggies))[np.arange(1,31,2)]],dtype=float))\n\n#Need to add column for redshift in cluster_member_maggies_from_kcorr\n\nfor i in cmm_ind:\n\tcluster_member_maggies_from_kcorr.loc[i]=kcorrect.reconstruct_maggies(cluster_member_maggies.loc[i,'c1':'c6'])\n\ncolumns=list(cluster_member_maggies_from_kcorr)\n\nnew_columns={}\n\nfor column in columns:\n new_columns.update({column:column+'_s'})\n\ncluster_member_maggies_from_kcorr=cluster_member_maggies_from_kcorr.rename(columns=new_columns)\n\n"
},
{
"alpha_fraction": 0.684686541557312,
"alphanum_fraction": 0.744501531124115,
"avg_line_length": 31.637584686279297,
"blob_id": "87b512084ebd9577d71c4afb25fa9b8afe73e58f",
"content_id": "e5c62dc27ad4f1c7f12d7a48be869d07fb1582d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4865,
"license_type": "no_license",
"max_line_length": 798,
"num_lines": 149,
"path": "/MOLINO_KCORR.py",
"repo_name": "lrhunt/LCBGS_IN_CLUSTERS",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n# Below will be some code to calculate k-corrections for clusters in the Molino HST Cluster catalogs, following their example\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport kcorrect \nimport kcorrect.utils as ut\nimport matplotlib.pyplot as plt\n\n\n# The next line reads the catalog into a pandas dataframe. Whitespace=True sets the delimiter (separator) as whitespace, and header=133 sets the line that contains header information. This line is the one that labels each column correctly in this document. The final line selects objects that have a photometric redshift within the cluster redshift range\n\n# In[2]:\n\n\ninfile='/Users/lucashunt/projects/LCBGS_IN_CLUSTERS/hlsp_clash_hst_ir_macs2129_cat-molino.txt'\ndataset=pd.read_table(infile,delim_whitespace=True,header=133)\ncluster_member=dataset[(dataset.zb_1<dataset.clusterz+0.2)&(dataset.zb_1>dataset.clusterz-0.2)]\n\n\n# This line selects only ID, photometry, and redshift columns\n\n# In[3]:\n\n\ncluster_member_photometry=cluster_member[['CLASHID','F435W_ACS_MASS','dF435W_ACS_MASS','F475W_ACS_MASS','dF475W_ACS_MASS','F606W_ACS_MASS','dF606W_ACS_MASS','F625W_ACS_MASS','dF625W_ACS_MASS','F775W_ACS_MASS','dF775W_ACS_MASS','F814W_ACS_MASS','dF814W_ACS_MASS','F850LP_ACS_MASS','dF850LP_ACS_MASS','F105W_WFC3_MASS','dF105W_WFC3_MASS','F110W_WFC3_MASS','dF110W_WFC3_MASS','F125W_WFC3_MASS','dF125W_WFC3_MASS','F140W_WFC3_MASS','dF140W_WFC3_MASS','F160W_WFC3_MASS','dF160W_WFC3_MASS','F225W_WFC3_MASS','dF225W_WFC3_MASS','F275W_WFC3_MASS','dF275W_WFC3_MASS','F336W_WFC3_MASS','dF336W_WFC3_MASS','F390W_WFC3_MASS','dF390W_WFC3_MASS','zb_1']]\n\n\n# In[4]:\n\n\ncluster_member_photometry=cluster_member_photometry[(cluster_member_photometry.F435W_ACS_MASS<80)&(cluster_member_photometry.F475W_ACS_MASS<80)&(cluster_member_photometry.F606W_ACS_MASS<80)&(cluster_member_photometry.F625W_ACS_MASS<80)&(cluster_member_photometry.F775W_ACS_MASS<80)&(cluster_member_photometry.F814W_ACS_MASS<80)&(cluster_member_photometry.F850LP_ACS_MASS<80)&(cluster_member_photometry.F105W_WFC3_MASS<80)&(cluster_member_photometry.F110W_WFC3_MASS<80)&(cluster_member_photometry.F125W_WFC3_MASS<80)&(cluster_member_photometry.F140W_WFC3_MASS<80)&(cluster_member_photometry.F160W_WFC3_MASS<80)&(cluster_member_photometry.F225W_WFC3_MASS<80)&(cluster_member_photometry.F275W_WFC3_MASS<80)&(cluster_member_photometry.F336W_WFC3_MASS<80)&(cluster_member_photometry.F390W_WFC3_MASS<80)]\n\n\n# Below create a copy to generate maggies and select objects that actually have photometry\n\n# In[5]:\n\n\ncluster_member_maggies=cluster_member_photometry.copy(deep=True)\n\n\n# In[6]:\n\n\ncluster_member_maggies.iloc[:,np.arange(1,33,2)]=ut.mag2maggies(cluster_member_maggies.iloc[:,np.arange(1,33,2)])\n\n\n# In[7]:\n\n\nfor var in np.arange(2,33,2):\n\tcluster_member_maggies.iloc[:,var]=ut.invariance(cluster_member_maggies.iloc[:,var-1],cluster_member_maggies.iloc[:,var])\n\n\n# In[8]:\n\n\ncluster_member_maggies[\"c1\"]=np.nan\ncluster_member_maggies[\"c2\"]=np.nan\ncluster_member_maggies[\"c3\"]=np.nan\ncluster_member_maggies[\"c4\"]=np.nan\ncluster_member_maggies[\"c5\"]=np.nan\ncluster_member_maggies[\"c6\"]=np.nan\n\n\n# In[9]:\n\n\ncluster_member_maggies_from_kcorr=cluster_member_maggies.iloc[:,np.arange(1,31,2)]\ncluster_member_maggies_from_kcorr.insert(loc=0,column='redshift',value=np.nan)\n\n\n# In[10]:\n\n\ncmm_ind=cluster_member_maggies.index.values\n\n\n# 
In[11]:\n\n\nkcorrect.load_templates()\nkcorrect.load_filters('/Users/lucashunt/programs/kcorrect/data/templates/LCBG_CLUSTER_FLITS.dat')\n\n\n# In[ ]:\n\n\nfor i in cmm_ind:\n\tcluster_member_maggies.loc[i,'c1':'c6']=kcorrect.fit_nonneg(np.array(cluster_member_maggies.loc[i,'zb_1'],dtype=float),np.array(cluster_member_maggies.loc[i, np.array(list(cluster_member_maggies))[np.arange(1,31,2)]],dtype=float),np.array(cluster_member_maggies.loc[i,np.array(list(cluster_member_maggies))[np.arange(1,31,2)]],dtype=float))\n\n\n# In[ ]:\n\n\nfor i in cmm_ind:\n\tcluster_member_maggies_from_kcorr.loc[i]=kcorrect.reconstruct_maggies(cluster_member_maggies.loc[i,'c1':'c6'])\n\n\n# In[ ]:\n\n\nkcorrect.load_templates()\nkcorrect.load_filters('/Users/lucashunt/programs/kcorrect/data/templates/bessell_ubv.dat')\n\n\n# In[ ]:\n\n\nbessel_ubv=pd.DataFrame({\"U\":[],\"B\":[],\"V\":[]})\n\n\n# In[ ]:\n\n\nprint(len(cluster_member_maggies))\nprint(kcorrect.reconstruct_maggies(cluster_member_maggies[['c1','c2','c3','c4','c5','c6']].iloc[1]))\n\n\n# In[ ]:\n\n\ncolumns=list(cluster_member_maggies_from_kcorr)\n\nnew_columns={}\n\nfor column in columns:\n new_columns.update({column:column+'_s'})\n\ncluster_member_maggies_from_kcorr=cluster_member_maggies_from_kcorr.rename(columns=new_columns)\n\n\n# In[ ]:\n\n\nprint(cmm_ind)\nprint(-2.5*np.log10(cluster_member_maggies_from_kcorr.iloc[:,np.arange(1,len(new_columns))]))\nprint(cluster_member_photometry)\n\n\n# In[ ]:\n\n\nplt.plot(-2.5*np.log10(cluster_member_maggies_from_kcorr.iloc[:,3]),cluster_member_photometry.iloc[:,21],'.')\n\n"
},
{
"alpha_fraction": 0.796875,
"alphanum_fraction": 0.796875,
"avg_line_length": 191,
"blob_id": "b06f5d6bb058b3c532bb4257ab04dafc8d554d68",
"content_id": "f42c580e5bd217f590cab2fcc7d0b974b1acbd31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 1,
"path": "/README.md",
"repo_name": "lrhunt/LCBGS_IN_CLUSTERS",
"src_encoding": "UTF-8",
"text": "This is a repository used to calculate the k-correction for one of the MOLINO CLASH clusters. This makes use of the kcorrect code from michael blanton and the python wrapper found on github by nirin-a\n"
}
] | 3 |
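Both scripts above convert catalog magnitudes to maggies with kcorrect.utils.mag2maggies and later invert the step with -2.5*np.log10(...). A dependency-free sketch of that round trip, assuming the standard convention m = -2.5 log10(maggies) that the scripts' own inverse implies.

import numpy as np

def mag2maggies(mag):
    # maggies are the linear flux unit defined by m = -2.5 * log10(maggies)
    return 10.0 ** (-0.4 * np.asarray(mag))

def maggies2mag(maggies):
    return -2.5 * np.log10(np.asarray(maggies))

print(maggies2mag(mag2maggies(np.array([20.0, 25.0]))))  # round-trips to [20. 25.]
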
JakeConway/FoundationToFHIR
|
https://github.com/JakeConway/FoundationToFHIR
|
3492323f5c8980f9f4a9e3ed08186ec7924ec177
|
55e1af2dbe37cf3b63208203bcfac5dfe14ac32c
|
e9ab85dc4d39ba650db1146c6294097472ae8f9a
|
refs/heads/master
| 2021-01-19T19:33:11.364041 | 2017-05-04T17:23:59 | 2017-05-04T17:23:59 | 83,726,799 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7039473652839661,
"alphanum_fraction": 0.7039473652839661,
"avg_line_length": 18.125,
"blob_id": "31de56afc996f7ea27d6fb73094869eb53b966dc",
"content_id": "d49eb49fc9c49dff42594f5e88608f514d53ff20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/info/urls.py",
"repo_name": "JakeConway/FoundationToFHIR",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = \"info\"\n\nurlpatterns = [\n url(r'^sourcecode', views.sourcecode, name='sourcecode'),\n]"
},
{
"alpha_fraction": 0.7681159377098083,
"alphanum_fraction": 0.7681159377098083,
"avg_line_length": 18.714284896850586,
"blob_id": "8029cedb7a5ba9037eee847dc0fe0ac325db9230",
"content_id": "5b59f4d1e098bc6a7fc626a9da150cf10057183f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/xmlToServer/apps.py",
"repo_name": "JakeConway/FoundationToFHIR",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.apps import AppConfig\n\n\nclass XmltoserverConfig(AppConfig):\n name = 'xmlToServer'\n"
},
{
"alpha_fraction": 0.7217805981636047,
"alphanum_fraction": 0.7233704328536987,
"avg_line_length": 26.2608699798584,
"blob_id": "1c56c4f8176764006e9b3f126092c20cddd9f54f",
"content_id": "8db525030e5b815c67ec1291883d0db61b37dd27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 629,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/xmlToServer/views.py",
"repo_name": "JakeConway/FoundationToFHIR",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n# Create your views here.\n\ndef index(request):\n return render(request, \"xmlToServer/XmlToServer.html\", {})\n\n\ndef checkpatients(request):\n resources = request.session['resources']\n context = {\n 'resources': json.dumps(resources)\n }\n return render(request, \"xmlToServer/checkPatients.html\", context)\n\n@csrf_exempt\ndef transfer(request):\n resources = request.body\n resources = json.loads(resources.decode('utf-8'))\n request.session['resources'] = resources\n return HttpResponse()\n\n\n"
},
{
"alpha_fraction": 0.5057471394538879,
"alphanum_fraction": 0.7011494040489197,
"avg_line_length": 16.600000381469727,
"blob_id": "253f0cf11180f64f5871e54e702744c02caac853",
"content_id": "578c51ac3ccc9800141848ecc2cb2d8cc7e8486a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "JakeConway/FoundationToFHIR",
"src_encoding": "UTF-8",
"text": "Django==1.9.7\ndj-database-url==0.4.1\ngunicorn==19.6.0\nwhitenoise==3.2.2\npsycopg2==2.6.2"
},
{
"alpha_fraction": 0.6828358173370361,
"alphanum_fraction": 0.6828358173370361,
"avg_line_length": 25.899999618530273,
"blob_id": "2fe191bef1c293544eca1bba9e05ca5978f8536b",
"content_id": "e42926160d580bfdbd92e266e325b9a8131a1706",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 10,
"path": "/xmlToServer/urls.py",
"repo_name": "JakeConway/FoundationToFHIR",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = \"xmlToServer\"\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^checkpatients/', views.checkpatients, name='checkpatients'),\n url(r'^transfer/', views.transfer, name='transfer')\n]"
}
] | 5 |
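The transfer view above stashes posted JSON in the session, and checkpatients reads it back when rendering. A minimal sketch of exercising that pair with Django's test client, assuming the xmlToServer URLs are mounted at the site root (the project urls.py is not in this record).

import json
from django.test import Client

client = Client()  # one Client instance shares a session across requests
payload = {"patients": ["example-a", "example-b"]}  # hypothetical resource list
client.post("/transfer/", data=json.dumps(payload),
            content_type="application/json")
response = client.get("/checkpatients/")
print(response.status_code)  # 200 once both views are reachable
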
zak1234us/scripts
|
https://github.com/zak1234us/scripts
|
67c8560ab900255fdd6dceb03f67b97c525dafda
|
b9b4a55a4075f7f7334a07a441f3b187e4317631
|
58dfd51cde03e611ceecc1177b744d2fe1f95083
|
refs/heads/master
| 2021-01-25T06:24:16.971769 | 2018-03-15T16:19:49 | 2018-03-15T16:19:49 | 93,564,345 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5614407062530518,
"alphanum_fraction": 0.5656779408454895,
"avg_line_length": 23.842105865478516,
"blob_id": "8358add1df802283552c148ae1abe5c7159c318d",
"content_id": "fcaa31973193641698f461ebe2af853c366a11fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 19,
"path": "/wordCount.py",
"repo_name": "zak1234us/scripts",
"src_encoding": "UTF-8",
"text": "#Counting words in a string\n\ndef wordCount():\n lst = list()\n count = dict()\n textToProcess = input('Please provide text or file: ')\n try:\n fhand = open(textToProcess)\n for line in fhand:\n line = line.rstrip()\n lst = line.split()\n for word in lst:\n count[word] = count.get(word, 0) + 1\n\n except:\n textToStr = textToProcess.split()\n return len(textToStr)\n return count\nprint(wordCount())\n"
},
{
"alpha_fraction": 0.5235008001327515,
"alphanum_fraction": 0.5332252979278564,
"avg_line_length": 29.850000381469727,
"blob_id": "61d1c434b5907cdbd9fc787d0b51d2069cb39764",
"content_id": "77e4f25fd1f0dcad1d96bcd2b62631085a0bed62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 20,
"path": "/pigLatin.py",
"repo_name": "zak1234us/scripts",
"src_encoding": "UTF-8",
"text": "# Translate to pig latin\n\nlst = ['sh', 'gl', 'ch', 'ph', 'tr', 'br', 'fr', 'bl', 'gr', 'st', 'sl', 'cl', 'pl', 'fl']\nlatinAdd = 'ay'\nsenToConvert = str(input('What would you like to convert to pig latin? '))\n\ndef convToPl(sentence):\n senList = sentence.split()\n vowels = ['a','e','i','o','u']\n plSen = []\n for word in senList:\n if word[0] in vowels:\n plSen.append(word + 'way')\n elif word[:2] in lst:\n plSen.append(word[2:] + word[:2] + latinAdd)\n else:\n plSen.append(word[1:] + word[:1] + latinAdd)\n print(' '.join(plSen))\n\nconvToPl(senToConvert)\n"
},
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.752136766910553,
"avg_line_length": 22.399999618530273,
"blob_id": "2de07dc9ca12fea22fff79d4018c76a387d07d5a",
"content_id": "7e276be678886f002c0f4c6a969996c724b502bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 5,
"path": "/stringrev.py",
"repo_name": "zak1234us/scripts",
"src_encoding": "UTF-8",
"text": "def stringReverse(string):\n reverseString = string[::-1]\n print(reverseString)\n\nstringReverse(stringToReverse)\n"
}
] | 3 |
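collections.Counter condenses the counting loop in wordCount() above. A sketch that keeps the same file-or-plain-text fallback but catches only OSError, so genuine bugs are not swallowed by a bare except.

from collections import Counter

def word_count(text_or_path):
    # Try the argument as a filename first, as the original does.
    try:
        with open(text_or_path) as fhand:
            return Counter(fhand.read().split())
    except OSError:
        # Not a readable file: treat the argument as plain text.
        return len(text_or_path.split())

print(word_count("the quick brown fox"))  # 4
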
DhruvDh/convolutions
|
https://github.com/DhruvDh/convolutions
|
5c9524a8d390aaf22a8dccb5e8312a1fe4fa2f85
|
667f0c75ef0aa22222352d35e0d904206f6cf772
|
eb8c610a694d09acc1c45004a65e3a7da29f7cdb
|
refs/heads/master
| 2022-06-18T17:26:04.547168 | 2022-06-07T18:25:20 | 2022-06-07T18:25:20 | 212,900,849 | 0 | 0 | null | 2019-10-04T20:53:48 | 2022-06-07T18:19:57 | 2022-06-07T18:25:20 |
Rust
|
[
{
"alpha_fraction": 0.40907856822013855,
"alphanum_fraction": 0.4444142282009125,
"avg_line_length": 30.991304397583008,
"blob_id": "afa0384aed97c60a1ff37f2a9be7255b52e93d58",
"content_id": "70c3242b2b5819b8c9313d360a9808e5ae5459d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 3679,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 115,
"path": "/src/main.rs",
"repo_name": "DhruvDh/convolutions",
"src_encoding": "UTF-8",
"text": "use itertools::iproduct;\nuse ndarray::prelude::*;\nuse ndarray::Zip;\nuse rayon::prelude::*;\nuse std::time::Instant;\nuse std::sync::Arc;\nuse parking_lot::{Mutex};\n\nconst NUM_THREADS: usize = 16;\n\nfn do_it(kernel_shape: (usize, usize), img_shape: (usize, usize)) -> f32 {\n let mut img = Array::from_elem(img_shape, 1.32f32);\n\n let mut output = Arc::new(Mutex::new(Vec::new()));\n let kernel_offset = kernel_shape.0 / 2;\n \n let now = Instant::now();\n\n rayon::scope(|s| {\n let y_range: Vec<usize> = (0..img_shape.1).collect();\n let y_range = y_range.as_slice();\n\n // for patch in img.windows(kernel_shape).into_par_iter() {\n // println!(\"{:?}\", patch);\n // break;\n // }\n\n for y_window in y_range.windows(kernel_shape.1) {\n let slab = img.slice(s![.., y_window[0]..*y_window.last().unwrap() + 1]);\n let kernel = Array::from_elem(kernel_shape.0 * kernel_shape.1, 1.5f32);\n let out = output.clone();\n\n s.spawn(move |s| {\n let x_range: Vec<usize> = (0..img_shape.0).collect();\n let x_range = x_range.as_slice();\n\n let mut patch_matrix = Array::from_elem(\n (\n img_shape.0 - kernel_shape.0 + 1,\n kernel_shape.0 * kernel_shape.1,\n ),\n 0f32,\n );\n\n // println!(\"{:?}\", patch_matrix);\n // println!(\"{:?}\", x_range.windows(kernel_shape.0).last());\n for x_window in x_range.windows(kernel_shape.0) {\n patch_matrix\n .row_mut(x_window[0])\n .assign(&\n slab.slice(s![x_window[0]..*x_window.last().unwrap() + 1, ..])\n .to_owned()\n .into_shape(kernel_shape.0 * kernel_shape.1)\n .unwrap()\n );\n }\n \n let mut _output = out.lock();\n (*_output).append(&mut kernel.dot(&(patch_matrix.t())).to_vec() );\n\n });\n }\n // output.par_extend(coords.into_par_iter().map(|(x, y)| {\n // Zip::from(img.slice( s![\n // (x - kernel_offset)..=(x + kernel_offset),\n // (y - kernel_offset)..=(y + kernel_offset)\n // ] )).and(&kernel)\n // .fold(0f32, |acc, i, k| acc + (i * k))\n // }));\n\n // println!(\"{:?}\", output.len());\n });\n\n \n let mut gaurd = output.lock();\n let out_vec: Vec<f32> = (*gaurd).drain(0..).collect();\n\n let output = Array::from_shape_vec(\n (img_shape.0 - (kernel_offset * 2), img_shape.1 - (kernel_offset * 2)),\n out_vec\n ).unwrap();\n \n let time_taken = now.elapsed().as_secs_f32();\n \n let pixels = (img_shape.0 - kernel_shape.0 + 1) * (img_shape.1 - kernel_shape.1 + 1);\n (pixels as f32 * 10e-9) / time_taken\n}\n\nfn main() {\n rayon::ThreadPoolBuilder::new()\n .num_threads(NUM_THREADS)\n .build_global()\n .unwrap();\n\n // let kernels = vec![(3, 3),]; \n let kernels = vec![(3, 3), (5, 5), (9, 9), (11, 11), (13, 13), (15, 15)];\n let imgs = vec![\n (1024, 768),\n (2048, 2048),\n (8192, 8192),\n (4194304, 768),\n (16777216, 768),\n ];\n\n for (i, k) in iproduct!(imgs, kernels) {\n println!(\n \"{}x{}\\tconvolution of\\t{}x{}\\t image proccessed at\\t{} gigapixels/sec.\",\n k.0,\n k.1,\n i.0,\n i.1,\n do_it(k, i)\n );\n }\n}\n"
},
{
"alpha_fraction": 0.4920273423194885,
"alphanum_fraction": 0.5694760680198669,
"avg_line_length": 23.44444465637207,
"blob_id": "573dbc562ea08e9f23d9c97d3e0df5d82811bc65",
"content_id": "0d60b9722008d221072d738a1cf7088c9302b5b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 18,
"path": "/plot.py",
"repo_name": "DhruvDh/convolutions",
"src_encoding": "UTF-8",
"text": "def perf(k, n, m):\n flops = ((2 * k * k) - 1) * (n - k + 1) * (m - k + 1)\n flops = flops * 10e-9\n\n time_taken = flops / 1638.4\n\n pixels_produced = (n - k + 1) * (m - k + 1)\n\n pixels_per_second = pixels_produced / time_taken\n return pixels_per_second * 10e-9\n\nimport matplotlib.pyplot as plt\n\nkernels = [3, 5, 7, 9, 11, 13, 15]\n\nplt.figure()\nplt.plot(kernels, [perf(k, 1024, 768) for k in kernels])\nplt.savefig('plot.png')"
},
{
"alpha_fraction": 0.6091954112052917,
"alphanum_fraction": 0.6695402264595032,
"avg_line_length": 19.52941131591797,
"blob_id": "835e7c67f834c8897e19d4bf1cf4013c8dd60f17",
"content_id": "5f95a241742a1e51fe0236f6476788838ab80146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 17,
"path": "/Cargo.toml",
"repo_name": "DhruvDh/convolutions",
"src_encoding": "UTF-8",
"text": "[package]\nname = \"convo-scratchpad\"\nversion = \"0.1.0\"\nauthors = [\"dhruvdh <[email protected]>\"]\nedition = \"2018\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\nndarray = \"0.13.0\"\nitertools = \"0.8.0\"\nrayon = \"1.2.0\"\nparking_lot = \"0.9.0\"\n\n[profile.release]\n# lto = \"thin\"\n# codegen-units = 1"
},
{
"alpha_fraction": 0.6210247278213501,
"alphanum_fraction": 0.6908127069473267,
"avg_line_length": 62.74285888671875,
"blob_id": "b9dbc12a95e991ad196459a7fcec3a8879509d49",
"content_id": "9a6f03a28fa40e2b0b7e23be4fdfdd29652e4167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2276,
"license_type": "no_license",
"max_line_length": 281,
"num_lines": 35,
"path": "/README.MD",
"repo_name": "DhruvDh/convolutions",
"src_encoding": "UTF-8",
"text": "# Assignment 4: Performance Modeling: 2D Convolution\r\n\r\n## Modelling\r\n\r\n- **How many Flops needs to be done to compute a convolution of dimension k on a image of size n × m?**\r\n\r\nAssuming the edge pixels are ignored, the dimensions of the output image would be (`n - k + 1`) x (`m - k + 1`). Meaning that we'd have to calculate the values for `(n - k + 1) * (m - k + 1)` pixels; which implies that we'd need to perform `(n - k + 1) * (m - k + 1)` convolutions.\r\n\r\nThe amount of flops needed for one convolution is `k * k` multiplications and `k * k - 1` additions, coming to `(2 * k * k) - 1` flops.\r\n\r\nThusly, the total amount of floating point operations needed are `((2 * k * k) - 1) * (n - k + 1) * (m - k + 1)`.\r\n\r\n- **How much memory needs to be moved to compute a convolution of dimension k on a image of size n × m?**\r\n\r\nAs calculated above, we need to perform `(n - k + 1) * (m - k + 1)` convolutions. For each, convolution we'll need to move `k * k` floating point numbers for the kernel, and `k * k` pixels from the image.\r\n\r\nTotal memory moved would be `(2 * k * k) * (n - k + 1) * (m - k + 1) * 32` bits.\r\n\r\n- **Assuming the performance numbers you measured in assignment 1 and 2, how long should computing a convolution of dimension 3 on an image of 1024 × 768 take?**\r\n\r\nAccording to what we found in question 1 it would take `13,308,484` flops or `0.013308484` GFlops to get to the final output image. According to my previous estimations a compute node on mamba should have a peak performance of `1638.4` GFlops/sec.\r\n\r\nIt should thusly take `0.013308484 ÷ 1638.4` seconds which comes to `8.12285400390625e-6` seconds or `8.12285` microseconds.\r\n\r\n- **What about a convolution of dimension 11 ? On this kind of problem, performance is usually reported in pixel processed per second.**\r\n\r\nFor a `1024 x 768` image, a kernel of dimension `11` would take `185,235,492` flops. Which would take `1.130587719726563e-4` seconds.\r\n\r\nSince in all we processed and produced `1014 x 758` output pixels,we produced `6,798,340,248.962656` pixels per second, or `6.7983` gigapixels/sec.\r\n\r\n- **Plot maximum expected performance as a function of k.**\r\n\r\n\r\n\r\nY-axis is pixels processed in gigapixels per second, and X-axis is the kernel size."
}
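A minimal sketch of the cost model described in the README record above; the formulas and the `1638.4` GFlops/s peak figure come from that document, while the function name and the 32-bit-value assumption are illustrative only.

```python
def conv2d_cost(n, m, k, peak_gflops=1638.4):
    """Flop and memory model for a k x k valid convolution over an n x m image."""
    out_pixels = (n - k + 1) * (m - k + 1)       # one convolution per output pixel
    flops = ((2 * k * k) - 1) * out_pixels       # k*k multiplications + (k*k - 1) additions each
    bits_moved = (2 * k * k) * out_pixels * 32   # kernel weights + image window, 32-bit values
    seconds = flops / (peak_gflops * 1e9)
    return flops, bits_moved, seconds

# Reproduces the README's figure of 13,308,484 flops for k=3 on a 1024 x 768 image.
print(conv2d_cost(1024, 768, 3))
```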
] | 4 |
MustafaK99/Final-Year-Project
|
https://github.com/MustafaK99/Final-Year-Project
|
13889955dae8cd2fcd520c36cbf5a192cdcfbec5
|
53969ec355d7d82cfe8771a92d891e8b03013d8d
|
64fbeadeb4f7f12f3ee32ce244432ea4fbd4284a
|
refs/heads/master
| 2023-04-11T21:06:30.811789 | 2021-05-15T01:27:15 | 2021-05-15T01:27:15 | 316,584,932 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.6994871497154236,
"avg_line_length": 32.620689392089844,
"blob_id": "de1d07e62b41d2b3886f1c4885e452463efbbfa8",
"content_id": "8873f44b976188ea07514528b371fed70ae9a476",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 29,
"path": "/Social Distancing App/FlaskApp/app/models.py",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom app import db, login_manager\nfrom flask_login import UserMixin\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(60), nullable=False)\n detections = db.relationship('Detections', backref='user', lazy=True)\n\n def __repr__(self):\n return f\"User('{self.username}')\"\n\n\nclass Detections(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n numberOfViolations = db.Column(db.Integer)\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def __repr__(self):\n return f\"detected('{self.numberOfViolations}')\"\n"
},
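A short usage sketch for the SQLAlchemy models in `models.py` above; it assumes the `db` session and bcrypt setup from the application's `app` package, which are not shown in this record, and the email address is a placeholder.

```python
# Hypothetical interactive session against the models defined above.
from app import db
from app.models import User, Detections

user = User(username='demo_user', email='demo@example.com', password='<bcrypt hash>')
db.session.add(user)
db.session.commit()

# The backref declared on User.detections lets detections be attached via `user=`.
db.session.add(Detections(numberOfViolations=3, user=user))
db.session.commit()
print(user.detections)  # -> [detected('3')]
```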
{
"alpha_fraction": 0.534246563911438,
"alphanum_fraction": 0.5430843830108643,
"avg_line_length": 24.410112380981445,
"blob_id": "aef095868ace633638c445a361878162d7a18eee",
"content_id": "3b6058b257b037a2ba4a9be18c1ff58cfca0ff85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4526,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 178,
"path": "/Social Distancing App/COVID Data/test2.py",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "from requests import get\nfrom json import dumps\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport pytz\n\n\n\n\n\n\n#now = datetime.now()\n#today4pm = now.replace\n\n\n\ndef getLatestByRegion(ENDPOINT, AREA_TYPE, DATE):\n\n\n filters = [\n f\"areaType={ AREA_TYPE }\",\n f\"date={ DATE }\",\n \n ]\n\n structure = {\n \"areaName\":\"areaName\",\n \"date\": \"date\",\n \"newCases\": \"newCasesByPublishDate\",\n \"newDeaths\": \"newDeaths28DaysByPublishDate\",\n \"cumDeaths\" : \"cumDeaths28DaysByPublishDateRate\"\n \n \n }\n\n api_params = {\n \"filters\": str.join(\";\", filters),\n \"structure\": dumps(structure, separators=(\",\", \":\"))\n }\n\n\n response = get(ENDPOINT, params=api_params, timeout=10)\n\n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n\n #print(response.url)\n #print(response.json())\n rawJ = response.json()\n df = pd.DataFrame(rawJ[\"data\"])\n # print(df)\n return(df)\n\ndef getLatestByNation(ENDPOINT, AREA_TYPE, DATE):\n filters = [\n f\"areaType={ AREA_TYPE }\",\n f\"date={ DATE }\",\n \n ]\n\n structure = {\n\n \"areaName\":\"areaName\",\n \"date\": \"date\",\n \"newCases\": \"newCasesByPublishDate\",\n \"cumulative\": \"cumCasesByPublishDate\",\n \"newDeaths\": \"newDeaths28DaysByPublishDate\",\n \"cumDeaths\" : \"cumDeaths28DaysByPublishDateRate\"\n \n \n }\n\n api_params = {\n \"filters\": str.join(\";\", filters),\n \"structure\": dumps(structure, separators=(\",\", \":\"))\n }\n\n\n response = get(ENDPOINT, params=api_params, timeout=10)\n\n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n\n # print(response.url)\n #print(response.json())\n rawJ = response.json()\n df = pd.DataFrame(rawJ[\"data\"])\n # print(df)\n return df\n\n \n\n \ndef latestGraphByRegion():\n ENDPOINT = \"https://api.coronavirus.data.gov.uk/v1/data\"\n AREA_TYPE = \"region\"\n Now = (datetime.now(pytz.timezone('Europe/London')))\n currentNow = Now.strftime(\"%H:%M:%S\")\n if (currentNow >= \"16:00:00\"):\n DATE = (datetime.today().strftime('%Y-%m-%d'))\n else:\n DATE = datetime.now() - timedelta(days=1)\n DATE = DATE.strftime('%Y-%m-%d')\n\n df = getLatestByRegion(ENDPOINT, AREA_TYPE, DATE)\n areaName = df['areaName'].values.tolist()\n newCases = df['newCases'].values.tolist()\n newDeaths = df['newDeaths'].values.tolist()\n cumulative = df['cumDeaths'].values.tolist()\n print(df)\n\n \n sns.barplot(data=df, x=\"areaName\", y=\"newCases\");\n plt.xlabel('')\n plt.ylabel('New cases')\n plt\n plt.title('Number of new cases recorded on {} in England by Region'.format(DATE))\n plt.xticks(size=9)\n sns.despine();\n plt.show()\n sns.barplot(data=df, x=\"areaName\", y=\"newDeaths\");\n plt.xlabel('')\n plt.ylabel('New Deaths')\n plt\n plt.title('Number of new deaths recorded on {} in England by Region'.format(DATE))\n plt.xticks(size=9)\n sns.despine();\n plt.show()\n\n\n\n \n\n \n\ndef latestGraphByNation():\n ENDPOINT = \"https://api.coronavirus.data.gov.uk/v1/data\"\n AREA_TYPE = \"nation\"\n #DATE = (datetime.datetime.today().strftime('%Y-%m-%d'))\n DATE = \"2021-01-31\"\n df = getLatestByNation(ENDPOINT, AREA_TYPE,DATE)\n\n \n print(df)\n\n sns.barplot(data=df, x=\"areaName\", y=\"newCases\");\n plt.xlabel('')\n plt.ylabel('New cases')\n plt\n plt.title('Number of new cases recorded on {} in the UK by nation'.format(DATE))\n plt.xticks(size=9)\n sns.despine();\n plt.show()\n\n areaName = 
df['areaName'].values.tolist()\n newCases = df['newCases'].values.tolist()\n \n \n\n sns.barplot(data=df, x=\"areaName\", y=\"newDeaths\");\n plt.xlabel('')\n plt.ylabel('New Deaths')\n plt\n plt.title('Number of new deaths recorded on {} in the UK by nation'.format(DATE))\n plt.xticks(size=9)\n sns.despine();\n plt.show()\n\n\n\n\n \n\nlatestGraphByRegion()\n#latestGraphByNation()\n\n\n\n"
},
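The publication-time logic in `latestGraphByRegion` above can be isolated into a small helper; a sketch only, assuming (as the file does) that daily figures appear at 16:00 Europe/London time.

```python
from datetime import datetime, timedelta
import pytz

def latest_available_date():
    # Daily UK figures are published at 16:00 London time; before that,
    # fall back to yesterday's date (the same rule latestGraphByRegion applies).
    now = datetime.now(pytz.timezone('Europe/London'))
    if now.strftime('%H:%M:%S') >= '16:00:00':
        return now.strftime('%Y-%m-%d')
    return (now - timedelta(days=1)).strftime('%Y-%m-%d')
```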
{
"alpha_fraction": 0.6807268261909485,
"alphanum_fraction": 0.6809431314468384,
"avg_line_length": 32.992645263671875,
"blob_id": "da1189b5c3934b70ad491c27e4d10443c493e6a6",
"content_id": "20c125820515af93688e7a0eb8c2a1e8a38ec2de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4623,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 136,
"path": "/Social Distancing App/FlaskApp/app/routes.py",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "from flask import render_template, url_for, flash, redirect, request, Response\nfrom app.social_distance_advanced import Detection\nfrom app import app, dataScraper, db, bcrypt\nfrom app.forms import RegistrationForm, LoginForm, UpdatedAccountInfoForm, DetectionMade\nfrom app.models import User, Detections\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport json\nimport random\nimport time\nfrom datetime import datetime\n\ndetector = Detection()\nrandom.seed()\n\n\[email protected]('/home')\[email protected]('/index')\[email protected]('/')\ndef welcome():\n return render_template('home.html')\n\n\n\n\n\[email protected]('/country')\[email protected]('/nation')\ndef nation():\n area_name, new_cases, new_deaths, cumulative = dataScraper.latestGraphByNation()\n return render_template('nation.html', area_name=area_name, new_cases=new_cases, new_deaths=new_deaths,\n cumulative=cumulative)\n\n\[email protected]('/county')\[email protected]('/region')\ndef region():\n area_name, new_cases, new_deaths, cumulative = dataScraper.latestGraphByRegion()\n return render_template('region.html', area_name=area_name, new_cases=new_cases, new_deaths=new_deaths,\n cumulative=cumulative)\n\n\[email protected](\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('welcome'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash(f'Account created! please log in', 'Success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\[email protected](\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('detection_made'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('detection_made'))\n else:\n flash('Login Unsuccessful, email and/or password are incorrect please try again', 'danger')\n return render_template('login.html', title='login', form=form)\n\n\[email protected](\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('welcome'))\n\n\[email protected](\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdatedAccountInfoForm()\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('your account info has been updated', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n return render_template('account.html', title='Account', form=form)\n\n\[email protected](\"/detectionsMade\")\n@login_required\ndef detection_made():\n user = current_user\n detections = user.detections\n return render_template('detectionsMade.html', title='Detections made', detections=detections)\n\n\[email protected](\"/graph\")\n@login_required\ndef graphs():\n user = current_user\n detections = user.detections\n return render_template(\"graph.html\", 
title='Graphical breakdown', detections=detections)\n\n\[email protected](\"/detection\", methods=['Get', 'POST'])\n@login_required\ndef new_detection():\n form = DetectionMade()\n if form.validate_on_submit():\n NumberOfTotalViolations = detector.getNumberOfViolations()\n detection = Detections(numberOfViolations=NumberOfTotalViolations, user=current_user)\n db.session.add(detection)\n db.session.commit()\n flash('detection complete', 'success')\n return redirect(url_for('detection_made'))\n return render_template('detection.html', title='New detection', form=form)\n\n\ndef gen(social_distance_advanced):\n while True:\n frame = social_distance_advanced.startProcess()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n\[email protected](\"/video_feed\")\n@login_required\ndef video_feed():\n return Response(gen(detector),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n"
},
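The `video_feed` route above uses the common `multipart/x-mixed-replace` (MJPEG) streaming pattern; below is a minimal self-contained version of that pattern, with a pre-encoded JPEG file standing in for the `Detection.startProcess()` frame source. The filename is hypothetical.

```python
from flask import Flask, Response

app = Flask(__name__)

def frames():
    # Dummy frame source: re-yields one pre-encoded JPEG forever;
    # Detection.startProcess() plays this role in the application above.
    with open('frame.jpg', 'rb') as f:
        jpeg = f.read()
    while True:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    # Each multipart part replaces the previous one in the browser.
    return Response(frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
```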
{
"alpha_fraction": 0.8148148059844971,
"alphanum_fraction": 0.8148148059844971,
"avg_line_length": 26,
"blob_id": "8ad60f3fb4aa5d48d1115de88f10b1d32b444543",
"content_id": "886a5beb8f4637fb098f19b8b8f9df1989661252",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 2,
"path": "/README.md",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "# Final-Year-Project\nMy Final Year University Project\n"
},
{
"alpha_fraction": 0.6685840487480164,
"alphanum_fraction": 0.6716814041137695,
"avg_line_length": 40.85185241699219,
"blob_id": "5bde67bdf0d846101bfbb93ec0f035de5b25ff68",
"content_id": "a17fda855748ae2f83d437b0ec775b65d24c1a9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2260,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 54,
"path": "/Social Distancing App/FlaskApp/app/forms.py",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom flask_login import current_user\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\nfrom app.models import User\n\n\nclass RegistrationForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), Length(min=5, max=15)])\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password',\n validators=[DataRequired(), EqualTo('confirm', message='Passwords do not match'),\n Length(min=8)])\n confirm = PasswordField('Repeat Password', validators=[DataRequired()])\n submit = SubmitField('Sign Up')\n\n def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError(\"Username unavailable, please try again \")\n\n def validate_email(self, email):\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError(\"Email unavailable, please try again\")\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember = BooleanField('Remember Me')\n submit = SubmitField('Login')\n\n\nclass UpdatedAccountInfoForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), Length(min=5, max=15)])\n email = StringField('Email', validators=[DataRequired(), Email()])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError(\"Username unavailable, please try again \")\n\n def validate_email(self, email):\n if email.data != current_user.email:\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError(\"Email unavailable, please try again\")\n\n\nclass DetectionMade(FlaskForm):\n submit = SubmitField(\"End Detection\")\n"
},
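The forms above rely on WTForms' inline-validator hook: any method named `validate_<fieldname>` is called automatically after the declared validators pass. A stripped-down sketch of that mechanism, independent of the repo's models:

```python
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired, ValidationError

class ExampleForm(FlaskForm):
    username = StringField('Username', validators=[DataRequired()])

    def validate_username(self, username):
        # Runs automatically during form.validate_on_submit(),
        # exactly like the uniqueness checks in the forms above.
        if username.data.lower() == 'admin':
            raise ValidationError('This name is reserved')
```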
{
"alpha_fraction": 0.5744909048080444,
"alphanum_fraction": 0.5809217691421509,
"avg_line_length": 19.733333587646484,
"blob_id": "06966bcbed0656d84c38a3fcd1139cd34447d813",
"content_id": "e12c2ca2a880b53e37e8f2f47076919bb6a83f2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 45,
"path": "/Social Distancing App/COVID Data/test.py",
"repo_name": "MustafaK99/Final-Year-Project",
"src_encoding": "UTF-8",
"text": "from requests import get\nimport pandas as pd\nfrom datetime import datetime\n\n\n\n\ndef get_data(url):\n response = get(endpoint, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()\n \n\nif __name__ == '__main__':\n\n endpoint = (\n 'https://api.coronavirus.data.gov.uk/v1/data?'\n 'filters=areaType=nation \n 'structure={\"areaName\":\"areaName\",\"date\":\"date\",\"newCases\":\"newCasesByPublishDate\"}'\n )\n \n data = get_data(endpoint)\n\n df = pd.DataFrame(data[\"data\"])\n print(df.head())\n\n\n \n for p in data['data']:\n print('area: ' + p['areaName'])\n print('date: ' + p['date'])\n print('New Cases: ' + str(p['newCases']))\n\n\n areaNames = df['areaName'].values.tolist()\n print(areaNames)\n newCases = df('newCases'].values.tolist()\n print(newCases)\n\n\n \n #print(data)\n"
}
] | 6 |
mkramlich/tutu
|
https://github.com/mkramlich/tutu
|
1d72176383472aa17c9caa1041c4b7d9e26137c2
|
8dd314b0536199ec78444a1ff89bf26a6f0ba930
|
b42d9acb4e51161be229c75fe300affae9f8cf4f
|
refs/heads/master
| 2015-07-28T10:11:45 | 2008-09-13T08:23:37 | 2008-09-13T08:23:37 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7114556431770325,
"alphanum_fraction": 0.7200689315795898,
"avg_line_length": 47.375,
"blob_id": "f39e24f4994ff844fd096efa1fe745ce8128942b",
"content_id": "9e361210e8077a5c88ebe873a8ff3bc00104e208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 341,
"num_lines": 24,
"path": "/gen_index",
"repo_name": "mkramlich/tutu",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2.5\n\nfrom __future__ import with_statement\nfrom tutu import *\n\n# Determine set of all search parameter permutations supported by the index\n\n#TODO list of supported 'title_exact' values should be determined better way; one idea, scour the raw jobs database and create a list of all actual job titles found; regardless, enduser searches for unindexed job titles should fail gracefully, possibly by falling back to 'live' searches, but there are risks and tradeoffs with that approach\ntitle_exact__supported = ('Software Engineer', 'Hair Stylist')\nresults_per_page__supported = (5, 10, 20, 50)\n\nparam_perms = []\n\nfor rpp in results_per_page__supported:\n for te in title_exact__supported:\n params = {TITLE_EXACT:te, RESULTS_PER_PAGE:rpp, PAGE:0}\n param_perms.append(params)\n\n# Build index by determining actual search results for each search param permutation, and storing as a list of job id's\nwith file('index','w') as f:\n for params in param_perms:\n results, found_qty, total_qty = determine_results_by_live_search(params)\n ids = map(lambda x: x['id'], results)\n f.write('%s | %s\\n' % (params, ids))\n"
},
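Each line of the `index` file written by `gen_index` above pairs a search-params dict with the matching job ids, separated by `|`. A small reader for that format, sketched here with `ast.literal_eval` as a safer stand-in for the plain `eval()` the repo uses elsewhere:

```python
import ast

def load_index(path='index'):
    # Parses "params | ids" lines, e.g.
    # {'title_exact': 'Hair Stylist', ...} | [1, 3]
    index = {}
    with open(path) as f:
        for line in f:
            params, ids = line.split('|', 1)
            index[params.strip()] = ast.literal_eval(ids.strip())
    return index
```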
{
"alpha_fraction": 0.5943719744682312,
"alphanum_fraction": 0.6142759323120117,
"avg_line_length": 32,
"blob_id": "0e234f3f65fe049ddba45d4d18e7081ea5128ac3",
"content_id": "6ee37db8e8af166f07fa1382ef5971bd01917259",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1457,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 44,
"path": "/pop_memcache",
"repo_name": "mkramlich/tutu",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2.5\n\nfrom __future__ import with_statement\n\nimport memcache # tummy.com python_memcache-1.43 or compatible\nimport sys\nimport tutu\n\n\ndef identify_bad_chars_in_key(key):\n #TODO hack: logic copied from guts of python_memcache 1.43 memcache.py line ~955; should be done a better way\n for i,ch in enumerate(key):\n if ord(ch) < 33 or ord(ch) == 127:\n print 'bad char in key: \"%s\" at pos %s' % (ch, i)\n\n#print ' '.join(sys.argv)\n\nmc = memcache.Client(['127.0.0.1:11211',])\n\n# populate with all job postings, keyed on job id\njobs = tutu.load_file_as_python_to_dict('jobs')['root']['jobs']\nfor job in jobs:\n key = 'job-%s' % job['id']\n value = job\n #print 'populating %s' % key\n mc.set(key,value)\n\n# populate with all mappings of \"search-param-permutation to job id list\", using the disk-based index and jobs database\nwith file('index') as f:\n #TODO refactor to only read one line at a time, to reduce memory use:\n lns = f.readlines()\n for ln in lns:\n #print 'read line: %s' % ln\n pcs = ln.split('|')\n #print 'pcs: %s' % pcs\n key = 'params-' + pcs[0].strip()\n key = key.replace(' ','+') #TODO tmp hack to workaround API puking on space chars in the key\n value = pcs[1].strip()\n #print 'setting:\\n\\tkey: %s\\n\\tvalue: %s' % (key, value)\n identify_bad_chars_in_key(key)\n \n mc.set(key,value)\n\nprint 'stats: %s' % mc.get_stats()\n \n"
},
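The space-replacement hack and `identify_bad_chars_in_key` in `pop_memcache` above work around memcached's key rules (no spaces or control characters, 250-byte cap). A small sanitizer covering both rules, a sketch only and not part of the repo:

```python
import hashlib

def safe_memcache_key(key, max_len=250):
    # memcached keys must be printable ASCII without spaces and at most
    # 250 bytes; hash anything that violates either constraint.
    if len(key) <= max_len and all(33 <= ord(c) < 127 for c in key):
        return key
    return 'h-' + hashlib.sha1(key.encode()).hexdigest()
```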
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 11.5,
"blob_id": "5d28520abc39a129e8270b9dab5b2508fafe9e40",
"content_id": "1823ae7190c5933bad1d6ba3131f898363adbf58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 4,
"path": "/tutu",
"repo_name": "mkramlich/tutu",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2.5\n\nimport tutu\ntutu.main()\n"
},
{
"alpha_fraction": 0.5608351230621338,
"alphanum_fraction": 0.5687544941902161,
"avg_line_length": 27.91666603088379,
"blob_id": "038611f856b3b87d5cacbae2688122457d2da994",
"content_id": "dece4a4d72cb8a6a6528f09307fe48520b9004d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1389,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 48,
"path": "/gen_jobs",
"repo_name": "mkramlich/tutu",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2.5\n\nfrom __future__ import with_statement\n\nimport pprint\nimport sys\n\nqty = 100\nif len(sys.argv) > 1:\n qty = int(sys.argv[1])\n\n#TODO job id could be left out and have it auto-assign\n\nroot = {\n 'jobs' : [\n {'id' : 0,\n 'title' : 'Software Engineer',\n 'desc' : 'Need reel gud software engineer for exciting company in Redmond.',\n 'email' : '[email protected]'},\n\n {'id' : 1,\n 'title' : 'Hair Stylist',\n 'desc' : 'We have an urgent need for a hair stylist. Must know hair. Must have style.',\n 'email' : '[email protected]'},\n ],\n}\n\n\nif qty < len(root['jobs']):\n root['jobs'] = root['jobs'][:qty]\nelif qty > len(root['jobs']):\n addtl = qty - len(root['jobs'])\n #TODO determine last_id the right way\n last_id = 1\n for i in xrange(addtl):\n last_id = last_id + 1\n #TODO determine title via random selection from pool of valid titles\n #TODO generate a random description which can be mostly nonsense but have a few choice words of interest, like 'salary' or 'Java', etc.\n j = {'id' : last_id,\n 'title' : 'Software Engineer',\n 'desc' : 'Need reel gud software engineer for exciting company in Redmond.',\n 'email' : '[email protected]'}\n\n root['jobs'].append(j)\n\nwith file('jobs','w') as f:\n f.write('root = ')\n pprint.pprint(root,f)\n\n"
},
{
"alpha_fraction": 0.6067610383033752,
"alphanum_fraction": 0.6131175756454468,
"avg_line_length": 29.086956024169922,
"blob_id": "071b7aded5c6e02ee90146b3d7c690a2cfdb754c",
"content_id": "97a407649f0ab6fc154ee165265e6fdd9a777101",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3461,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 115,
"path": "/tutu.py",
"repo_name": "mkramlich/tutu",
"src_encoding": "UTF-8",
"text": "'''\nTuTu: Job Search Made ... Er... Text-tastic?\nby Mike Kramlich\n'''\n\nfrom __future__ import with_statement\n\nimport optparse\n\nTITLE_EXACT = 'title_exact'\nRESULTS_PER_PAGE = 'results_per_page'\nPAGE = 'page'\n\ndef read_file(filename): \n with file(filename) as f:\n return f.read()\n\ndef load_string_as_python_to_dict(s):\n gd = {}\n ld = {}\n exec s in gd, ld\n return ld\n\ndef load_file_as_python_to_dict(filename):\n s = read_file(filename)\n ld = load_string_as_python_to_dict(s)\n return ld\n\ndef determine_search_params(sys_argv):\n parser = optparse.OptionParser()\n parser.add_option('--title-exact', '-t')\n parser.add_option('--page', '-p', type='int', default=0)\n parser.add_option('--results-per-page','-r', type='int', default=10)\n opts, args = parser.parse_args(sys_argv)\n #print 'optparse gave:\\n%s\\n%s' % (opts, args)\n #print 'opts vars %s' % vars(opts)\n return vars(opts)\n\ndef determine_results_by_live_search(params):\n data = load_file_as_python_to_dict('jobs')\n jobs = data['root']['jobs']\n total_qty = len(jobs)\n results = []\n if TITLE_EXACT in params:\n for j in jobs:\n if j['title'] == params[TITLE_EXACT]:\n results.append(j)\n else:\n results = list(jobs)\n found_qty = len(results)\n # limit results to the slice that falls within the page desired\n start = params[PAGE] * params[RESULTS_PER_PAGE]\n end = min(start + params[RESULTS_PER_PAGE], len(results))\n #print 'results len %s' % len(results)\n #print '%s %s' % (start, end)\n results = results[start:end]\n return results, found_qty, total_qty\n\ndef determine_results_by_memcache_lookup(params):\n results = []\n found_qty = 0\n total_qty = 0\n\n # access memcache to lookup the job id list mapped from given search params\n import memcache\n #TODO wrap in 'with' block and/or ensure that mc.disconnect_all() called\n mc = memcache.Client(['127.0.0.1:11211',])\n #print '%s' % mc.get_stats()\n key = ('params-%s' % str(params)).replace(' ','+')\n #print 'memcache fetching: %s' % key\n value = mc.get(key)\n #print 'got value: %s' % value\n job_id_list = eval(value)\n\n # fetch jobs based on id\n for id in job_id_list:\n key = 'job-%s' % id\n #print 'memcache fetching: %s' % key\n value = mc.get(key)\n #print 'got value: %s' % value\n #print 'type: %s' % type(value)\n job = value\n results.append(job)\n\n found_qty = len(results)\n #TODO total_qty\n \n return results, found_qty, total_qty\n\ndef display_results(params, results, found_qty, total_qty):\n print 'Search Params: %s' % params\n print 'Results: %s displayed of %s matched of %s total jobs in database' % (len(results), found_qty, total_qty)\n print '-' * 20\n\n for job in results:\n print job['title']\n print job['desc']\n print job['email']\n print 'id: %s' % job['id']\n print '-' * 20\n\ndef main():\n import sys\n\n print __doc__.strip() + '\\n' + '-' * 80\n\n params = determine_search_params(sys.argv)\n \n #NOTE: to see how much slower a live search would be, comment out the determine_results_by_memcache_lookup fn assignment \n determine_results = determine_results_by_live_search\n determine_results = determine_results_by_memcache_lookup\n\n results, found_qty, total_qty = determine_results(params)\n\n display_results(params,results,found_qty,total_qty)\n\n"
}
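The NOTE at the bottom of `tutu.py` above suggests comparing the live search against the memcache lookup; a minimal timing harness for that comparison, assuming the jobs file, index, and memcached instance have already been populated by the scripts above:

```python
import time
import tutu

params = {tutu.TITLE_EXACT: 'Software Engineer',
          tutu.RESULTS_PER_PAGE: 10, tutu.PAGE: 0}

for fn in (tutu.determine_results_by_live_search,
           tutu.determine_results_by_memcache_lookup):
    start = time.time()
    fn(params)  # same params, two backends
    print('%s took %.4fs' % (fn.__name__, time.time() - start))
```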
] | 5 |
alpakido/tgscrape
|
https://github.com/alpakido/tgscrape
|
dfc90f346f4c28d89c8dd68d055aa38f363dce1a
|
f3ad779c7569028ecff5b1c5cc4f36f77ad1c1d4
|
6b59fb2eed5e7798a2a967b25fa687316beaa8a5
|
refs/heads/master
| 2021-10-25T04:13:34.984692 | 2019-03-31T16:30:33 | 2019-03-31T16:30:33 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6397759318351746,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 28.26229476928711,
"blob_id": "08b2fdd2f9ec40fec93d6c505622997f0247dbcb",
"content_id": "867a53507ba17300a2baa70ecb5da0ec5e46cb6d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1785,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 61,
"path": "/README.md",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "# tgscrape\nQuick and dirty public Telegram group message scraper\n\n# Usage\n## To dump messages from a public group\n```bash\n$ python3 tgscrape.py <groupname> [minid] [maxid]\n```\n### Examples\nTo dump all messages in the group _fun_with_friends_ type:\n```bash\n$ python3 tgscrape.py fun_with_friends\n```\nYou can specify the message id you want to start and stop. For instance, to dump messages with id's 1000 through 2000 type:\n```bash\n$ python3 tgscrape.py fun_with_friends 1000 2000\n```\nIf you want to start at message id 1000 and dump all messages after it, just skip the last parameter:\n```bash\n$ python3 tgscrape.py fun_with_friends 1000\n```\nRetrieved messages are stored in json format in the `conversations` folder.\n\n## To read and search dumped messages\n```bash\n$ python3 tgscape_cli.py <groupname>\n```\n\nThe following is the list and description of available commands:\n```\nCommands:\n search <terms> search words or strings (in quotes) in messages and names\n all returns all dumped messages\n last <num> returns last <num> messages (default: 10)\n date <date> returns all messages for a date (format: YYYY-MM-DD)\n wordcloud returns the top 20 words (wordlen > 3)\n exit exits the program\n help this\n```\n### Examples\nIf you want to search all messages and names containing _either_ \"foo\" and \"bar\" type:\n```\n> search foo bar\n```\nIf you want to search all messages and names containing the string \"foo bar\" type:\n```\n> search \"foo bar\"\n```\nTo read all messages written on January 3rd, 2018, type:\n```\n> date 2018-03-01\n```\n\n# Requirements\n```\nBeautifulSoup4\n```\nTo install dependencies:\n```bash\n$ pip install -r requirements.txt\n```\n"
},
{
"alpha_fraction": 0.5009781122207642,
"alphanum_fraction": 0.5076290965080261,
"avg_line_length": 28.54913330078125,
"blob_id": "7bfb6eafd7e9a87937e25a42fceed0c0312c1d28",
"content_id": "7a969a46cc2fb2a482bce24a1caad023c09bea1c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5112,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 173,
"path": "/tgscrape_cli.py",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ntgscrape cli to search and read scraped conversations\n\"\"\"\nimport sys\nimport re\nimport db\nfrom util import *\n\ndef usage(pyfile):\n \"\"\" Usage \"\"\"\n return 'Usage: {} <groupname>'.format(pyfile)\n\n\ndef print_help():\n \"\"\"Prints program's help\"\"\"\n print(\"\"\"\n Commands:\n search <terms> search words or strings (in quotes) in messages and names\n all returns all dumped messages\n last <num> returns last <num> messages (default: 10)\n date <date> returns all messages for a date (format: YYYY-MM-DD)\n wordcloud returns the top 20 words (wordlen > 3)\n exit exits the program\n help this\n \"\"\")\n\ndef search_cmd(search_args):\n \"\"\" Search text in conversation \"\"\"\n if search_args[0] in [\"'\", '\"']:\n search_entries = search_args.strip('\"\\'')\n else:\n search_entries = search_args.split(' ')\n\n results = []\n for entry_key in DATABASE.keys():\n msg = DATABASE[entry_key]\n search_content = msg['msg'] + \\\n msg['quote'] + \\\n msg['name'] + \\\n msg['username'] + \\\n msg['fwd_name'] + \\\n msg['fwd_username']\n if isinstance(search_entries, str):\n if search_entries.lower() in search_content.lower():\n results.append(entry_key)\n else:\n for search_entry in search_entries:\n if search_entry.lower() in search_content.lower():\n results.append(entry_key)\n\n results = set(results)\n for db_id in sorted(results):\n print_object(DATABASE[db_id])\n print(\"- Total Entries: {}\".format(len(results)))\n\ndef print_wordcloud():\n \"\"\" Generate wordcloud from messages \"\"\"\n wcloud = {}\n for entry_key in DATABASE.keys():\n msg = DATABASE[entry_key]['msg']\n if msg and msg[0] == '|':\n continue\n msg = re.sub(r'[^\\w ]', '', msg, flags=re.IGNORECASE)\n if msg:\n msg = msg.split(' ')\n msg = [m for m in msg if len(m) > 3]\n for word in filter(None, msg):\n if word in wcloud.keys():\n wcloud[word] += 1\n else:\n wcloud[word] = 1\n\n wcloud = sorted(wcloud.items(), key=lambda v: v[1], reverse=True)\n for word in wcloud[0:20]:\n print(word, end='')\n print()\n\n\ndef print_all_messages():\n \"\"\"Prints all dumped messages\"\"\"\n for entry_key in sorted(DATABASE.keys()):\n print_object(DATABASE[entry_key])\n\n\ndef print_last_messages(num=20):\n \"\"\"Prints last dumped messages\"\"\"\n for entry_key in sorted(DATABASE.keys())[-num:]:\n print_object(DATABASE[entry_key])\n\n\ndef print_date(ldate):\n \"\"\"Prints all messages on ldate\"\"\"\n date_regex = r'\\d{4}-\\d{2}-\\d{2}'\n results = []\n if re.match(date_regex, ldate):\n for entry_key in DATABASE.keys():\n msg = DATABASE[entry_key]\n if msg['datetime'][0:10] == ldate:\n results.append(entry_key)\n else:\n print_error(\"Invalid date format. 
Expected: YYYY-MM-DD\")\n\n for result in sorted(results):\n print_object(DATABASE[result])\n print(\"- Total Entries: {}\".format(len(results)))\n\n\ndef main():\n \"\"\"Main code\"\"\"\n while True:\n cmd = input('> ')\n try:\n (cmd, args) = cmd.split(' ', 1)\n except ValueError:\n (cmd, args) = cmd, None\n\n if cmd == 'search':\n if args:\n search_cmd(args)\n else:\n print_error(\"Enter search terms\")\n\n elif cmd == 'wordcloud':\n print_wordcloud()\n \n elif cmd == 'help':\n print_help()\n \n elif cmd == 'all':\n print_all_messages()\n \n elif cmd == 'last':\n if not args:\n args = 20\n print_last_messages(int(args))\n\n elif cmd == 'date':\n date = args.split(' ')[0]\n print_date(date)\n\n elif cmd in ['exit', 'quit']:\n break\n\n else:\n print_error('Command not valid')\n\nif __name__ == '__main__':\n try:\n print('> tgscrape console\\n')\n ARGNUM = len(sys.argv)\n if ARGNUM < 2:\n print(usage(sys.argv[0]))\n raise ValueError('Not enough parameters')\n\n (EXIT_CODE, EXIT_MSG) = 0, 'Goodbye!'\n\n GROUPNAME = sys.argv[1]\n DH = db.DB(GROUPNAME)\n DATABASE = DH.load_data(False)\n if not DATABASE:\n raise FileNotFoundError(\"No conversations found for {}\".format(GROUPNAME))\n\n main()\n except KeyboardInterrupt:\n (EXIT_CODE, EXIT_MSG) = 1, 'Stopped'\n print('\\b\\b', end='')\n except BaseException as exception_msg:\n (EXIT_CODE, EXIT_MSG) = 1, 'ERROR: {}'.format(exception_msg)\n finally:\n print('{}\\nExiting...'.format(EXIT_MSG))\n exit(EXIT_CODE)\n"
},
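`print_wordcloud` above hand-rolls a frequency dict; the same counting logic can be expressed with `collections.Counter`, shown here as a sketch that takes a plain list of message strings rather than the module-level `DATABASE`:

```python
import re
from collections import Counter

def wordcloud(messages, top=20, min_len=4):
    # Same rules as print_wordcloud above: skip service messages
    # (those starting with '|') and count words longer than 3 chars.
    counter = Counter()
    for msg in messages:
        if msg.startswith('|'):
            continue
        counter.update(w for w in re.findall(r'\w+', msg) if len(w) >= min_len)
    return counter.most_common(top)
```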
{
"alpha_fraction": 0.5359342694282532,
"alphanum_fraction": 0.5379877090454102,
"avg_line_length": 27.647058486938477,
"blob_id": "9b293758776c257e5cce0ef1dd881f6a2f3b432c",
"content_id": "efd6e03744152fa2f86113aa04c7ed9c2a534474",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 974,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 34,
"path": "/db.py",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"DB class\"\"\"\n\nimport json\nimport os\nimport config\n\nclass DB:\n \"\"\" Output handling \"\"\"\n logfile = ''\n\n def __init__(self, lgroup):\n \"\"\" Class constructor \"\"\"\n self.logfile = '{}{}.json'.format(config.output_folder, lgroup.lower())\n if not os.path.exists(config.output_folder):\n os.makedirs(config.output_folder)\n\n def load_data(self, create=True):\n \"\"\" Returns current conversation \"\"\"\n try:\n with open(self.logfile) as fp:\n return {int(key): value for key, value in json.load(fp).items()} \n except IOError:\n if create:\n with open(self.logfile, 'w') as fp:\n pass\n return {}\n\n def write_data(self, ldb):\n \"\"\" Saves conversation to file \"\"\"\n print('Writing to {}...'.format(self.logfile))\n with open(self.logfile, 'w') as fp:\n json.dump(ldb, fp)\n"
},
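A round-trip usage sketch for the `DB` class above; the group name is arbitrary. Note that `json.dump` stores the integer message ids as string keys, which is why `load_data` casts them back to `int`:

```python
import db

dh = db.DB('fun_with_friends')       # writes under config.output_folder
conversation = dh.load_data()        # {} on first run (file is created)
conversation[1] = {'msg': 'hello', 'deleted': '0'}
dh.write_data(conversation)

# json serialized the key as "1"; load_data restores it to int:
assert 1 in dh.load_data()
```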
{
"alpha_fraction": 0.6036414504051208,
"alphanum_fraction": 0.6134454011917114,
"avg_line_length": 27.559999465942383,
"blob_id": "a3850ca14374496db5fd81fea614d436f2401721",
"content_id": "4bdaf06064e7a72dced5ad2d2d06fd3751ce01d2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1428,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 50,
"path": "/config.py",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" Configuration file \"\"\"\n\nimport json\n\n# script parameters\nmax_err = 20\nmin_id = 1 # first message\nmax_id = -1 # no limit\nsleeptime = 0.1 # 0.1 seconds sleep between message downloads\noutput_folder = './conversations/' # output folder\nmessages_dump_cnt = 100 # number of messages dumped to periodically write on disk\n\n# classes for messages\ntext_class = 'tgme_widget_message_text'\nphoto_class = 'tgme_widget_message_photo_wrap'\nvideo_class = 'tgme_widget_message_video_wrap'\nvoice_class = 'tgme_widget_message_voice'\nlink_class = 'tgme_widget_message_link_preview'\nlink_title_class = 'link_preview_site_name'\nlink_description_class = 'link_preview_description'\nlink_preview_class = 'link_preview_right_image'\nauthor_class = 'tgme_widget_message_owner_name'\nservice_class = 'message_media_not_supported_label'\nsticker_class = 'tgme_widget_message_sticker'\nforward_class = 'tgme_widget_message_forwarded_from_name'\n\n# message object\nmessage_object = \"\"\"\n{\n \"datetime\": \"\",\n \"name\": \"\",\n \"username\": \"\",\n \"quote\": \"\",\n \"deleted\": \"0\",\n \"msg\": \"\",\n \"photo\": \"\",\n \"video\": \"\",\n \"voice\": \"\",\n \"fwd_name\": \"\",\n \"fwd_username\": \"\",\n \"link\": {\n \"title\": \"\",\n \"description\": \"\",\n \"preview\": \"\"\n }\n}\n\"\"\"\nmessage_object = json.loads(message_object)\n"
},
{
"alpha_fraction": 0.5440340638160706,
"alphanum_fraction": 0.5533459782600403,
"avg_line_length": 32.70212936401367,
"blob_id": "9ff8f6d5fb3bbcc088478fc973f96e8337bb1888",
"content_id": "1fe799da6cddb25113e43e0da619a723468d1013",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6336,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 188,
"path": "/tgscrape.py",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nQuick and dirty public Telegram group message scraper\n\nUsage:\n $ python3 tgscrape.py <groupname> [minid] [maxid]\n\nExample:\n $ python3 tgscrape.py fun_with_friends 1 1000\n - dumps messages 1 through 1000 from the group @fun_with_friends\n\nThe loop stops when it finds 20 consecutive empty messages (defined by max_err in config.py)\n\"\"\"\n\nimport sys\nimport time\nimport copy\nimport urllib.request\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom util import *\nimport config\nimport db\n\ndef usage(pyfile):\n \"\"\" Usage \"\"\"\n return 'Usage: {} <groupname> [minid] [maxid]'.format(pyfile)\n\n\ndef get_sender(obj, lclass):\n \"\"\" Retrieves the sender of a message \"\"\"\n author = obj.find('', class_=lclass)\n return_name = ''\n return_username = ''\n if author:\n return_name = author.text\n if author.name == 'a':\n return_username = author['href'].split('/')[3]\n return (return_name, return_username)\n\n\ndef parse_message(soup):\n \"\"\" Parses a message, returns object \"\"\"\n m_datetime = soup.find_all('time')[-1]['datetime']\n if m_datetime:\n return_object = copy.deepcopy(config.message_object)\n (return_object['name'], return_object['username']) = \\\n get_sender(soup, config.author_class)\n return_object['datetime'] = m_datetime\n\n msg = soup.find_all('', class_=config.text_class)\n if msg:\n if len(msg) == 2:\n quote = msg[0].text\n msg = msg[1].text\n else:\n quote = None\n msg = msg[0].text\n\n return_object['msg'] = msg\n if quote:\n return_object['quote'] = quote\n\n service_msg = soup.find('', class_=config.service_class)\n if service_msg:\n return_object['msg'] = \\\n '|{}|'.format(service_msg.text\n if service_msg.text\n else 'SERVICE MESSAGE')\n\n fwd = soup.find('', class_=config.forward_class)\n if fwd:\n (return_object['fwd_name'], return_object['fwd_username']) = \\\n get_sender(soup, config.forward_class)\n\n media = soup.find('', class_=config.photo_class) or \\\n soup.find('', class_=config.video_class) or \\\n soup.find('', class_=config.voice_class) or \\\n soup.find('', class_=config.link_class) or \\\n soup.find('', class_=config.sticker_class)\n\n if media:\n if media['class'][0] == config.photo_class:\n return_object['photo'] = media['style'].split(\"'\")[1]\n\n if media['class'][0] == config.video_class:\n return_object['video'] = media.video['src']\n\n if media['class'][0] == config.voice_class:\n return_object['voice'] = media['src']\n\n if media['class'][0] == config.link_class:\n title_class = soup.find('', class_=config.link_title_class)\n description_class = soup.find('', class_=config.link_description_class)\n preview_class = soup.find('', class_=config.link_preview_class)\n\n if title_class:\n return_object['link']['title'] = title_class.text\n if description_class:\n return_object['link']['description'] = description_class.text\n if preview_class:\n return_object['link']['preview'] = preview_class['style'].split(\"'\")[1]\n\n if media['class'][0] == config.sticker_class:\n return_object['photo'] = media['style'].split(\"'\")[1]\n\n return return_object\n\n\ndef guess_if_last(lmsg):\n \"\"\"Guesses if message is the last one in a group\"\"\"\n msg_day = lmsg['datetime'].split('T')[0]\n msg_day = datetime.datetime.strptime(msg_day, '%Y-%m-%d')\n check_day = datetime.datetime.today() - datetime.timedelta(days=1)\n if msg_day >= check_day:\n return True\n return False\n\n\ndef scrape_run(lgroupname, lmin_id, lmax_id, ldb):\n \"\"\" Main logic \"\"\"\n msg_id = lmin_id\n 
cnt_err = 0\n url = 'https://t.me/{}/'.format(lgroupname)\n while True:\n if msg_id not in ldb.keys():\n r_url = url + str(msg_id) + '?embed=1'\n with urllib.request.urlopen(r_url) as response:\n response = response.read()\n if len(response) > 3000:\n cnt_err = 0\n soup_object = BeautifulSoup(response, 'html.parser')\n ldb[msg_id] = parse_message(soup_object)\n time.sleep(config.sleeptime)\n else:\n ldb[msg_id] = copy.deepcopy(config.message_object)\n ldb[msg_id]['deleted'] = '1'\n\n if ldb[msg_id]['deleted'] == '1':\n cnt_err += 1\n if cnt_err == config.max_err:\n for id_to_delete in range(msg_id, msg_id - config.max_err - 1, -1):\n del ldb[id_to_delete]\n last_id = msg_id - config.max_err - 1\n if guess_if_last(ldb[last_id]):\n return '{} consecutive empty messages and last message is recent (maybe last?)'.format(config.max_err)\n return '{} consecutive empty messages. Current ID: {}'.format(config.max_err, msg_id)\n\n\n if (msg_id - lmin_id + 1) % config.messages_dump_cnt == 0:\n dh.write_data(ldb)\n\n print_object(ldb[msg_id])\n\n if msg_id == lmax_id:\n return 'All messages retrieved'\n\n msg_id += 1\n\ntry:\n print('> Telegram Public Groups Scraper', end='\\n\\n')\n argnum = len(sys.argv)\n\n if argnum < 2:\n print(usage(sys.argv[0]))\n raise ValueError('Not enough parameters')\n\n groupname = sys.argv[1]\n min_id = int(sys.argv[2]) if argnum >= 3 else config.min_id\n max_id = int(sys.argv[3]) if argnum >= 4 else config.max_id\n\n dh = db.DB(groupname)\n database = dh.load_data()\n exit_msg = scrape_run(groupname, min_id, max_id, database)\n exit_code = 0\nexcept KeyboardInterrupt:\n (exit_code, exit_msg) = 1, 'Stopped'\nexcept Exception as e:\n (exit_code, exit_msg) = 1, 'ERROR: {}'.format(e)\nfinally:\n print('\\r ')\n try:\n dh.write_data(database)\n except NameError:\n pass\n print('{}\\nExiting...'.format(exit_msg))\n exit(exit_code)\n"
},
{
"alpha_fraction": 0.45712560415267944,
"alphanum_fraction": 0.4589371979236603,
"avg_line_length": 29.66666603088379,
"blob_id": "e9face690644fb0a4255e9615369b33a648f25a6",
"content_id": "424245b19b37f4713747c04ea2d3e1733444828a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1656,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 54,
"path": "/util.py",
"repo_name": "alpakido/tgscrape",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Utility methods\"\"\"\n\ndef print_error(error_msg):\n \"\"\"Prints an error\"\"\"\n print('ERROR: {}'.format(error_msg))\n\n\ndef print_object(lobj):\n \"\"\" Print a message object \"\"\"\n if 'deleted' in lobj.keys() and lobj['deleted'] == '1':\n print('[deleted]')\n return\n\n outputline = '[{}] {}{}: '.format(lobj['datetime'],\n lobj['name'],\n ' (@{})'.format(lobj['username']) if lobj['username'] else '')\n\n if lobj['fwd_name'] or lobj['fwd_username']:\n outputline += '{ '\n outputline += '{}{}: '.format(lobj['fwd_name'],\n ' (@{})'.format(lobj['fwd_username'])\n if lobj['fwd_username']\n else '')\n\n if lobj['quote']:\n outputline += '{{ {} }} '.format(lobj['quote'])\n\n outputline += lobj['msg']\n\n if lobj['photo']:\n outputline += ' {}'.format(lobj['photo'])\n\n if lobj['video']:\n outputline += ' {}'.format(lobj['video'])\n\n if lobj['voice']:\n outputline += ' {}'.format(lobj['voice'])\n\n if lobj['link']['title'] or lobj['link']['description'] or lobj['link']['preview']:\n link_msg = [\n lobj['link']['title'],\n lobj['link']['description'],\n lobj['link']['preview']\n ]\n link_msg = list(filter(None, link_msg))\n link_msg = ' - '.join(link_msg)\n outputline += ' <{}>'.format(link_msg)\n\n if lobj['fwd_name'] or lobj['fwd_username']:\n outputline += ' }'\n\n print(outputline)\n"
}
] | 6 |
LLinville/ReverseParser
|
https://github.com/LLinville/ReverseParser
|
b3074edca8eba8303bd7d1843f2c22dccb5b4dca
|
4924c5e5ce5f4d341f904665f2b92ee2251279de
|
30edb7f4c5356671f67a883cabf6a05994dc1b8b
|
refs/heads/master
| 2016-08-12T18:52:51.549319 | 2016-04-07T23:21:43 | 2016-04-07T23:21:43 | 55,456,232 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8199999928474426,
"alphanum_fraction": 0.8199999928474426,
"avg_line_length": 49,
"blob_id": "a7a6b8380bf44314a9f57066f15d4f6d71b6d9b3",
"content_id": "23b8c5cdba22b5529d7b174ca642056f7343ffd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 2,
"path": "/README.md",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "# ReverseParser\nGiven a context free grammar, generate an example which is parsable by said grammar\n"
},
{
"alpha_fraction": 0.5675392746925354,
"alphanum_fraction": 0.5759162306785583,
"avg_line_length": 30.866666793823242,
"blob_id": "eac2a3aa03ca33207295d5fd44e41cc5d22e0ca3",
"content_id": "13c6b6804c0c2fbffd9e2133fa2e1ba9148ed9db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 30,
"path": "/TokenType.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "import random\nclass TokenType:\n def __init__(self, type, examples = None):\n self.type = type\n if examples is None:\n self.examples = []\n\n def getExamples(self):\n return self.examples\n\n def addExample(self, tokenExample):\n self.examples.append(tokenExample)\n\n def randomExample(self):\n totalAdjustedWeight = 0\n for example in self.examples:\n if example.weight == 0:\n totalAdjustedWeight += 1\n else:\n totalAdjustedWeight += example.weight\n averageWeight = totalAdjustedWeight * 1.0 / len(self.examples)\n target = random.randrange(0,totalAdjustedWeight)\n weightSoFar = 0\n for example in self.examples:\n if example.weight == 0:\n weightSoFar += averageWeight\n else:\n weightSoFar += example.weight\n if weightSoFar > target:\n return example"
},
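`randomExample` above does a cumulative-weight walk by hand, treating weight 0 as "use the average weight". The same idea can be expressed compactly with `random.choices`; a sketch only, which approximates rather than exactly matches the hand-rolled walk:

```python
import random

def random_example(examples):
    # Unweighted entries (weight 0) are given the average of the
    # declared weights; random.choices then performs the weighted draw.
    declared = [e.weight for e in examples if e.weight] or [1]
    avg = sum(declared) / len(declared)
    weights = [e.weight if e.weight else avg for e in examples]
    return random.choices(examples, weights=weights)[0]
```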
{
"alpha_fraction": 0.5776851177215576,
"alphanum_fraction": 0.5801181793212891,
"avg_line_length": 34.974998474121094,
"blob_id": "71a95bc6b5de566329d56f9f244367a92e145a6a",
"content_id": "d7b2522a00fbe40354235ba7d87eb796504b5bac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2877,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 80,
"path": "/Reader.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "from tokentype import TokenType\nfrom tokenexample import TokenExample\n\nclass Reader:\n\n def __init__(self, nodeTypeDict = None, reservedTokenTypeNames = None):\n if nodeTypeDict is None:\n #dictionary of the pairs tokenName:TokenType\n self.nodeTypeDict = {\n \"<space>\":TokenType(\"<space>\", examples = [TokenExample(\"<space>\",[\" \"])]),\n \"<empty>\":TokenType(\"<empty>\", examples = [TokenExample(\"<empty>\",[\" \"])]),\n \"<newline>\":TokenType(\"<newline>\", examples = [TokenExample(\"<newline>\",[\"\\n\"])])\n }\n\n if reservedTokenTypeNames is None:\n self.reservedTokenTypeNames = [\"<space>\",\"<empty>\",\"<newline>\"]\n\n def parseExampleLine(self, line, tokenTypeName):\n splitLine = line[:].split()\n if splitLine == []:\n return\n if splitLine[-1] == \";\":\n #default weight\n tokenExample = TokenExample(tokenTypeName, splitLine[:-1][:])\n else:\n #weight was specified\n tokenExample = TokenExample(tokenTypeName, splitLine[:-2][:], int(splitLine[-1]))\n\n self.nodeTypeDict[tokenTypeName].addExample(tokenExample)\n\n\n def parseDefinitionBody(self, file):\n line = file.readline().strip()\n while line.strip().startswith(\"//\") or line == \"\\n\":\n line = file.readline()\n tokenTypeName = line.strip()\n if tokenTypeName in self.reservedTokenTypeNames:\n print \"Error: Tried to rewrite reserved token type: \" + tokenTypeName\n if tokenTypeName[:7] == \"<number\":\n print \"Error: Tried to define <number #-#> token\"\n\n if tokenTypeName not in self.nodeTypeDict.keys():\n self.nodeTypeDict[tokenTypeName] = TokenType(tokenTypeName)\n #print \"created key for \"+tokenTypeName+\": \"+str(self.nodeTypeDict[tokenTypeName])\n\n line = file.readline()\n while True:\n line = line.strip()\n if line == \"}\":\n break\n if line[:2] == \"//\":\n line = file.readline()\n continue\n self.parseExampleLine(line, tokenTypeName)\n line = file.readline()\n\n def parseFileBody(self, file):\n line = file.readline().strip()\n while line:\n if line.strip() == \"{\":\n self.parseDefinitionBody(file)\n line = file.readline()\n\n def readGrammarFile(self, filename):\n file = open(filename, \"r\")\n\n self.parseFileBody(file)\n\n file.close()\n\n return self.nodeTypeDict\n\n# tdict = {}\n# tdict[\"noun\"] = TokenType(\"noun\")\n# tdict[\"noun\"].addExample(TokenExample(\"noun\",[\"word1\"]))\n# nounToken = TokenType(\"noun\")\n# nounToken.addExample(TokenExample(\"noun\",[\"a\",\"dog\"]))\n# verbToken = TokenType(\"verb\")\n# for example in verbToken.getExamples():\n# print example.splitExampleText"
},
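Inferred from `parseDefinitionBody` above, a grammar file is a sequence of `{ ... }` blocks: the token type name on the first line, then example lines made of literals and `<tokens>` ending in `;` (optionally followed by an integer weight). The grammar below is hypothetical, written to that format:

```python
# A hypothetical grammar in the format Reader expects; "; 3" sets an
# explicit weight, a bare ";" takes the default weight.
example_grammar = """{
<start>
// comments are skipped by the reader
the <noun> runs ;
the <noun> sleeps ; 3
}

{
<noun>
dog ;
cat ;
}
"""

with open('grammars/example.g', 'w') as f:
    f.write(example_grammar)
```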
{
"alpha_fraction": 0.6975609660148621,
"alphanum_fraction": 0.6975609660148621,
"avg_line_length": 33.16666793823242,
"blob_id": "26278fe6c30d21850a919fc6c8b1733aeebe88af",
"content_id": "c18a27c63ec805c01df1f537b970a588a979026e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 12,
"path": "/grammarnode.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "class GrammarNode:\n def __init__(self, tokenType, splitExampleText = None):\n self.tokenType = tokenType\n self.splitExampleText = splitExampleText\n self.expandedGrammarNodeList = []\n\n @staticmethod\n def literal(textValue):\n return GrammarNode(None, splitExampleText = [textValue])\n\n def setExpandedGrammarNodeList(self, eGNL):\n self.expandedGrammarNodeList = eGNL\n"
},
{
"alpha_fraction": 0.6590126156806946,
"alphanum_fraction": 0.6607347726821899,
"avg_line_length": 39.46511459350586,
"blob_id": "51a48faa07fc62a9bcdcc3eb586f622f9f5674c5",
"content_id": "3c3e7814730cfe62efc80c90892f3cebfaf1addf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1742,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 43,
"path": "/generator.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "from grammarnode import GrammarNode\n\nclass Generator:\n def __init__(self, tokenTypeDict, startingTokenTypeName = \"<start>\"):\n self.tokenTypeDict = tokenTypeDict\n self.startingNode = GrammarNode(tokenTypeDict[startingTokenTypeName], tokenTypeDict[startingTokenTypeName].randomExample())\n\n\n def expandGrammarNodeOneLayer(self, grammarNode):\n if grammarNode.tokenType is None:\n #literal grammar node, don't expand\n return\n tokenType = grammarNode.tokenType\n splitExampleText = tokenType.randomExample().splitExampleText\n grammarNodeList = []\n for word in splitExampleText:\n if word[0] == \"<\" and word[-1] == \">\":\n grammarNodeList.append(GrammarNode(self.tokenTypeDict[word]))\n else:\n #at a literal, make new leaf in the tree\n grammarNodeList.append(GrammarNode.literal(word))\n grammarNode.setExpandedGrammarNodeList(grammarNodeList)\n\n def expandAllTheWay(self, startingNode = None):\n if startingNode is None:\n startingNode = self.startingNode\n self.expandGrammarNodeOneLayer(startingNode)\n for node in startingNode.expandedGrammarNodeList:\n self.expandAllTheWay(startingNode = node)\n return startingNode\n\n @staticmethod\n def toString(fullyExpandedGrammarNode):\n if fullyExpandedGrammarNode.tokenType is None:\n literal = fullyExpandedGrammarNode.splitExampleText[0]\n if literal in \",.\":\n return literal\n return \" \" + literal\n\n text = \"\"\n for node in fullyExpandedGrammarNode.expandedGrammarNodeList:\n text += Generator.toString(node)\n return text\n\n\n"
},
{
"alpha_fraction": 0.6751269102096558,
"alphanum_fraction": 0.6802030205726624,
"avg_line_length": 38.599998474121094,
"blob_id": "65725b7c9e543db062cefa477c068e732aa14e15",
"content_id": "6ebefae6aacc1599c74b956ecd5d439ac15a862f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 5,
"path": "/TokenExample.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "class TokenExample:\n def __init__(self, exampleOf, splitExampleText, weight = 0):\n self.exampleOf = exampleOf\n self.weight = weight\n self.splitExampleText = splitExampleText"
},
{
"alpha_fraction": 0.8221476674079895,
"alphanum_fraction": 0.8221476674079895,
"avg_line_length": 28.899999618530273,
"blob_id": "50ed326ce2984120a5b8ecc2bfd50bc6a1088c9d",
"content_id": "6b2d925ec12fb67296330831083575b88c15ee5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 10,
"path": "/generatortest.py",
"repo_name": "LLinville/ReverseParser",
"src_encoding": "UTF-8",
"text": "from generator import Generator\nfrom reader import Reader\n\nreader = Reader()\ntokenTypeDict = reader.readGrammarFile(\"grammars/Kant.g\")\ngenerator = Generator(tokenTypeDict)\nexpandedNode = generator.expandAllTheWay(generator.startingNode)\nprint Generator.toString(expandedNode)\nprint \" \"\nprint \"done\""
}
] | 7 |
MS103/studia
|
https://github.com/MS103/studia
|
1a1e054ef9411914b700ff7f2e480dbedf7bae66
|
f60ff94495768becbac1601fa9ab52f6d042c691
|
4a2ae7327ff4e1a72773272c874d74a6cdd3b568
|
refs/heads/master
| 2020-04-29T02:45:17.912726 | 2019-05-19T08:34:05 | 2019-05-19T08:34:05 | 175,782,491 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5928906798362732,
"alphanum_fraction": 0.6063044667243958,
"avg_line_length": 33.67441940307617,
"blob_id": "e711de5d9f0b8d89e1fc0ceb6d165491dcca9100",
"content_id": "00c79f7bc63b23737c8cbfbf6bc12d203b636531",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1502,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 43,
"path": "/Kryptografia/Lista 3/openssl.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from Crypto.Cipher import AES\n\nimport hashlib\nimport os\nimport struct\n\n\nclass OpenSSL:\n def __init__(self, mode):\n self.modes = {'ofb': AES.MODE_OFB, 'ctr': AES.MODE_CTR, 'cbc': AES.MODE_CBC}\n self.mode = self.modes[mode]\n\n # CBC - Polega na dodawaniu XOR każdego kolejnego bloku tekstu jawnego do poprzednio otrzymanego bloku szyfrogramu.\n\n # OFB - Szyfrujemy wektor inicjalizujący i powstaje nam X.\n # X XOR blok tekstu jawnego. Natępnie szyfrujemy X i powstaje X' itd.\n\n # CTR - Szyfrujemy Number-used-once+licznik i powstaje X.\n # X XOR blok tekstu jawnego. Zwiększ licznik. Powtórz dla kolejnych bloków.\n\n def encrypt(self, msg, key, iv):\n iv = bytes(iv.encode())\n msg = bytes(msg.encode())\n if len(key) % 16:\n key = hashlib.sha256(key).digest()\n msg += b'\\x00' * ((16 - len(msg)) % 16) # Długość wiadomości musi być mod 16 = 0\n if self.mode != self.modes['ctr']:\n encryptor = AES.new(key=key, mode=self.mode, iv=iv)\n else:\n encryptor = AES.new(key=key, mode=self.mode)\n\n return encryptor.encrypt(msg)\n\n def decrypt(self, msg, key, iv):\n iv = bytes(iv.encode())\n if len(key) % 16 != 0:\n key = hashlib.sha256(key).digest()\n if self.mode != self.modes['ctr']:\n decryptor = AES.new(key=key, mode=self.mode, iv=iv)\n else:\n decryptor = AES.new(key=key, mode=self.mode)\n\n return decryptor.decrypt(msg)\n"
},
{
"alpha_fraction": 0.5668016076087952,
"alphanum_fraction": 0.5890688300132751,
"avg_line_length": 21.454545974731445,
"blob_id": "d02618d090a5f23b6f0159fcb3f554d8b171e288",
"content_id": "42d53e02a2196f29588cc653bddae216f59a87e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/Kryptografia/Lista 2/zad2.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from common import *\n\n\ndef KSA(key, n, t):\n num_key = str2num(key)\n key_length = len(num_key)\n s = list(range(n))\n j = 0\n for i in range(t):\n j = (j + s[i] + num_key[i % key_length]) % n\n swap(s, i, j)\n return s\n\n\nd = 0\nt = n = 256\nlength = 10\nkey = 'Wiki'\nkeystream_list = rc4_mdrop_d(key, n, t, d, KSA, length)\nbinary = num2hexb(keystream_list)\nf = open(r\"C:\\Users\\Latitude\\Desktop\\Studia - Git_repo\\studia\\Kryptografia\\Lista 2\\test.txt\", 'w')\nf.write(binary)\n"
},
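For reference, a self-contained sketch of the textbook RC4 (standard KSA + PRGA) that the `KSA` above is a drop-in replacement for; the final line checks the widely published keystream test vector for the key 'Wiki':

```python
def rc4_keystream(key, length):
    k = [ord(c) for c in key]
    s = list(range(256))
    j = 0
    for i in range(256):                      # KSA: key-dependent shuffle of s
        j = (j + s[i] + k[i % len(k)]) % 256
        s[i], s[j] = s[j], s[i]
    out, i, j = [], 0, 0
    for _ in range(length):                   # PRGA: emit keystream bytes
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        s[i], s[j] = s[j], s[i]
        out.append(s[(s[i] + s[j]) % 256])
    return out

assert ''.join('%02X' % b for b in rc4_keystream('Wiki', 6)) == '6044DB6D41B7'
```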
{
"alpha_fraction": 0.49102622270584106,
"alphanum_fraction": 0.5195581912994385,
"avg_line_length": 34.04838562011719,
"blob_id": "cf2f378552b89ac1b243f9bde03a92ee757210ee",
"content_id": "5921bc19e90e54921aa21a8b6b0ccbeda38473ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2187,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 62,
"path": "/Kryptografia/Lista 5/edvard_curves.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\nfrom cyclic_group import Z\n\nPoint = namedtuple('Point', ['x', 'y'])\n\n\nclass EdwardsCurves: # Klasa krzywych eliptycznych zadanych przez: x**2 + y**2 = 1 + d * x**2 * y**2 (mod p)\n\n def __init__(self, d, p):\n self.z = Z(p)\n self.p = p\n self.d = self.process_d(d)\n self.one = self.z(1)\n self.zero = self.z(0)\n\n def process_d(self, d): #Przygotowanie wartości d, żeby można było wpisać np. '1/3'\n if type(d) == str:\n data = d.split('/')\n return self.z(int(data[0])) / self.z(int(data[1]))\n return self.z(d)\n\n def add_points(self, p1, p2): #Dodawanie punktów na krzywej - wzory z wykładu\n x1, y1 = p1\n x2, y2 = p2\n x3 = (x1 * y2 + y1 * x2) / (self.one + self.d * x1 * y1 * x2 * y2)\n y3 = (y1 * y2 - x1 * x2) / (self.one - self.d * x1 * y1 * x2 * y2)\n return self.create_point(x3.x, y3.x)\n\n def scalar_mul(self, scalar, point): #Mnożenie punktu przez skalar\n if scalar == 0:\n return self.create_point(0, 1)\n if scalar == 1:\n return point\n Q = self.scalar_mul(scalar // 2, point)\n Q = self.add_points(Q, Q)\n if scalar % 2:\n Q = self.add_points(Q, point)\n return Q\n\n def neg(self, p):#Zwraca punkt przeciwny\n return self.create_point(-p.x.x, p.y.x)\n\n def create_point(self, x, y): #Tworzy punkt z własnościami grupy\n x = self.z(x)\n y = self.z(y)\n return Point(x, y)\n\n def is_on_curve(self, p): #Sprawdza czy punkt jest na krzywej\n x, y = p\n x, y = x.x, y.x\n return (x ** 2 + y ** 2) % self.p == (1 + self.d.x * x ** 2 * y ** 2) % self.p\n\n def order(self, g): #Najmniejszy skalar x taki, że x*g = (0, 1) <- punkt bazowy dla krzywej zadanej\n base_point = self.create_point(0, 1) # x**2 + y**2 = 1 + d * x**2 * y**2 (mod p)\n if not (self.is_on_curve(g) or g != base_point):\n print(\"Błąd wartości generatora!\")\n exit(-1)\n for i in range(2, self.p):\n if self.scalar_mul(i, g) == base_point:\n return i\n return 1\n"
},
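A quick sanity sketch of the Edwards addition law used above, in plain modular arithmetic; d=5, p=17 and the generator (7, 12) are taken from Lista 5/main.py further down, and `pow(x, -1, p)` needs Python 3.8+:

```python
d, p = 5, 17

def add(P, Q):
    (x1, y1), (x2, y2) = P, Q
    t = d * x1 * x2 * y1 * y2
    x3 = (x1 * y2 + y1 * x2) * pow(1 + t, -1, p) % p
    y3 = (y1 * y2 - x1 * x2) * pow(1 - t, -1, p) % p
    return x3, y3

G = (7, 12)
assert (G[0]**2 + G[1]**2) % p == (1 + d * G[0]**2 * G[1]**2) % p  # G is on the curve
assert add(G, (0, 1)) == G                                         # (0, 1) is the neutral element
```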
{
"alpha_fraction": 0.6208082437515259,
"alphanum_fraction": 0.6294066905975342,
"avg_line_length": 36.51612854003906,
"blob_id": "95161dd85a19135b6e48c2b2ef6ed6e2182027f1",
"content_id": "92f5285572e128a34818710a8ae6518eba5da187",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 31,
"path": "/Kryptografia/Lista 3/tests.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "import sys\nfrom jks import jks\n\nfrom command_line_parser import CommandLineParser\nfrom openssl import OpenSSL\n\ncommand_line_parser = CommandLineParser()\nparsed_args = command_line_parser.parse_arguments(sys.argv[1:])\n\nkey_store = jks.KeyStore.load(parsed_args['keystore_path'], parsed_args['password'])\nkey = key_store.private_keys['alias_name'].pkey[:32]\n\nnum_of_success = 0\nmsg_to_enc = 'x'\ntypes_of_enc = ['ofb', 'cbc', 'ctr']\nif True:\n for enc_type in types_of_enc:\n print('Sprawdzam szyfrowanie i deszyfrowanie dla ', enc_type.upper())\n openssl = OpenSSL(enc_type)\n iv = '0' * 16\n enc_m = openssl.encrypt(msg_to_enc, key, iv)\n dec_m = openssl.decrypt(enc_m, key, iv).rstrip(b'\\x00')\n try:\n dec_m = dec_m.decode()\n if_success = msg_to_enc == dec_m\n except UnicodeDecodeError as e:\n print(\"Wystąpił błąd!\", e)\n if_success = 0\n num_of_success += if_success\n print('Sprawdzam czy {a} == {b}. Rezultat to {c}\\n'.format(a=msg_to_enc, b=dec_m, c=if_success))\n print('Stosunek sukcesów do prób to {a}/{b}'.format(a=num_of_success, b=len(types_of_enc)))\n"
},
{
"alpha_fraction": 0.5064995288848877,
"alphanum_fraction": 0.5459610223770142,
"avg_line_length": 26.97402572631836,
"blob_id": "199c196693031b78744956973203e1da2cd3f130",
"content_id": "bd11f2266b34e09f8d33fbb72cb40f8d8a1f8e7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2170,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 77,
"path": "/Kryptografia/Lista 1/zad 1.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "class LCG: # s[i+1] = (s[i]*a + b) % m\n\n def __init__(self, seed, a, b, m):\n self.state = seed\n self.a = a\n self.b = b\n self.m = m\n\n def random(self):\n self.state = (self.state * self.a + self.b) % self.m\n return self.state\n\n\nclass Crack_LCG:\n def __init__(self):\n self.state = None\n self.a = None\n self.b = None\n self.m = None\n\n @staticmethod\n def crack_lcg(states, m): # Musimy znać tylko m\n delta = states[1] - states[0]\n a = (states[2] - states[1]) * modinv(delta, m) % m\n b = (states[1] - states[0] * a) % m\n return (states[-1], a, b, m)\n\n def predict_new_value_lcg(self, states, m):\n if self.state is None:\n state, self.a, self.b, self.m = self.crack_lcg(states, m)\n self.state = (state * self.a + self.b) % self.m\n return self.state\n else:\n self.state = (self.state * self.a + self.b) % self.m\n return self.state\n\n def clear_cracker(self):\n self.state = None\n self.a = None\n self.b = None\n self.m = None\n\n\n## FUNKCJE POMOCNICZE - znalezione w internecie\ndef egcd(a, b): # Rozszerzony algorytm Euklidesa\n if a == 0:\n return (b, 0, 1)\n else:\n g, x, y = egcd(b % a, a)\n return (g, y - (b // a) * x, x) # zwraca (g, x, y) takie że a*x + b*y = g = gcd(a, b); gdzie gcd = NWD\n\n\ndef modinv(b, n): # Odwrócone modulo\n g, x, _ = egcd(b, n)\n if g == 1:\n return x % n\n else:\n raise ValueError('Nie można znaleźć wartiści odwrotnej')\n\n\n## PROGRAM WLAŚCIWY\nseed = 10534\na = 672257317069504227\nb = 7382843889490547368\nm = 9223372036854775783 #Liczba pierwsza; Musi być można zrobić (x_1 - x_0) * y mod(m) = 1, dla dowolnego y < m .\n\nstates = []\nlcg = LCG(seed=seed, a=a, b=b, m=m)\ncracker = Crack_LCG()\nfor i in range(1000):\n lcg.random()\nfor j in range(3):\n states.append(lcg.random())\nfor k in range(10):\n print('\\n')\n print('Przewidujemy nową liczbę: {0}'.format(cracker.predict_new_value_lcg(states, m)))\n print('Nową liczbą rzeczywiście jest: {0}'.format(lcg.random()))\n"
},
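The attack `Crack_LCG` implements, as a standalone sketch: with the modulus m known (and prime here), three consecutive outputs determine a and b exactly. Parameters are copied from the script above; `pow(x, -1, m)` needs Python 3.8+:

```python
m = 9223372036854775783
a = 672257317069504227
b = 7382843889490547368

x = [10534]                                  # seed
for _ in range(3):
    x.append((x[-1] * a + b) % m)            # three observed states x[1..3]

a_rec = (x[3] - x[2]) * pow(x[2] - x[1], -1, m) % m   # a = (s2 - s1) / (s1 - s0)
b_rec = (x[2] - a_rec * x[1]) % m                     # b = s1 - a * s0
assert (a_rec, b_rec) == (a, b)
```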
{
"alpha_fraction": 0.49098196625709534,
"alphanum_fraction": 0.5160320401191711,
"avg_line_length": 17.830188751220703,
"blob_id": "1f6e48c6f7c656a38474d561d33b4172815c3a6c",
"content_id": "e2fca5821912ebc8e327a4bcd61195a3a1045488",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 53,
"path": "/Kryptografia/Lista 2/common.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef PRGA(S, n):\n i = 0\n j = 0\n while True:\n i = (i + 1) % n\n j = (j + S[i]) % n\n\n S[i], S[j] = S[j], S[i]\n K = S[(S[i] + S[j]) % n]\n yield K\n\n\ndef rc4(key, n, t, ksa, length):\n s = ksa(key, n, t)\n keystream = PRGA(s, n)\n return [next(keystream) for i in range(length)]\n\n\ndef rc4_drop_d(key, n, t, d, ksa_alg, length):\n stream = num2hex(rc4(key, n, t, ksa_alg, length))\n return stream[d:]\n\n\ndef rc4_mdrop_d(key, n, t, d, ksa_alg, length):\n stream = rc4(key, n, t, ksa_alg, length)\n return stream[slice(0, len(stream) + 1, d + 1)]\n\n\ndef swap(l, i, j):\n l[i], l[j] = l[j], l[i]\n\n\ndef str2num(l):\n return [ord(x) for x in l]\n\n\ndef str2bin(key):\n return ''.join(format(ord(x), '08b') for x in key)\n\n\ndef num2bin(l):\n return ''.join(format(x, '08b') for x in l)\n\n\ndef num2hex(l):\n return ''.join([format(x, '02X') for x in l])\n\n\ndef num2hexb(l):\n return ''.join([format(x, '08X') + '\\n' for x in l])\n"
},
{
"alpha_fraction": 0.502964437007904,
"alphanum_fraction": 0.5207509994506836,
"avg_line_length": 25.63157844543457,
"blob_id": "452a6b62e46f78f6a2d0b690ba093567511c42a2",
"content_id": "0bdc502fce78c202fb82b89fcd8f020a9f7c7bda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1012,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 38,
"path": "/Kryptografia/Lista 2/zad2_sst.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from common import *\n\n\ndef KSASST(key, n, t):\n num_key = str2num(key)\n key_length = len(num_key)\n s = list(range(n))\n\n marked_list = [False for _ in range(n)]\n marked_list[-1] = True\n marked_num = 1\n j = n\n i = 0\n while marked_num < n:\n i = i % n\n j = (j + s[i % n] + num_key[i % key_length]) % n\n swap(s, i, j)\n if marked_num < n / 2:\n if not marked_list[j] and not marked_list[i]:\n marked_list[j] = True\n marked_num += 1\n else:\n if (not marked_list[j] and marked_list[i]) or (not marked_list[j] and i == j):\n marked_list[j] = True\n marked_num += 1\n swap(marked_list, i, j)\n i += 1\n return s\n\n\nd = 1\nt = n = 256\nlength = 10 ** 8\nkey = 'Wiki'\nkeystream_list = rc4_mdrop_d(key, n, t, d, KSASST, length)\nbinary = num2hexb(keystream_list)\nf = open(r\"C:\\Users\\Latitude\\Desktop\\Studia - Git_repo\\studia\\Kryptografia\\Lista 2\\test.txt\", 'w')\nf.write(binary)\n"
},
{
"alpha_fraction": 0.6227545142173767,
"alphanum_fraction": 0.6294910311698914,
"avg_line_length": 42.09677505493164,
"blob_id": "794127029f02eb8a89b54b452021ed3899225350",
"content_id": "f2b05ebc4106dc9e6042e73b1ba4295254789455",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1340,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 31,
"path": "/Kryptografia/Lista 5/el_gamal.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from edvard_curves import EdwardsCurves\n\n\nclass ElGamal:\n def __init__(self, curve: EdwardsCurves, g):\n if not curve.is_on_curve(g):\n print(\"Generator nie na krzywej!\")\n exit(-1)\n self.curve = curve\n self.g = g\n self.n = curve.order(g)\n\n def generate_public_key(self, private_key):\n return self.curve.scalar_mul(private_key, self.g) # Iloczyn skalarny generatora i klucza prywatnego\n\n def encrypt_point(self, p, public_key, rand):\n if not (self.curve.is_on_curve(p) or self.curve.is_on_curve(public_key)):\n print(\"Punkty nie na krzywej!\")\n exit(-1)\n cipher = self.curve.scalar_mul(rand, self.g), self.curve.add_points(p, self.curve.scalar_mul(rand, public_key))\n print(f'Szyfruję punkt {p} za pomocą klucza publicznego {public_key}.\\nOtrzymano {cipher}')\n return cipher\n\n def decrypt_cipher(self, cipher, private_key):\n c1, c2 = cipher\n if not (self.curve.is_on_curve(c1) or self.curve.is_on_curve(c2)):\n print(\"Punkty nie na krzywej!\")\n exit(-1)\n decoded = self.curve.add_points(c2, self.curve.neg(self.curve.scalar_mul(private_key, c1)))\n print(f'Odszyfrowuję punkt {cipher} za pomocą klucza prywatnego {private_key}.\\n Otrzymano{decoded}')\n return decoded\n"
},
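The algebra behind `encrypt_point`/`decrypt_cipher` above, sketched in a toy additive group Z_n instead of curve points (all numbers below are made up for illustration): c1 = r*G, c2 = P + r*(k*G), and c2 - k*c1 recovers P.

```python
n = 101                        # toy group order (prime, illustrative)
G = 3                          # "generator"
k = 27                         # private key
pub = k * G % n                # public key = k*G

P, r = 55, 42                  # message "point" and ephemeral randomness
c1 = r * G % n                 # first ciphertext component
c2 = (P + r * pub) % n         # second component hides P

assert (c2 - k * c1) % n == P  # decryption: c2 - k*c1 == P
```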
{
"alpha_fraction": 0.4525386393070221,
"alphanum_fraction": 0.46578365564346313,
"avg_line_length": 20.31764793395996,
"blob_id": "0dbbac99a94a752ac45b10e74acd3b80138a3aa1",
"content_id": "212312126f6643fa57ff3e14d4e067c4fc06f8ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1822,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 85,
"path": "/Kryptografia/Lista 4/zad1.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom math import gcd as bltin_gcd\n\n\n# FUNKCJE POMOCNICZE\n\ndef coprime2(a, b):\n return bltin_gcd(a, b) == 1\n\n\ndef egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n\ndef modinv(a, m):\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m\n\n\n# MERKLE-HELLMAN PUZZLE\n\ndef gen_keys(n):\n w = []\n s = 0\n for i in range(n):\n w.append(np.random.randint(s + 1, np.ceil(1.2 * (s + 1))))\n s = sum(w)\n\n q = np.random.randint(s + 1, np.ceil(1.2 * (s + 1)))\n r = q\n while not coprime2(q, r):\n r = np.random.randint(0, q + 1)\n beta = []\n for j in range(n):\n beta.append(r * w[j] % q)\n if if_print:\n print('Public key: ', beta)\n print('Private key: ', (w, q, r))\n return beta, w, q, r\n\n\ndef enc(m, beta):\n c = 0\n for i in range(len(m)):\n c += m[i] * beta[i]\n if if_print:\n print(\"Ciphertext is {}\".format(c))\n return c\n\n\ndef dec(c, w, q, r, n):\n s = modinv(r, q)\n c_p = (c * s) % q\n m_p = [None] * n\n for j in range(n):\n i = n - j - 1\n m_p[i] = int(w[i] <= c_p)\n c_p -= w[i] * m_p[i]\n if if_print:\n print('Odkodowana wiaddomość {}'.format(m_p))\n return m_p\n\n\n# KOD WŁAŚCIWY\nglobal if_print\nif_print = False\nn = 10\n\nbeta, w, q, r = gen_keys(n)\nm = list(np.random.randint(0, 2, n))\nc = enc(m, beta)\nm_p = dec(c, w, q, r, n)\nprint(\n 'Wiadomość przed zakodowaniem: {m}\\n'\n 'Wiadomość odszyfrowana: {m_p}\\n'\n 'Czy eksperyment się udał? {result}'.format(m=m,\n m_p=m_p,\n result=m == m_p))\n"
},
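A tiny worked sketch of the greedy step that `dec()` above relies on: a superincreasing weight sequence lets each message bit be peeled off from the largest weight down (toy weights assumed):

```python
w = [2, 3, 7, 14, 30]                        # superincreasing private weights
bits = [1, 0, 1, 1, 0]
c = sum(wi * bi for wi, bi in zip(w, bits))  # 2 + 7 + 14 = 23

out = []
for wi in reversed(w):                       # greedy, largest weight first
    out.append(int(wi <= c))
    c -= wi * out[-1]

assert list(reversed(out)) == bits and c == 0
```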
{
"alpha_fraction": 0.5187760591506958,
"alphanum_fraction": 0.5354658961296082,
"avg_line_length": 22.96666717529297,
"blob_id": "99563b083e800e73aef9aa582e53f514424a67e3",
"content_id": "0a4d4bce59910251c47c83d4a294c04f8a538a96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 30,
"path": "/Kryptografia/Lista 2/zad2_rs.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from common import *\nimport numpy as np\n\n\ndef KSARS(key, n, t):\n bit_key = str2bin(key)\n key_length = len(bit_key)\n s = list(range(n))\n for r in range(t):\n top = []\n bottom = []\n for i in range(n):\n temp = (r * n + i) % key_length\n if bit_key[temp] == '0':\n top.append(s[i])\n else:\n bottom.append(s[i])\n s = top + bottom\n return s\n\n\nd = 1\nn = 256\nt = int(round(2 * n * np.log(n)))\nlength = 10\nkey = 'Wiki'\nkeystream_list = rc4_mdrop_d(key, n, t, d, KSARS, length)\nbinary = num2hexb(keystream_list)\nf = open(r\"C:\\Users\\Latitude\\Desktop\\Studia - Git_repo\\studia\\Kryptografia\\Lista 2\\test.txt\", 'w')\nf.write(binary)\n"
},
{
"alpha_fraction": 0.6076447367668152,
"alphanum_fraction": 0.6216976046562195,
"avg_line_length": 28.147541046142578,
"blob_id": "a5d2821ad1f8e396d1de0f5810d5379b794da6bf",
"content_id": "84625d212be53969493949f2f0b576c8321759d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1781,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 61,
"path": "/Kryptografia/Lista 3/main.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom random import randint\nfrom jks import jks\n\nfrom command_line_parser import CommandLineParser\nfrom openssl import OpenSSL\n\n\ndef xor_strings(str1, str2):\n return \"\".join([chr(ord(a) ^ ord(b)) for a, b in zip(str1, str2)])\n\n\ndef increment_iv(iv):\n iv = bin(int(iv, 2) + 1)[2:]\n return '0' * (16 - len(iv)) + iv\n\n\ndef destinguisher(key, iv, msg_list, enc_msg):\n iv2 = increment_iv(iv)\n encryptor = OpenSSL('cbc')\n for i, m in enumerate(msg_list):\n xored = xor_strings(xor_strings(iv, iv2), m)\n enc_m = encryptor.encrypt(xored, key, iv2)\n if enc_m == enc_msg:\n return i, enc_m\n\n\ncommand_line_parser = CommandLineParser()\nparsed_args = command_line_parser.parse_arguments(sys.argv[1:])\n\nenc_type = 'cbc'\nopenssl = OpenSSL(enc_type)\niv = '0' * 16\nkey_store = jks.KeyStore.load(parsed_args['keystore_path'], parsed_args['password'])\nkey = key_store.private_keys['alias_name'].pkey[:32]\n\nf = open(parsed_args['input_path'], 'r')\ninput_ms = [x for x in f]\nf = open(r'C:\\Users\\Latitude\\Desktop\\output.txt', 'w')\n\nif len(input_ms) == 0:\n raise IOError('Za mało danych wejściowych')\nelif len(input_ms) == 2 :\n msg_list = [x[:16] for x in input_ms]\n random_msg = msg_list[randint(0, 1)]\n\n enc_m = openssl.encrypt(random_msg, key, iv)\n f.write(str(enc_m)+'\\n')\n print('Szyfruje {a} w {b}'.format(a=random_msg, b=enc_m))\n\n if enc_type == 'cbc':\n print('\\n\\nProgram uruchamia destinguisher\\n\\n')\n dec_m, cipher = destinguisher(key, iv, msg_list, enc_m)\n print('{a} zaszyfrowano w {b}'.format(a=msg_list[dec_m], b=cipher))\nelse:\n\n for m in input_ms:\n enc_m = openssl.encrypt(m, key, iv)\n f.write(str(enc_m)+'\\n')\n print('Szyfruje {a} w {b}'.format(a=m, b=enc_m))\n\n"
},
{
"alpha_fraction": 0.6957210898399353,
"alphanum_fraction": 0.7163233160972595,
"avg_line_length": 38.4375,
"blob_id": "5d5055284db83759105bc08b6c3d3c20a5d2db3b",
"content_id": "922808976e49a560f09c231740d3af3191795733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 16,
"path": "/Kryptografia/Lista 5/main.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom edvard_curves import EdwardsCurves\nfrom el_gamal import ElGamal\n\nd = 5\np = 17\nec = EdwardsCurves(d, p)\ng = ec.create_point(7, 12) # must be on edwards curve\nmessage = ec.create_point(12, 7) # must be on edwards curve\neg = ElGamal(ec, g)\npriv_key = random.randint(1, eg.n - 1) # smaller than order of point because we don't want pub_key to be base_point\nrand_int = random.randint(1, eg.n - 1) # does not have to be smaller than order of g\npub_key = ec.scalar_mul(priv_key, g) # must be on edwards curve\nencoded = eg.encrypt_point(message, pub_key, rand_int)\ndecrypted = eg.decrypt_cipher(encoded, priv_key)\n"
},
{
"alpha_fraction": 0.557692289352417,
"alphanum_fraction": 0.6282051205635071,
"avg_line_length": 26.130434036254883,
"blob_id": "bbc515f5369e8dc19ef27e6dcc85d390c066fb88",
"content_id": "00cb5d0bc490c85e1e9db79409edc42b2bbf1863",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 46,
"path": "/Kryptografia/Lista 4/zad2.py",
"repo_name": "MS103/studia",
"src_encoding": "UTF-8",
"text": "from openssl import OpenSSL\nfrom random import randint\nimport time\n\n# private values\ns = 16 # len(secret)\nn = 24 # len(msg)\n# N = pow(2, n) # len(secrets)\nN = 1000\n\n# Placeholder\nmsgs = []\nkeys = []\nsecrets = []\nstart = time.time()\n# ALICE (Encryption)\nfor i in range(0, N):\n secret = str(randint(1000000000000000000000000, 9999999999999999999999999))\n key = str(randint(1000, 9999))\n\n enc_suite = OpenSSL('cbc')\n msg = enc_suite.encrypt('0' * (n - s) + secret, key * 4, key * 4)\n\n msgs.append(msg)\n keys.append(key)\n secrets.append(secret)\n\n# ALICE sends \"msgs\" Block to BOB\n\n# BOB (Brute force Decryption)\ndecrypted_msg = ''\nrand_msg_solve = randint(0, N-1)\n\nprint(\"Start decrypting at {}\".format(time.time() - start))\nkeys = [str(x) for x in range(1000, 9999)]\nwhile not decrypted_msg[:(n - s)] == '0' * (n - s):\n if len(keys) == 0:\n print(\"Could not find decryption\")\n exit(-1)\n key = keys.pop(randint(0, len(keys)-1))\n dec_suite = OpenSSL('cbc')\n decrypted_msg = dec_suite.decrypt(msgs[rand_msg_solve], key * 4, key * 4)\n\nprint('Bob decrypted secret:\\t\\t' + decrypted_msg[(n - s):])\nprint('Alice secret (' + str(rand_msg_solve) + '):\\t\\t' + secrets[rand_msg_solve])\nprint(time.time() - start)\n"
}
] | 13 |
cmu-mars/model-learner
|
https://github.com/cmu-mars/model-learner
|
594718015b9612b08578c4968052bb98b4c917ae
|
245789ef8dee74fbf446c25c5abb7cd8b5200f42
|
9e84c5837991c60fd4e74a0b62793fd6c4566c2a
|
refs/heads/master
| 2021-06-04T05:30:23.816935 | 2020-04-30T16:27:22 | 2020-04-30T16:27:22 | 95,971,910 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5479323267936707,
"alphanum_fraction": 0.5596282482147217,
"avg_line_length": 34.19852828979492,
"blob_id": "2d6e65f09c9506d2cdfe1c378a6205da489d9802",
"content_id": "f3a656a3dc2a2692ec02f87072744d534ec8cc53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9576,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 272,
"path": "/learner/tranlearner.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "import GPy\nimport GPyOpt\nimport numpy as np\nimport math\nimport re\nimport time\n\nfrom learner.maxUncertainty import AcquisitionMU\n\n\nclass TranLearner:\n\n def __init__(self, budget, num_dims, true_power_model):\n\n self.offline_budget_ratio = 0.5\n self.offline_budget = int(budget * self.offline_budget_ratio)\n if self.offline_budget > 200:\n self.offline_budget = 200\n\n self.online_budget = budget - self.offline_budget\n self.used_budget = 0\n\n self.num_dims = num_dims\n self.opIDs = {}\n for i in range(self.num_dims):\n self.opIDs['o'+str(i)]=i\n\n\n self.true_power_model = true_power_model \n self.domain = self.createDomain(self.num_dims, \"discrete\" , (0, 1))\n\n self.bo = None\n # Note: Ensure suggorate_model has a predict function\n # Check update_config_files() in learner.py \n self.suggorate_model = None\n\n\n # parameter:\n # domain_type: continuous or discrete\n # domain_1d : (lower, upper) or (num1, num2, ..., numN)\n def createDomain(self, num_dims, domain_type, domain_1d):\n domain = []\n for i in range(num_dims):\n domain.append({'name':'x_'+str(i+1), 'type':domain_type, 'domain': domain_1d})\n return domain\n\n def uniSampleDomain(self, domain, num_points):\n num_dims = len(domain)\n X = np.zeros((num_points, num_dims))\n for dim_idx in range(num_dims):\n d = domain[dim_idx]\n if d['type'] == 'continuous':\n X[:, dim_idx] = np.random.uniform(d['domain'][0], d['domain'][1], size=num_points)\n elif d['type'] == 'discrete':\n X[:, dim_idx] = np.random.choice(d['domain'], size=num_points)\n else:\n raise ValueError(\"Unsupported variable type: {}\".format(d['type'])) \n return X\n\n\n\n # functionality: load power model from text file\n # output:\n # model: a dict where each element is a pair of (key=a polynomal term represented in a tuple, weight)\n # The constant term : model[(-1)] = weight1\n # Single option term : model[(opID)] = weight2\n # Interacting options term: model[(opID1, opID2,...)] = weightt3\n def loadPowerModel(self, filepath):\n model={tuple([-1]):0}\n with open(filepath) as f:\n model_txt = f.read(); # there is only one line in the model file\n terms=model_txt.replace(\" \", \"\").rstrip().split(\"+\")\n for term in terms:\n parts=term.split(\"*\")\n if len(parts)==1: # It is a constant. 
Assume there are at most 1 constant\n model[tuple([-1])] = float(parts[0])\n else:\n weight=parts[0]\n opList=parts[1:]\n opListNumericalIDs=[]\n for op in opList:\n opListNumericalIDs.append(self.opIDs[op])\n model[tuple(opListNumericalIDs)]=float(weight)\n return model\n\n\n # functionality: get the performance of the power model under the given configuration\n # input:\n # config: 2D numpy array where each row is an array of 20 elements, which represets a point\n # output:\n # the performance of the power model\n def measurePM0(self, configs):\n num_points = configs.shape[0]\n perfs = np.zeros((num_points, 1))\n\n for p_idx in range(num_points):\n config = configs[p_idx, :]\n perf = 0\n for key, value in self.powerModel.items():\n if -1 not in key: # if the key is not the constant term\n for ID in key:\n if config[ID] == 0:\n value = 0\n break\n perf += value\n perfs[p_idx][0] = perf\n return perfs\n\n def measurePM(self, configs):\n X = configs\n\n if X.ndim == 1:\n X = np.reshape(X, (1, len(X)))\n\n Y = self.true_power_model.evaluateModelFast(X)\n Y = np.reshape(Y, (len(Y), 1))\n\n return Y\n\n\n\n def create_bo(self, model_update_interval, X_init, Y_init):\n # Initialize Bayesian optimization\n\n # --- feasible region\n space = GPyOpt.Design_space(space=self.domain)\n\n # --- CHOOSE the objective\n objective = GPyOpt.core.task.SingleObjective(self.measurePM)\n\n # --- CHOOSE the model type\n model = GPyOpt.models.GPModel(exact_feval=True,optimize_restarts=10,verbose=False)\n\n # --- CHOOSE the acquisition optimizer\n aquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(space)\n\n # --- CHOOSE the type of acquisition\n acquisition = AcquisitionMU(model, space, optimizer=aquisition_optimizer)\n\n # --- CHOOSE a collection method\n evaluator = GPyOpt.core.evaluators.Sequential(acquisition)\n\n # Bo object\n bo = GPyOpt.methods.ModularBayesianOptimization(\n model,\n space,\n objective,\n acquisition,\n evaluator,\n X_init,\n Y_init = Y_init,\n normalize_Y = False,\n model_update_interval = model_update_interval)\n\n return bo\n\n def offline_learning(self):\n \n num_init = int(self.offline_budget*0.2)\n \n if num_init > 50:\n num_init = 50\n budget = self.offline_budget - num_init\n\n if self.offline_budget <= 100:\n model_update_interval = 1\n elif (self.offline_budget > 100) and (self.offline_budget <= 200):\n model_update_interval = 2\n else:\n model_update_interval = 5\n\n num_iters = budget // model_update_interval\n\n print(\"Offline learning budget: {}\".format(self.offline_budget))\n print(\"--- Already used budget: {}\".format(self.used_budget))\n print(\"--- Run Bayesian Optimization with {} initial points and {} iterations\".format(num_init, math.ceil(budget/model_update_interval)))\n\n start_time = time.time()\n\n X_init = self.uniSampleDomain(self.domain, num_init)\n Y_init = self.measurePM(X_init)\n\n self.bo = self.create_bo(model_update_interval, X_init, Y_init)\n self.bo.run_optimization(budget)\n\n # Consume left budget if any\n left_budget = budget - model_update_interval*num_iters\n if left_budget > 0:\n model_update_interval = left_budget\n self.bo = self.create_bo(model_update_interval, self.bo.X, self.bo.Y)\n self.bo.run_optimization(left_budget)\n\n print(\"--- Offline learning is done. 
Consume {} seconds ---\".format(time.time() - start_time)) \n\n self.suggorate_model = self.bo.model\n\n # Update budget information\n self.used_budget += self.offline_budget\n self.offline_budget = 0\n \n return self.suggorate_model\n\n def online_learning(self):\n budget = 25\n if self.online_budget < budget:\n budget = self.online_budget\n\n if self.used_budget < 150:\n interval = 1\n elif (self.used_budget >= 150) and (self.used_budget < 250):\n interval = 2\n elif (self.used_budget >= 250) and (self.used_budget < 350):\n interval = 5\n elif (self.used_budget >= 350) and (self.used_budget < 450):\n interval = 10\n else:\n interval = 25\n\n model_update_interval = interval\n\n print(\"Online learning started with the budget {} and {} iterations\".format(budget, math.ceil(budget/interval)))\n print(\"--- Already used budget: {}\".format(self.used_budget))\n\n start_time = time.time()\n\n self.bo = self.create_bo(\n model_update_interval,\n self.bo.X,\n self.bo.Y)\n self.bo.run_optimization(budget)\n\n # Consume left budget if any\n num_iters = budget // model_update_interval\n left_budget = budget - model_update_interval*num_iters\n if left_budget > 0:\n model_update_interval = left_budget\n self.bo = self.create_bo(model_update_interval, self.bo.X, self.bo.Y)\n self.bo.run_optimization(left_budget)\n\n print(\"--- Online learning is done. Consume {} seconds ---\".format(time.time() - start_time)) \n\n self.suggorate_model = self.bo.model\n\n # Update budget information\n self.used_budget += budget\n self.online_budget -= budget\n\n return self.suggorate_model\n\n\n def get_pareto_frontier(self, Xs, Ys, maxX=True, maxY=True):\n # Sort the list in either ascending or descending order of X\n myList = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)\n idx_sorted = sorted(range(len(Xs)), key=lambda k: Xs[k])\n # Start the Pareto frontier with the first value in the sorted list\n p_front = [myList[0]]\n i = 0\n pareto_idx = [idx_sorted[i]]\n # Loop through the sorted list\n for pair in myList[1:]:\n i += 1\n if maxY:\n if pair[1] >= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n else:\n if pair[1] <= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n p_frontX = [pair[0] for pair in p_front]\n p_frontY = [pair[1] for pair in p_front]\n return pareto_idx, p_frontX, p_frontY\n\n\n"
},
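A dependency-light sketch of the pure-exploration loop `TranLearner` runs through GPyOpt: fit a surrogate, then measure wherever the predictive standard deviation is largest. scikit-learn's GP stands in for GPy/GPyOpt here, and the stand-in objective is invented for the demo (both are assumptions, not the repo's API):

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.default_rng(0)
f = lambda X: X.sum(axis=1) + 0.5 * X[:, 0] * X[:, 1]   # stand-in "power model"
X = rng.integers(0, 2, size=(10, 5)).astype(float)      # initial design
y = f(X)
cands = rng.integers(0, 2, size=(200, 5)).astype(float) # candidate configs

for _ in range(15):                                     # measurement budget
    gp = GaussianProcessRegressor(normalize_y=True).fit(X, y)
    _, std = gp.predict(cands, return_std=True)
    x_next = cands[np.argmax(std)]                      # max-uncertainty point
    X = np.vstack([X, x_next])
    y = np.append(y, f(x_next[None, :]))                # "measure" it
```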
{
"alpha_fraction": 0.651296854019165,
"alphanum_fraction": 0.6570605039596558,
"avg_line_length": 18.27777862548828,
"blob_id": "ed3539c80f2f1fb32c3767352825622cc057b18c",
"content_id": "ea64907a81dec62bb6f7e7309e7c0ea7e1a680e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 18,
"path": "/tests/test_basic.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport unittest\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_learner(self):\n assert True\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(BasicTestSuite)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6106623411178589,
"alphanum_fraction": 0.6922455430030823,
"avg_line_length": 29.975000381469727,
"blob_id": "9d86d6accad7f4e7c776cef1e50bea7275869d56",
"content_id": "eff0536b95eac430fd7fe1d4dbe22845d33a3f33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1238,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 40,
"path": "/README.md",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "# model-learner\nThis project implements the intent discovery module of BRASS MARS project.\n\n\n\n# Install\n\n```bash\ngit clone https://github.com/cmu-mars/model-learner.git\ncd model-learner\nmake\n```\n\n# Usage\n\nHere an example how to use the modules in the `learner` package to machine learn a dimensional model:\n```python\nfrom learner.mlearner import MLearner\nfrom learner.model import genModelTermsfromString, Model, genModelfromCoeff\n\nndim = 20\nbudget = 1000\nmodel_txt = \"\"\"10 + 1.00 * o0 + 2.00 * o1 + 3.00 * o2 +\n4.00 * o3 + 5.00 * o4 + 6.00 * o5 + 7.00 * o6 + 8.00 * o7 + \n1.00 * o8 + 2.00 * o9 + 3.00 * o10 + 4.00 * o11 + 5.00 * o12 + \n6.00 * o13 + 7.00 * o14 + 8.00 * o15 + 1.00 * o16 + 2.00 * o17 + \n3.00 * o18 + 4.00 * o19 + 1 * o0 * o1\"\"\"\n\npower_model_terms = genModelTermsfromString(model_txt)\ntrue_power_model = Model(power_model_terms, ndim)\nlearner = MLearner(budget, ndim, true_power_model)\nlearned_model = learner.discover()\nlearned_power_model_terms = genModelfromCoeff(learned_model.named_steps['linear'].coef_, ndim)\nlearned_power_model = Model(learned_power_model_terms, ndim)\nlearned_power_model.__str__()\n```\n\n# Maintainer\n\nIf you need a new feature to be added, please contact [Pooyan Jamshidi](https://pooyanjamshidi.github.io)."
},
{
"alpha_fraction": 0.5397986173629761,
"alphanum_fraction": 0.5483836531639099,
"avg_line_length": 34.078067779541016,
"blob_id": "69c36c01ba8426e96ce91e290f59fc8e0741271d",
"content_id": "b51959d5459e0be5a61a34a4f9917f6008ec1943",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9435,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 269,
"path": "/learner/model.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "# General imports\nimport numpy as np\nfrom sympy.core import sympify\nfrom learner.lib import *\nimport re as regex\nimport random\n\n\nclass Model:\n def __init__(self, terms, ndim):\n self.ndim = ndim\n self.allOptions = [\"o\" + str(i) for i in range(ndim)]\n self.constant = 0\n self.individualOptions = []\n self.interactions = []\n self.name = \"\"\n for term in terms:\n if term.isInteraction():\n self.interactions.append(term)\n elif term.isIndividualOption():\n self.individualOptions.append(term)\n elif term.isConstant():\n self.constant = float(term.coefficient)\n\n def evaluateModel(self, xTest):\n if xTest.shape[1] != self.ndim:\n raise ValueError()\n\n L = xTest.shape[0]\n r = np.zeros(L)\n f = sympify(self.__str__())\n vars = {}\n\n for i in range(L):\n for j in range(self.ndim):\n idx = int(regex.findall(\"\\d+$\", self.allOptions[j])[0])\n vars[self.allOptions[j]] = xTest[i, idx]\n r[i] = f.subs(vars).evalf()\n\n return r\n\n def evaluateModelFast(self, xTest):\n Lo = len(self.individualOptions)\n Li = len(self.interactions)\n A = xTest\n\n M = np.zeros(self.ndim + Li)\n\n for i in range(Lo):\n M[self.allOptions.index(self.individualOptions[i].options[0].replace(\" \", \"\"))] = self.individualOptions[\n i].coefficient\n\n for i in range(Li):\n options = self.interactions[i].options\n coeff = self.interactions[i].coefficient\n M[self.ndim + i] = coeff\n\n A = np.append(A, A[:, self.allOptions.index(options[0].replace(\" \", \"\")):self.allOptions.index(\n options[0].replace(\" \", \"\")) + 1], axis=1)\n for idx in range(1, len(options)):\n A[:, self.ndim + i] = A[:, self.ndim + i] * A[:, self.allOptions.index(options[idx].replace(\" \", \"\"))]\n\n r = np.dot(A, M) + self.constant\n\n return r\n\n def simplifyModel(self):\n Lo = self.getNumberOfOptions()\n Li = self.getNumberOfInteractions()\n options2remove = []\n for i in range(1, Lo):\n currentOption = self.individualOptions[i]\n for j in range(i):\n if self.individualOptions[j].options[0].replace(\" \", \"\") == currentOption.options[0].replace(\" \", \"\"):\n self.individualOptions[j].coefficient = self.individualOptions[\n j].coefficient + currentOption.coefficient\n options2remove.append(i)\n break\n\n interactions2remove = []\n for i in range(1, Li):\n currentInteraction = self.interactions[i]\n for j in range(i):\n if len(self.interactions[j].options) == len(currentInteraction.options):\n equalOptions = 0\n for k in range(len(self.interactions[j].options)):\n for l in range(len(currentInteraction.options)):\n if self.interactions[j].options[k] == currentInteraction.options[l]:\n equalOptions += 1\n break\n if equalOptions == len(self.interactions[j].options):\n self.interactions[j].coefficient = self.interactions[j].coefficient + currentInteraction.coefficient\n interactions2remove.append(i)\n break\n\n for i in sorted(options2remove, reverse=True):\n self.individualOptions.pop(i)\n\n for i in sorted(interactions2remove, reverse=True):\n self.interactions.pop(i)\n\n def getInteractions(self):\n return self.interactions\n\n def getIndividualOptions(self):\n return self.individualOptions\n\n def getNumberOfInteractions(self):\n return len(self.interactions)\n\n def getNumberOfOptions(self):\n return len(self.individualOptions)\n\n def removeInteraction(self, position):\n if len(self.interactions) >= 1:\n self.interactions.pop(position)\n\n def removeIndividualOption(self, position):\n if len(self.individualOptions) > 1: # for a model to be valid, at least one individual option is needed\n 
self.individualOptions.pop(position)\n\n def addOption(self, coefficient):\n if len(self.individualOptions) < self.ndim:\n for i in range(self.ndim):\n proposedOption = \"o\" + str(i)\n shouldBeAdded = True\n for j in range(len(self.individualOptions)):\n if proposedOption == self.individualOptions[j].options[0].replace(\" \", \"\"):\n shouldBeAdded = False\n break\n if shouldBeAdded:\n self.individualOptions.append(Term(coefficient, [proposedOption]))\n break\n self.simplifyModel()\n\n def addInteraction(self, term):\n self.interactions.append(term)\n self.simplifyModel()\n\n def changeTerm(self, newTerm, position):\n if position < len(self.individualOptions):\n tempTerm = self.individualOptions[position]\n self.individualOptions[position] = newTerm\n else:\n position -= len(self.individualOptions)\n tempTerm = self.interactions[position]\n self.interactions[position] = newTerm\n return tempTerm\n\n def getTermByPosition(self, position):\n if position < len(self.individualOptions):\n return self.individualOptions[position]\n else:\n return self.interactions[position - len(self.individualOptions)]\n\n def __str__(self):\n str2 = \"\"\n Lo = len(self.individualOptions)\n Li = len(self.interactions)\n for i in range(len(self.individualOptions)):\n if i < Lo - 1:\n str2 += str(self.individualOptions[i]) + \" + \"\n else:\n str2 += str(self.individualOptions[i])\n\n if Li > 0:\n str2 += \" + \"\n for i in range(len(self.interactions)):\n if i < Li - 1:\n str2 += str(self.interactions[i]) + \" + \"\n else:\n str2 += str(self.interactions[i])\n\n if self.constant != 0:\n str2 += \" + \" + str(self.constant)\n return str2\n\n\nclass Term:\n def __init__(self, coefficient, options=\"1\"): # The default value is for the constant term\n self.coefficient = coefficient\n self.options = options\n\n def __str__(self):\n str2 = str(\"{0:.2f}\".format(self.coefficient)) + \" * \"\n if len(self.options) > 1:\n for i in range(len(self.options)):\n if i < len(self.options) - 1:\n str2 += str(self.options[i]) + \" * \"\n else:\n str2 += str(self.options[i])\n else:\n str2 += str(self.options[0])\n return str2\n\n def isConstant(self):\n if len(self.options) == 1 and is_number(self.options[0]):\n return True\n else:\n return False\n\n def isIndividualOption(self):\n if len(self.options) == 1 and not is_number(self.options[0]):\n return True\n else:\n return False\n\n def isInteraction(self):\n if len(self.options) == 1:\n return False\n else:\n return True\n\n\ndef genModelTermsfromString(txtModel):\n txtModel = txtModel.replace(\" \", \"\")\n terms = regex.split(\"[+]\", txtModel)\n generatedModel = []\n for i in range(len(terms)):\n term = regex.split(\"[*]\", terms[i])\n if len(term) == 1 and is_number(term[0]): # this is the constant term\n coeff = float(term[0])\n generatedModel.append(Term(coeff))\n else:\n coeff = 1\n idx = -1\n for index in range(len(term)):\n if is_number(term[index]):\n coeff = float(term[index])\n idx = index\n\n if idx != -1: # we have a explicit coefficient, i.e., 2*o1 instead of o1\n term.pop(idx)\n generatedModel.append(Term(coeff, term))\n\n return generatedModel\n\n\ndef genModelfromCoeff(coeff, ndim):\n options = [\"o\" + str(i) for i in range(ndim)]\n generatedModel = []\n\n generatedModel.append(Term(coeff[0]))\n for i in range(ndim):\n generatedModel.append(Term(coeff[i+1], [options[i]]))\n\n for i in range(ndim):\n for j in range(i+1, ndim):\n generatedModel.append(Term(coeff[ndim + 1 + i + j], [options[i], options[j]]))\n\n return generatedModel\n\n\ndef 
genModel(individualOptions, interactions, max_coeff):\n given_model = []\n size = individualOptions + interactions\n for i in range(size):\n if i < individualOptions:\n term = Term(random.randint(0, max_coeff), [\"o\" + str(i)])\n given_model.append(term)\n else:\n option1 = random.randint(0, individualOptions - 1)\n option2 = random.randint(0, individualOptions - 1)\n while option1 == option2:\n option2 = random.randint(0, individualOptions - 1)\n term = Term(random.randint(0, max_coeff), [\"o\" + str(option1), \"o\" + str(option2)])\n given_model.append(term)\n\n return given_model"
},
{
"alpha_fraction": 0.512110710144043,
"alphanum_fraction": 0.5216262936592102,
"avg_line_length": 43.42307662963867,
"blob_id": "d6511c314f0dca129fdb3976a281b166e7259739",
"content_id": "084175a0412fc1c978eb4d6887e57f7130f15827",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2312,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 52,
"path": "/learner/power_system.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "class powerSystem:\n # input parameters:\n # powerModelFilepath: file path of power model\n # opIDs: a dict that maps the name of an option to its numerical ID, an iteger from 0 to n-1. n is the total number of options\n def __init__(self, powerModelFilepath, opIDs):\n self.numOfOptions = len(opIDs)\n self.opIDs = opIDs\n self.pmfp = powerModelFilepath\n self.powerModel = self.loadPowerModel()\n \n # functionality: load power model from text file\n # output:\n # model: a dict where each element is a pair of (key=a polynomal term represented in a tuple, weight)\n # The constant term : model[(-1)] = weight1\n # Single option term : model[(opID)] = weight2\n # Interacting options term: model[(opID1, opID2,...)] = weightt3\n def loadPowerModel(self):\n try:\n model={tuple([-1]):0}\n with open(self.pmfp) as f:\n model_txt = f.read(); # there is only one line in the model file\n terms=model_txt.replace(\" \", \"\").rstrip().split(\"+\")\n for term in terms:\n parts=term.split(\"*\")\n if len(parts)==1: # It is a constant. Assume there are at most 1 constant\n model[tuple([-1])] = float(parts[0])\n else:\n weight=parts[0]\n opList=parts[1:]\n opListNumericalIDs=[]\n for op in opList:\n opListNumericalIDs.append(self.opIDs[op])\n model[tuple(opListNumericalIDs)]=float(weight)\n return model\n except Exception as e:\n raise Exception(e)\n\n # functionality: get the performance of the power model under the given configuration\n # input:\n # config: a list of 0 or 1\n # output:\n # the performance of the power model\n def measure(self, config):\n perf = 0\n for key, value in self.powerModel.items():\n if -1 not in key: # if the key is not the constant term\n for ID in key:\n if config[ID] == 0:\n value = 0\n break\n perf += value\n return perf\n\n\n"
},
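A tiny worked example of the dict layout `loadPowerModel()` produces and the walk `measure()` does over it (the weights below are made up for illustration):

```python
model = {(-1,): 10.0,     # constant term
         (0,): 2.0,       # single option o0
         (0, 1): 5.0}     # interaction o0*o1

config = [1, 0]           # o0 on, o1 off
perf = 0
for key, weight in model.items():
    if -1 not in key and any(config[i] == 0 for i in key):
        weight = 0        # a term counts only if every option in it is on
    perf += weight

assert perf == 12.0       # 10 (constant) + 2 (o0); the interaction drops out
```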
{
"alpha_fraction": 0.5841013789176941,
"alphanum_fraction": 0.5841013789176941,
"avg_line_length": 24.5,
"blob_id": "609b0b945548443e9091e528397a086b95e4e183",
"content_id": "449798fea9195e64e87ffaf743ece4fc102c5271",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 34,
"path": "/learner/ready_db.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "import json\nfrom learner.constants import AdaptationLevel\n\n\nclass ReadyDB:\n def __init__(self, ready_db):\n with open(ready_db) as db:\n data = json.load(db)\n self.db = data\n\n def get_budget(self):\n return self.db[\"discharge-budget\"]\n\n\n def get_baseline(self):\n if self.db[\"level\"] == \"a\":\n return AdaptationLevel.BASELINE_A\n elif self.db[\"level\"] == \"b\":\n return AdaptationLevel.BASELINE_B\n elif self.db[\"level\"] == \"c\":\n return AdaptationLevel.BASELINE_C\n elif self.db[\"level\"] == \"d\":\n return AdaptationLevel.BASELINE_D\n\n\n\n def get_power_model(self):\n return \"model\" + str(self.db[\"power-model\"])\n\n def get_start_location(self):\n return self.db[\"start-loc\"]\n\n def get_target_locations(self):\n return self.db[\"target-locs\"]\n\n"
},
{
"alpha_fraction": 0.6440092325210571,
"alphanum_fraction": 0.6486175060272217,
"avg_line_length": 31.11111068725586,
"blob_id": "801ef5bd35cde34706505b8c3f36d661ccad2073",
"content_id": "707b6d83dbca38f3c90b3b2d96600c5029e91a38",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 27,
"path": "/learner/testLearner.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "\nfrom learner.learn import Learn\nfrom learner.constants import AdaptationLevel\n\n\ntry:\n model_learner = Learn()\n model_learner.get_true_model()\n model_learner.start_learning()\n if model_learner.ready.get_baseline() == AdaptationLevel.BASELINE_C:\n model_learner.dump_learned_model()\n\n model_learner.update_config_files()\n\n max_runs = 8\n if model_learner.ready.get_baseline() == AdaptationLevel.BASELINE_D:\n run=0\n while model_learner.budget > model_learner.used_budget:\n if run >= max_runs:\n break\n print(\"Online learning Run {}\".format(run+1))\n model_learner.start_online_learning()\n model_learner.update_config_files()\n run+=1\n\n #print(model_learner.learner.measurePM(model_learner.default_conf))\nexcept Exception as e:\n print(\"Error: {}\".format(e))\n"
},
{
"alpha_fraction": 0.6339347958564758,
"alphanum_fraction": 0.6353226900100708,
"avg_line_length": 38.47945022583008,
"blob_id": "604d50c73c77acb476b0e74cb3376a9bf6d9e75f",
"content_id": "2f530a847e5b9068af2213fd5a18e0750d813735",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2882,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 73,
"path": "/learner/maxUncertainty.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "from GPyOpt.acquisitions.base import AcquisitionBase\nfrom GPyOpt.core.task.cost import constant_cost_withGradients\n \nclass AcquisitionMU(AcquisitionBase):\n \n \"\"\"\n General template to create a new GPyOPt acquisition function\n\n :param model: GPyOpt class of model\n :param space: GPyOpt class of domain\n :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer\n :param cost_withGradients: function that provides the evaluation cost and its gradients\n\n \"\"\"\n\n # --- Set this line to true if analytical gradients are available\n analytical_gradient_prediction = False\n\n \n def __init__(self, model, space, optimizer, cost_withGradients=None, **kwargs):\n self.optimizer = optimizer\n super(AcquisitionMU, self).__init__(model, space, optimizer)\n \n # --- UNCOMMENT ONE OF THE TWO NEXT BITS\n \n # 1) THIS ONE IF THE EVALUATION COSTS MAKES SENSE\n #\n # if cost_withGradients == None:\n # self.cost_withGradients = constant_cost_withGradients\n # else:\n # self.cost_withGradients = cost_withGradients \n\n # 2) THIS ONE IF THE EVALUATION COSTS DOES NOT MAKE SENSE\n #\n # if cost_withGradients == None:\n # self.cost_withGradients = constant_cost_withGradients\n # else:\n # print('LBC acquisition does now make sense with cost. Cost set to constant.') \n # self.cost_withGradients = constant_cost_withGradients\n if cost_withGradients is not None:\n print('The set cost function is ignored! MU acquisition does not make sense with cost.') \n\n\n\n def _compute_acq(self,x):\n \n # --- DEFINE YOUR AQUISITION HERE (TO BE MAXIMIZED)\n #\n # Compute here the value of the new acquisition function. Remember that x is a 2D numpy array \n # with a point in the domanin in each row. f_acqu_x should be a column vector containing the \n # values of the acquisition at x.\n #\n m, s = self.model.predict(x) \n f_acqu = s\n return f_acqu\n\n \n \n def _compute_acq_withGradients(self, x):\n \n # --- DEFINE YOUR AQUISITION (TO BE MAXIMIZED) AND ITS GRADIENT HERE HERE\n #\n # Compute here the value of the new acquisition function. Remember that x is a 2D numpy array \n # with a point in the domanin in each row. f_acqu_x should be a column vector containing the \n # values of the acquisition at x. df_acqu_x contains is each row the values of the gradient of the\n # acquisition at each point of x.\n #\n # NOTE: this function is optional. If note available the gradients will be approxiamted numerically.\n m, s, dmdx, dsdx = self.model.predict_withGradients(x) \n f_acqu = s \n df_acqu = dsdx\n \n return f_acqu, df_acqu\n"
},
{
"alpha_fraction": 0.5660207271575928,
"alphanum_fraction": 0.5745831727981567,
"avg_line_length": 38.58928680419922,
"blob_id": "306e29dcb164b71235e0693d4b823e5f1cab61f1",
"content_id": "bafae152276f9674463e474b40d6c7c57c7b6c17",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2219,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 56,
"path": "/learner/mlearner.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nimport numpy as np\n\n\nclass MLearner:\n\n def __init__(self, budget, ndim, power_model):\n self.budget = budget\n self.degree = ndim\n self.model = power_model\n\n def discover(self, observation_noise_level=0):\n # performance models has interaction degree of two, based on our study\n model = Pipeline([(\"poly\", PolynomialFeatures(degree=2, interaction_only=True, include_bias=True)),\n (\"linear\", LinearRegression(fit_intercept=True))])\n\n # take some ran dom samples\n # this should be replaced with pair wise sampling\n X = np.random.randint(2, size=(self.budget, self.degree))\n y = self.model.evaluateModelFast(X)\n if observation_noise_level != 0:\n #print(\"Noise observation: N(0, {})\".format(observation_noise_level))\n y = y + np.random.normal(\n loc=0.0,\n scale=observation_noise_level,\n size=self.budget)\n\n # fit the polynomial model regression\n pmodel = model.fit(X, y)\n\n return pmodel\n\n def get_pareto_frontier(self, Xs, Ys, maxX=True, maxY=True):\n # Sort the list in either ascending or descending order of X\n myList = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)\n idx_sorted = sorted(range(len(Xs)), key=lambda k: Xs[k])\n # Start the Pareto frontier with the first value in the sorted list\n p_front = [myList[0]]\n i = 0\n pareto_idx = [idx_sorted[i]]\n # Loop through the sorted list\n for pair in myList[1:]:\n i += 1\n if maxY:\n if pair[1] >= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n else:\n if pair[1] <= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n p_frontX = [pair[0] for pair in p_front]\n p_frontY = [pair[1] for pair in p_front]\n return pareto_idx, p_frontX, p_frontY\n\n\n"
},
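On a toy 3-option model, the degree-2 interaction-only pipeline used in `discover()` recovers the coefficients essentially exactly from noiseless random binary samples (the toy model below is an assumption for the demo):

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

X = np.random.randint(2, size=(200, 3))
y = 4 + 1 * X[:, 0] + 2 * X[:, 1] + 3 * X[:, 0] * X[:, 1]      # true model

pm = Pipeline([("poly", PolynomialFeatures(degree=2, interaction_only=True)),
               ("linear", LinearRegression())]).fit(X, y)

# columns: [1, o0, o1, o2, o0*o1, o0*o2, o1*o2] -> approx [0, 1, 2, 0, 3, 0, 0]
print(np.round(pm.named_steps["linear"].coef_, 2))
```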
{
"alpha_fraction": 0.5985320806503296,
"alphanum_fraction": 0.6090520024299622,
"avg_line_length": 35.82432556152344,
"blob_id": "15cd17aa91c75087754fe0e782ce54af8bccb9cd",
"content_id": "465fbf5d4cec31e2637017a1e6c447cf8bed878c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8175,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 222,
"path": "/learner/learn.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nimport json\n\n# the main for learning\n\nfrom learner.mlearner import MLearner\nfrom learner.model import genModelTermsfromString, Model, genModelfromCoeff\nfrom learner.ready_db import ReadyDB\nfrom learner.lib import *\nfrom learner.constants import AdaptationLevel\n\nfrom learner.tranlearner import TranLearner\n\nmodel_path = os.path.expanduser(\"~/catkin_ws/src/cp1_base/power_models/\")\nlearned_model_path = os.path.expanduser(\"~/cp1/\")\nconfig_list_file = os.path.expanduser('~/cp1/config_list.json')\nconfig_list_file_true = os.path.expanduser('~/cp1/config_list_true.json')\nready_json = os.path.expanduser(\"~/ready\")\nlearned_model_name = 'learned_model'\nused_budget_report_path = os.path.expanduser(\"~/cp1/used_budget\")\n\nndim = 20\ntest_size = 10000\nmu, sigma = 0, 0.1\nspeed_list = [0.15, 0.3, 0.6]\n\noffline_learning_budget_ratio = 0.2\n\nopIDs={}\nnumOfOptions=20\nfor i in range(numOfOptions):\n opIDs[\"o\"+str(i)]=i\n\n\n\nclass Learn:\n def __init__(self):\n self.ready = ReadyDB(ready_db=ready_json)\n self.budget = self.ready.get_budget()\n self.used_budget = 0\n self.model_name = self.ready.get_power_model()\n default_conf = np.concatenate((np.zeros(int(ndim/2)), np.ones(ndim-int(ndim/2))))\n self.default_conf = np.reshape(default_conf, (1, ndim))\n self.learned_model_filepath = os.path.join(learned_model_path, learned_model_name)\n self.true_model_filepath = os.path.join(model_path, self.model_name)\n self.config_list_file = config_list_file\n self.config_list_file_true = config_list_file_true\n self.true_power_model = None\n self.learned_power_model = None\n self.learned_model = None\n self.learner = None\n \n\n def get_true_model(self):\n try:\n with open(self.true_model_filepath, 'r') as model_file:\n model_txt = model_file.read()\n\n power_model_terms = genModelTermsfromString(model_txt)\n self.true_power_model = Model(power_model_terms, ndim)\n print(\"The true model: {0}\".format(self.true_power_model.__str__()))\n return self.true_power_model\n except Exception as e:\n raise Exception(e)\n\n # For case c and case d (offline learning)\n def start_learning(self, observation_noise_level=0):\n\n # learn the model\n try:\n if self.ready.get_baseline() == AdaptationLevel.BASELINE_C:\n self.learner = MLearner(self.budget, ndim, self.true_power_model)\n self.learned_model = self.learner.discover(observation_noise_level=observation_noise_level)\n self.used_budget = self.budget\n elif self.ready.get_baseline() == AdaptationLevel.BASELINE_D:\n self.learner = TranLearner(self.budget, ndim, self.true_power_model)\n self.learned_model = self.learner.offline_learning()\n self.used_budget = self.learner.used_budget\n\n with open(used_budget_report_path, \"w\") as fp:\n fp.write(str(self.used_budget))\n except Exception as e:\n raise Exception(e)\n\n # For case d\n def start_online_learning(self):\n try:\n self.learned_model = self.learner.online_learning()\n self.used_budget = self.learner.used_budget\n with open(used_budget_report_path, \"w\") as fp:\n fp.write(str(self.used_budget))\n except Exception as e:\n raise Exception(e)\n\n # For case c\n def dump_learned_model(self):\n \n \"\"\"dumps model in ~/cp1/\"\"\"\n\n try:\n learned_power_model_terms = genModelfromCoeff(self.learned_model.named_steps['linear'].coef_, ndim)\n self.learned_power_model = Model(learned_power_model_terms, ndim)\n except Exception as e:\n raise Exception(e)\n\n print(\"The learned model: {0}\".format(self.learned_power_model.__str__()))\n\n with 
open(self.learned_model_filepath, 'w') as model_file:\n model_file.write(self.learned_power_model.__str__())\n\n\n def update_config_files(self):\n\n # configs = itertools.product(range(2), repeat=ndim)\n # xTest = np.zeros(shape=(2**ndim, ndim))\n # i = 0\n # for c in configs:\n # xTest[i, :] = np.array(c)\n # i += 1\n\n test_size = 10000\n\n xTest = np.random.randint(2, size=(test_size, ndim))\n\n for i in range(test_size):\n if np.count_nonzero(xTest[i, :]) == 0:\n xTest = np.delete(xTest, i, 0)\n break\n\n # to avoid negative power load\n if self.ready.get_baseline() == AdaptationLevel.BASELINE_C: \n yTestPower = abs(self.learned_model.predict(xTest))\n if self.ready.get_baseline() == AdaptationLevel.BASELINE_D:\n predY, predYStd = self.learned_model.predict(xTest, with_noise=False)\n predY = np.ravel(predY)\n predYStd = np.ravel(predYStd)\n\n halfIntervals = 1.729*predYStd\n goodIndices = np.where(predY>halfIntervals)\n\n xTest = xTest[goodIndices]\n test_size = xTest.shape[0]\n \n predY = predY[goodIndices]\n predYStd = predYStd[goodIndices]\n yTestPower = predY\n \n \n yTestPower_true = self.true_power_model.evaluateModelFast(xTest)\n\n # adding noise for the speed\n s = np.random.uniform(mu, sigma, test_size)\n\n yTestSpeed = np.zeros(test_size)\n for i in range(test_size):\n yTestSpeed[i] = speed_list[i % len(speed_list)]\n\n yTestSpeed = yTestSpeed + s\n\n if self.ready.get_baseline() == AdaptationLevel.BASELINE_C:\n yDefaultPower = abs(self.learned_model.predict(self.default_conf))\n elif self.ready.get_baseline() == AdaptationLevel.BASELINE_D:\n defaultPredY, defaultPredYVar = self.learned_model.predict(self.default_conf, with_noise=False)\n yDefaultPower = defaultPredY\n yDefaultPower = abs(np.ravel(yDefaultPower))\n\n yDefaultPower_true = self.true_power_model.evaluateModelFast(self.default_conf)\n yDefaultSpeed = speed_list[2]\n\n idx_pareto, pareto_power, pareto_speed = self.learner.get_pareto_frontier(yTestPower, yTestSpeed, maxX=False, maxY=True)\n\n json_data = get_json(pareto_power, pareto_speed)\n\n json_data_true_model = get_json([yTestPower_true[i] for i in idx_pareto], [yTestSpeed[i] for i in idx_pareto])\n\n # add the default configuration\n json_data['configurations'].append({\n 'config_id': 0,\n 'power_load': yDefaultPower[0]/3600*1000,\n 'power_load_w': yDefaultPower[0],\n 'speed': yDefaultSpeed\n })\n with open(self.config_list_file, 'w') as outfile:\n json.dump(json_data, outfile)\n print(\"\\n**Predicted**\")\n print(json_data)\n\n json_data_true_model['configurations'].append({\n 'config_id': 0,\n 'power_load': yDefaultPower_true[0]/3600*1000,\n 'power_load_w': yDefaultPower_true[0],\n 'speed': yDefaultSpeed\n })\n with open(config_list_file_true, 'w') as outfile:\n json.dump(json_data_true_model, outfile)\n print(\"\\n**True**\")\n print(json_data_true_model)\n\n\n\n def dump_true_default_config(self):\n '''\n Write the default config based on the true model for case A and case B.\n Write into config_list_true.json\n '''\n\n yDefaultPower_true = self.true_power_model.evaluateModelFast(self.default_conf)\n yDefaultSpeed = speed_list[2]\n json_data_true_model = {}\n json_data_true_model['configurations'] = []\n json_data_true_model['configurations'].append({\n 'config_id': 0,\n 'power_load': yDefaultPower_true[0]/3600*1000,\n 'power_load_w': yDefaultPower_true[0],\n 'speed': yDefaultSpeed\n })\n with open(self.config_list_file_true, 'w') as outfile:\n json.dump(json_data_true_model, outfile)\n\n def has_budget(self):\n return self.budget > self.used_budget\n"
},
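For the BASELINE_D branch in `update_config_files` above, a sampled configuration is kept only when its predicted power stays clearly positive after accounting for model uncertainty. The sketch below reproduces that filter in isolation; the `filter_confident_positive` helper and the sample arrays are illustrative inventions, and the 1.729 multiplier is simply the constant used in the code above, whose statistical interpretation is not documented there.

```python
import numpy as np

def filter_confident_positive(pred_mean, pred_std, k=1.729):
    """Keep only configurations whose prediction stays positive.

    A configuration passes when mean > k * std, i.e. the lower end of the
    (approximate) interval does not cross into negative power load.
    """
    pred_mean = np.ravel(pred_mean)
    pred_std = np.ravel(pred_std)
    good = np.where(pred_mean > k * pred_std)
    return good, pred_mean[good], pred_std[good]

# Toy data standing in for a probabilistic model's output (assumed shapes).
mean = np.array([5.0, 0.3, 7.2, -1.0])
std = np.array([1.0, 2.0, 0.5, 0.2])
idx, m, s = filter_confident_positive(mean, std)
print(idx, m, s)   # indices 0 and 2 survive the filter
```

The surviving index set is then reused to read off the matching uncertainties, mirroring how `goodIndices` is applied to both `predY` and `predYStd` above.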
{
"alpha_fraction": 0.5896739363670349,
"alphanum_fraction": 0.625,
"avg_line_length": 16.5238094329834,
"blob_id": "3a54714c778cf86ffbfbb23dbc95214eb07e1680",
"content_id": "af7b5ca0cbb2862f2ea44586f747beb910f13546",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 21,
"path": "/learner/constants.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\n\nclass AdaptationLevel(Enum):\n BASELINE_A = 1\n BASELINE_B = 2\n BASELINE_C = 3\n BASELINE_D = 4\n\n\n\nclass Status(Enum):\n LEARNING_STARTED = 1\n LEARNING_DONE = 2\n ADAPT_STARTED = 3\n ADAPT_DONE = 4\n CHARGING_STARTED = 5\n CHARGING_DONE = 6\n AT_WAYPOINT = 7\n ONLINE_LEARNING_STARTED = 8\n ONLINE_LEARNING_DONE = 9\n"
},
{
"alpha_fraction": 0.6459695100784302,
"alphanum_fraction": 0.6677560210227966,
"avg_line_length": 24.5,
"blob_id": "37efd217bc519f7722f87e4ddaeef47a140f19d8",
"content_id": "ba40716e24e16cb05c22ce9456e42aca04fa5c91",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 36,
"path": "/learner/genmodels.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "from learner.model import genModel, Model, Term\nimport os\nimport numpy as np\n\nmodel_path = os.path.expanduser(\"~/cp1/models/temp\")\nmodel_name = 'model'\n\nscale = 100.0\nN = 100\nndim = 20\nmax_coeff = 10\n\nopt = 20\ninteractions = list(range(1, N+1))\n\nxTest = np.ones(shape=(1, ndim))\n\nidx = 0\nfor interaction in interactions:\n model_terms = genModel(opt, interaction, max_coeff)\n model = Model(model_terms, ndim)\n max_val = model.evaluateModelFast(xTest)[0]\n\n # Normalize model\n model_terms_normalized = []\n for term in model_terms:\n new_coeff = term.coefficient/max_val*scale\n new_term = Term(new_coeff, term.options)\n model_terms_normalized.append(new_term)\n\n model_normalized = Model(model_terms_normalized, ndim)\n\n model_txt = model_normalized.__str__()\n with open(os.path.join(model_path, model_name + str(idx)), 'w') as file:\n file.write(model_txt)\n idx += 1\n"
},
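The normalization loop in `genmodels.py` rescales every term coefficient by the model's value at the all-ones configuration, so that configuration evaluates to exactly `scale`. This toy check, with a plain additive model standing in for the repo's `Model`/`Term` classes (an assumption, since their internals are not shown here), makes the invariant visible:

```python
import numpy as np

scale = 100.0

def evaluate(coeffs, x):
    # Toy stand-in for Model.evaluateModelFast: a purely additive model.
    return float(np.dot(coeffs, x))

rng = np.random.default_rng(0)
coeffs = rng.uniform(0, 10, size=20)          # random "Term" coefficients
x_ones = np.ones(20)

max_val = evaluate(coeffs, x_ones)            # value at the all-ones config
coeffs_normalized = coeffs / max_val * scale  # same rescaling as above

print(evaluate(coeffs_normalized, x_ones))    # -> 100.0 (up to float error)
```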
{
"alpha_fraction": 0.5909860730171204,
"alphanum_fraction": 0.597854733467102,
"avg_line_length": 44.405982971191406,
"blob_id": "9c7dcc9631145d800aa7a5d621ede0f2608be0f0",
"content_id": "582c1af64718981eeb4c012515dd6c20534e11ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10632,
"license_type": "permissive",
"max_line_length": 180,
"num_lines": 234,
"path": "/learner/DQN_learner.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "import os\nimport random\nimport numpy as np\nfrom keras import backend as K\n\n\n\nclass DQNLearner:\n def __init__(self, DQNAgent, system, resultDir, isDebug = True, memoryInitSize = 10000, optimizationType=\"min\"):\n self.agent = DQNAgent\n self.system = system\n self.optimizationType = optimizationType\n self.memoryInitSize = memoryInitSize\n self.resultDir = resultDir\n self.DQNModelFilepath = os.path.join(resultDir, 'DQN_model.h5')\n self.isDebug = isDebug\n\n\n # Input:\n # config: the input to the DQN. config[0][0] is the list of option values \n def evaluateConfig(self, config, testSystem):\n sysConfig = config[0][0]\n perf=testSystem.measure(sysConfig)\n return perf\n\n # Input:\n # action: list of options to flip. So far, only one option to filp\n def createNewConfig(self, config, action):\n newConfig = [value for value in config[0][0]]\n #assert len(action)==1\n #opID = action[0]\n for opID in action:\n newConfig[opID] = (newConfig[opID]+1)%2\n newConfig=np.expand_dims([newConfig], axis=0)\n return newConfig\n\n # Use the performance gap to represent the reward\n def rewardCal(self, oldPerf, newPerf):\n if self.optimizationType == \"max\":\n return round(newPerf - oldPerf, 6)\n else: # minize system performance\n return round(oldPerf - newPerf, 6)\n\n # [Used in Debug Mode only]\n # Assume the # of actions equals to the # of options\n # Measuing the performnace of the each configuration resulting from one action by flipping one option\n def measureAllActions(self, state, numberOfActions):\n perfsOfEachAction=[]\n for i in range(numberOfActions):\n tempConfig=self.createNewConfig(state, [i])\n perfsOfEachAction.append(self.evaluateConfig(tempConfig, self.system))\n return perfsOfEachAction\n\n # Calculate the relative performance gap between the optimal action and the selected action\n def calActionError(self, actualPerf, perfsOfEachAction):\n if self.optimizationType == \"max\":\n bestPerf = max(perfsOfEachAction)\n if bestPerf == 0:\n return -actualPerf\n else:\n return 1.0*(bestPerf-actualPerf)/bestPerf\n else:\n bestPerf = min(perfsOfEachAction)\n if bestPerf == 0:\n return actualPerf\n else:\n return 1.0*(actualPerf-bestPerf)/bestPerf\n\n # state represents a configuration: state[0][0] is a list of option values\n def transition(self, state, action, testSystem):\n # new configuration is the new state of the DQN agent\n newConfig=self.createNewConfig(state, action)\n newPerf=self.evaluateConfig(newConfig, testSystem)\n return newConfig, newPerf\n\n # create a random configuration whose dimensionality is (1, 1, self.numOfOptions)\n def getARandomConfig(self):\n config=np.array([])\n for i in range(self.system.numOfOptions):\n config=np.append(config, random.choice([0, 1]))\n \n config=np.expand_dims([config], axis=0)\n return config\n\n # use testSystem to initialize Replay Memory\n def initializeReplayMemory(self, testSystem):\n print(\"[DQN - initialize Replay Memory]\")\n memoryCnt = 0\n while memoryCnt < self.memoryInitSize:\n state=self.getARandomConfig()\n perf=self.evaluateConfig(state, testSystem)\n action = random.sample(range(self.agent.actionSize), self.agent.numOfOptionsToChangePerAction)\n nextState, newPerf = self.transition(state, action, testSystem)\n reward = self.rewardCal(perf, newPerf)\n if self.agent.remember(state, action, reward, nextState):\n memoryCnt += 1 # it is a new experience\n if self.isDebug:\n print(\"SampleID {0:5d} : Reward - {1}.\".format(memoryCnt, reward))\n \n\n # DQN agent learns the good/optimal policies in the system\n def 
learning(self, totalIters = 1000, iterRatioFirstEpoch = 0.1, iterRatioLastEpoch = 0.05, numEpochs = 50):\n print(\"[DQN - learning]\")\n # set up learning strategy \n itersFirstEpoch = int(totalIters*iterRatioFirstEpoch)\n itersLastEpoch = int(totalIters*iterRatioLastEpoch)\n itersPerMidEpoch = int((totalIters - itersFirstEpoch - itersLastEpoch)/(numEpochs-2))\n if itersPerMidEpoch <= 0:\n msg = \"[Error - Invalid Learning Strategy]: itersPerMidEpoch ({}) is not positive.\\n\".format(itersPerMidEpoch)\n msg += \" totalIters-({}), iterRatioFirstEpoch-({}), iterRatioLastEpoch-({}).\".format(\n totalIters, iterRatioFirstEpoch, iterRatioLastEpoch)\n raise Exception(msg)\n ## update the iterations in the last epoch\n itersLastEpoch = totalIters - itersFirstEpoch - itersPerMidEpoch*(numEpochs-2)\n\n ## iterations per epoch\n itersPerEpoch = [itersFirstEpoch] + [itersPerMidEpoch]*(numEpochs-2) + [itersLastEpoch]\n print(\"Iterations Per Epoch: \"+str(itersPerEpoch))\n\n # set learning rate for each epoch\n lrDecay=-1.0*(self.agent.learningRateMax - self.agent.learningRateMin)/(numEpochs-1)\n lrs = np.arange(self.agent.learningRateMax, self.agent.learningRateMin, lrDecay)\n lrs = [round(lr,4) for lr in lrs]\n lrs = lrs + [self.agent.learningRateMin]\n\n ## set epsilon for each epoch\n epsilonDecayStep = round(-1.0*(self.agent.epsilonMax - self.agent.epsilonMin)/(numEpochs-1), 4)\n epsilons = np.arange(self.agent.epsilonMax, self.agent.epsilonMin, epsilonDecayStep)\n epsilons = [round(ep, 4) for ep in epsilons]\n epsilons = epsilons + [self.agent.epsilonMin]\n\n\n if self.isDebug:\n avePerfs=[]\n aveRewards=[]\n allRewards=[]\n # averaged PGPs\n # PGP: percentage of the performance gap over the best-actioned performance in each iteration\n # performance gap: <actually-actioned performance> - <best-actioned performance>\n avePGPs=[]\n\n allPerfs = []\n usePolicy = False\n # start with a random state (configuration)\n state=self.getARandomConfig()\n perf=self.evaluateConfig(state, self.system)\n\n # DQN learning process\n for e in range(numEpochs):\n print(\"DQN learning: Epoch \" + str(e))\n if self.isDebug:\n currentEpochRewards=[]\n sumPGPs = 0\n\n currentEpochPerfs=[]\n numIters = itersPerEpoch[e]\n self.agent.setEpsilon(epsilons[e])\n lr = lrs[e]\n K.set_value(self.agent.optimizer.lr, lr)\n for i in range(numIters):\n usePolicy, action = self.agent.act(state)\n # Only pick the top action\n #assert len(action)==1\n if self.agent.explorationBonus != 0: # exploration function is enabled\n self.agent.incrementExplorationCount(state, action)\n nextState, newPerf = self.transition(state, action, self.system)\n reward = self.rewardCal(perf, newPerf)\n self.agent.remember(state, action, reward, nextState)\n\n perf = newPerf\n currentEpochPerfs.append(perf)\n\n if self.isDebug:\n perfsOfEachAction=self.measureAllActions(state, self.system.numOfOptions)\n # percent gap between performances of configurations\n # derived by the best action and the actually selected action \n perfGapPercent=self.calActionError(newPerf, perfsOfEachAction)\n sumPGPs += perfGapPercent\n\n print(\"UsePolicy: {}, Epoch: {}, Iteration: {}, Epsilon: {}, LearningRate: {}, Perf: {}, Reward: {}\".format(usePolicy, e, i, epsilons[e], lr, perf, reward)) \n currentEpochRewards.append(reward)\n\n state = nextState\n # update the prediction network\n self.agent.replay()\n\n allPerfs.extend(currentEpochPerfs)\n self.agent.updateTargetModel()\n\n\n if self.isDebug:\n avePGPs.append(round(sumPGPs/numIters, 4))\n 
allRewards.extend(currentEpochRewards)\n with open(os.path.join(self.resultDir, \"performance_in_epoch_\"+str(e)), \"w+\") as f:\n currentAvePerf=0.0\n for p in currentEpochPerfs:\n f.write(str(p)+\"\\n\")\n currentAvePerf+=p\n currentAvePerf=round(currentAvePerf/len(currentEpochPerfs), 6)\n avePerfs.append(currentAvePerf)\n\n with open(os.path.join(self.resultDir, \"reward_in_epoch_\"+str(e)), \"w+\") as f:\n currentAveReward=0.0\n for r in currentEpochRewards:\n f.write(str(r)+\"\\n\")\n currentAveReward+=r\n currentAveReward=round(currentAveReward/len(currentEpochRewards), 6)\n aveRewards.append(currentAveReward)\n\n\n if self.isDebug:\n # output final state (configuration)\n with open(os.path.join(self.resultDir, \"DQN_Learning_Summary.txt\"), \"w+\") as f:\n print(\"\\n===Writing summary===\")\n f.write(\"Epoch,\\tEpsilon,\\tIterations,\\tAve Reward, \\tAve Perf, \\tAve PGP\\n\")\n print(\"Epoch,\\tEpsilon,\\tIterations,\\tAve Reward, \\tAve Perf, \\tAve PGP\")\n epoch=1\n for epsilon, iters, aveR, avePerf, avePGP in zip(epsilons, itersPerEpoch, aveRewards, avePerfs, avePGPs):\n f.write(str(epoch)+\",\\t\"+str(epsilon)+\",\\t\"+str(iters)+\",\\t\"+str(aveR)+\",\\t\"+str(avePerf)+\",\\t\"+str(avePGP)+\"\\n\")\n print(str(epoch)+\",\\t\"+str(epsilon)+\",\\t\"+str(iters)+\",\\t\"+str(aveR)+\",\\t\"+str(avePerf)+\",\\t\"+str(avePGP))\n epoch+=1\n \n with open(os.path.join(self.resultDir, \"allPerfs.txt\"), \"w+\") as f:\n for p in allPerfs:\n f.write(str(p)+\"\\n\")\n with open(os.path.join(self.resultDir, \"allRewards.txt\"), \"w+\") as f:\n for r in allRewards:\n f.write(str(r)+\"\\n\")\n\n \n # Save the DQN predict model\n self.agent.predictModel.save(self.DQNModelFilepath)\n # Return the performance of all sampled configurations\n return allPerfs\n\n\n\n"
},
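Both the learning-rate and epsilon schedules in `learning` above are linear ramps built with `np.arange` plus an explicitly appended endpoint (which `np.arange` excludes). A minimal standalone sketch of that construction, with placeholder bounds rather than the tuned ones:

```python
import numpy as np

def linear_schedule(v_max, v_min, num_epochs):
    """One value per epoch, decaying linearly from v_max to v_min."""
    step = -1.0 * (v_max - v_min) / (num_epochs - 1)
    values = [round(v, 4) for v in np.arange(v_max, v_min, step)]
    return values + [v_min]          # np.arange excludes the endpoint

epsilons = linear_schedule(1.0, 0.1, 10)   # exploration rate per epoch
lrs = linear_schedule(0.7, 0.1, 10)        # learning rate per epoch
print(epsilons)  # ~[1.0, 0.9, 0.8, ..., 0.2, 0.1]
print(lrs)
```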
{
"alpha_fraction": 0.8363636136054993,
"alphanum_fraction": 0.8363636136054993,
"avg_line_length": 5.111111164093018,
"blob_id": "0700bafca9caa7fa6ff1984f6209afde119e000b",
"content_id": "2e042e81cc1bf13ba9a81c0f940312dd3f699d48",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 7,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "nose\nsphinx\nsklearn\npyDOE\nsympy\nscipy\nnumpy\nGPy\nGPyOpt\n"
},
{
"alpha_fraction": 0.668677031993866,
"alphanum_fraction": 0.6783010363578796,
"avg_line_length": 45.93373489379883,
"blob_id": "2aa166df27206a86b15db6058eba2d1247d26ce8",
"content_id": "ba634d9fb0e67d3c447f363459f6bd2a84f4de36",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7793,
"license_type": "permissive",
"max_line_length": 158,
"num_lines": 166,
"path": "/learner/DQN_agent.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "import random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Reshape\nfrom keras.layers import Dense\nfrom keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D, AveragePooling1D\nfrom keras.layers import Flatten\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.utils import print_summary as modelSummary\n\nimport tensorflow as tf\n\n\n\nclass DQNAgent:\n def __init__(self, optionSize, actionSize, explorationBonus, updateBatchSize, useDoubleDQN=True, \n numOfOptionsToChangePerAction=1, numOfOptionsToChangeByGreedyPerAction=1, actionType=\"e-greedy\"):\n self.optionSize = optionSize\n self.actionSize = actionSize\n self.explorationBonus = explorationBonus\n self.updateBatchSize = updateBatchSize\n self.useDoubleDQN = useDoubleDQN\n self.exploredSAs = {} # a dictionary that tracks the exploration number of a state folloiwng an action. (state, action) : counts\n self.memory = {} # (state, action, nextState) : reward\n self.gamma = 0.95 # discount rate\n self.learningRateMax = 0.7\n self.learningRateMin = 0.1\n self.epsilonMax = 1.0\n self.epsilonMin = 0.1\n self.epsilon = 1.0 # used by epsilon-greedy exploration approach\n self.optimizer = Adam(lr=self.learningRateMax)\n self.alwaysRandomAction = True if actionType==\"random\" else False\n self.numOfOptionsToChangePerAction = numOfOptionsToChangePerAction\n self.numOfOptionsToChangeByGreedyPerAction = numOfOptionsToChangeByGreedyPerAction\n assert self.numOfOptionsToChangeByGreedyPerAction <= self.numOfOptionsToChangePerAction\n assert actionSize >= self.numOfOptionsToChangePerAction\n\n\n self.predictModel = self.buildModelCNN()\n self.targetModel = self.buildModelCNN()\n self.updateTargetModel()\n\n \"\"\"Huber loss for Q Learning\n References: https://en.wikipedia.org/wiki/Huber_loss\n https://www.tensorflow.org/api_docs/python/tf/losses/huber_loss\n \"\"\"\n def huberLoss(self, yTrue, yPred, clipDelta=1.0):\n error = yTrue - yPred\n cond = K.abs(error) <= clipDelta\n\n squaredLoss = 0.5 * K.square(error)\n quadraticLoss = 0.5 * K.square(clipDelta) + clipDelta * (K.abs(error) - clipDelta)\n\n return K.mean(tf.where(cond, squaredLoss, quadraticLoss))\n\n def buildModelCNN(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n\n model.add(Reshape((self.optionSize, 1), input_shape=(1, self.optionSize)))\n model.add(Conv1D(32, 8, activation='relu', input_shape=(self.optionSize,1)))\n model.add(Conv1D(64, 4, activation='relu'))\n model.add(Conv1D(64, 3, activation='relu'))\n model.add(AveragePooling1D(4))\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(Dense(self.actionSize, activation='relu'))\n\n model.compile(loss=self.huberLoss, optimizer=self.optimizer)\n return model\n\n def get_Q_values(self, state):\n qValues=self.predictModel.predict(state)\n return qValues[0]\n\n def updateTargetModel(self):\n # copy weights from model to targetModel\n self.targetModel.set_weights(self.predictModel.get_weights())\n\n # If the current experience is already in the memory, return False.\n # Otherwise, add the experience into the memory, and return True.\n def remember(self, state, action, reward, nextState):\n policy = (tuple(state[0][0]), tuple(action), tuple(nextState[0][0]))\n if policy not in self.memory:\n self.memory[policy] = reward\n return True\n else:\n return False\n\n def act(self, state):\n if self.alwaysRandomAction:\n return False, random.sample(range(self.actionSize), self.numOfOptionsToChangePerAction)\n 
else:\n if np.random.rand() <= self.epsilon:\n return False, random.sample(range(self.actionSize), self.numOfOptionsToChangePerAction)\n else:\n return True, self.determineAction(state)\n\n # Use the prediction network\n def determineAction(self, state):\n qValues = self.predictModel.predict(state)\n\n # Flip a few options that correspond to top q-values and a few randomly-selected options\n greedyChangeOpIndices=np.argpartition(qValues[0], range(-self.numOfOptionsToChangeByGreedyPerAction, 0))[-self.numOfOptionsToChangeByGreedyPerAction:]\n greedyChangeOpIndices=np.flip(greedyChangeOpIndices) # list(reversed(greedyChangeOpIndices.tolist()))\n leftOptionList=[x for x in range(self.actionSize) if x not in greedyChangeOpIndices]\n randomChanges=random.sample(leftOptionList, self.numOfOptionsToChangePerAction - self.numOfOptionsToChangeByGreedyPerAction)\n\n return greedyChangeOpIndices.tolist() + randomChanges\n\n # Input: record is a tuple, (state, action) where action is a list of options' numerical IDs\n def incrementExplorationCount(self, state, action):\n hashableState=tuple(state[0][0].tolist())\n hashableAction=tuple(action)\n record=(hashableState, hashableAction)\n if record in self.exploredSAs:\n self.exploredSAs[record]=self.exploredSAs[record]+1\n else:\n # for a record that does not exist in the dict, its count value is 1.\n # This avoids the division-by-zero issue when calculating the actual exploration bonus.\n self.exploredSAs[record]=2\n\n # Input: qValues: 1-dimensional tuple / list\n # Output: augmented qValues - using the exploration function, f(u, N) = u + k/N\n def explorationFunction(self, state, qValues):\n augQValues=[qv for qv in qValues]\n hashableState=tuple(state[0][0].tolist())\n for actionID in range(self.actionSize):\n N=1\n if (hashableState, actionID) in self.exploredSAs:\n N=self.exploredSAs[(hashableState, actionID)]\n augQValues[actionID]=augQValues[actionID]+1.0*self.explorationBonus/N\n return augQValues\n\n def replay(self):\n # random.sample needs a sequence in Python 3, so materialize the dict items\n minibatch = random.sample(list(self.memory.items()), self.updateBatchSize)\n \n for policy, reward in minibatch:\n stateTuple, actionTuple, nextStateTuple = policy\n state=np.expand_dims([stateTuple], axis=0)\n nextState=np.expand_dims([nextStateTuple], axis=0)\n\n preNetQValues = self.predictModel.predict(state)\n targetNetQValues = self.targetModel.predict(nextState)[0]\n # use exploration function to add exploration bonus\n if self.explorationBonus != 0:\n targetNetQValues = np.array(self.explorationFunction(nextState, targetNetQValues))\n\n futureQVs=None\n if self.useDoubleDQN: # deal with overestimation of q-values. (https://papers.nips.cc/paper/3964-double-q-learning)\n # use DQN prediction network to determine the action for the nextState\n actionForNextState = self.determineAction(nextState)\n # use DQN target network to calculate the q values of taking the above action\n futureQVs = targetNetQValues[np.array(actionForNextState)]\n else: # Vanilla DQN with fixed target network\n # pick the maximum value from the target network\n topK = len(actionTuple)\n topKQVs = targetNetQValues[np.argpartition(targetNetQValues, range(-topK, 0))[-topK:]]\n futureQVs = np.flip(topKQVs)\n\n preNetQValues[0][np.array(actionTuple)] = reward + self.gamma * futureQVs\n self.predictModel.fit(state, preNetQValues, epochs=1, verbose=0)\n\n def setEpsilon(self, epsilon):\n self.epsilon = epsilon\n\n\n"
},
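The Huber loss compiled into the network above is quadratic for errors within `clipDelta` and linear beyond it. Here is the same piecewise formula in plain NumPy as a quick sanity check; the sample values are made up:

```python
import numpy as np

def huber_loss(y_true, y_pred, clip_delta=1.0):
    error = y_true - y_pred
    small = np.abs(error) <= clip_delta
    squared = 0.5 * np.square(error)                                   # |e| <= delta
    linear = 0.5 * clip_delta**2 + clip_delta * (np.abs(error) - clip_delta)
    return np.mean(np.where(small, squared, linear))

y_true = np.array([0.0, 0.0, 0.0])
y_pred = np.array([0.5, 1.0, 3.0])
# errors 0.5 and 1.0 use the quadratic branch, 3.0 the linear branch:
# 0.5*0.25 = 0.125, 0.5*1.0 = 0.5, 0.5 + 1*(3-1) = 2.5  -> mean ~= 1.0417
print(huber_loss(y_true, y_pred))
```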
{
"alpha_fraction": 0.5221579670906067,
"alphanum_fraction": 0.539499044418335,
"avg_line_length": 21.565217971801758,
"blob_id": "3da5d7b26f8e6ab28a4e8f87bc95a3360b45afa1",
"content_id": "dbc55caadd006bf0a2920177146aabe9c15f8842",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 23,
"path": "/learner/lib.py",
"repo_name": "cmu-mars/model-learner",
"src_encoding": "UTF-8",
"text": "def is_number(str):\n try:\n # for int, long, float and complex\n complex(str)\n except ValueError:\n return False\n\n return True\n\n\ndef get_json(pareto_power, pareto_speed):\n\n data = {}\n data['configurations'] = []\n for i in range(len(pareto_power)):\n data['configurations'].append({\n 'config_id': i + 1,\n 'power_load': pareto_power[i]/3600*1000,\n 'power_load_w': pareto_power[i],\n 'speed': pareto_speed[i]\n })\n\n return data\n"
}
] | 16 |
vipghn4/speech_recognition_app
|
https://github.com/vipghn4/speech_recognition_app
|
5e730673ef8a3b828c16992ba6113365683c97d2
|
991a20f154d926e9199748ae8f484af91da4c6d2
|
5dc4faf22f3d4fab83feb13a759826ab473904b3
|
refs/heads/master
| 2020-06-02T19:27:55.805288 | 2019-06-12T02:04:57 | 2019-06-12T02:04:57 | 191,282,715 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6411408185958862,
"alphanum_fraction": 0.6667415499687195,
"avg_line_length": 36.07692337036133,
"blob_id": "40fde9b46a2989ffd3b183a038afc7ff94b489f1",
"content_id": "c82e44a97f6140d826486afa8d9c87a0ce08a41e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4453,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 117,
"path": "/mediaplayer.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import os, sys, glob\r\nimport time\r\nfrom threading import Thread\r\n\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtMultimedia import *\r\nfrom PyQt5.QtMultimediaWidgets import *\r\n\r\nfrom main import launch_ai2thor, get_controllers\r\nfrom MainWindow import Ui_MainWindow\r\n\r\n# Kitchens: FloorPlan1 - FloorPlan30\r\n# Living rooms: FloorPlan201 - FloorPlan230\r\n# Bedrooms: FloorPlan301 - FloorPlan330\r\n# Bathrooms: FloorPLan401 - FloorPlan430\r\nFLOOR_CODES = list(range(1, 31)) + list(range(201, 231)) + list(range(301, 331)) + list(range(401, 431))\r\nFLOOR_CODES = [str(x) for x in FLOOR_CODES]\r\n\r\nclass ViewerWindow(QMainWindow):\r\n state = pyqtSignal(bool)\r\n\r\n def closeEvent(self, e):\r\n # Emit the window state, to update the viewer toggle button.\r\n self.state.emit(False)\r\n\r\n\r\nclass PlaylistModel(QAbstractListModel):\r\n def __init__(self, playlist, *args, **kwargs):\r\n super(PlaylistModel, self).__init__(*args, **kwargs)\r\n self.playlist = playlist\r\n\r\n def data(self, index, role):\r\n if role == Qt.DisplayRole:\r\n media = self.playlist.media(index.row())\r\n return media.canonicalUrl().fileName()\r\n\r\n def rowCount(self, index):\r\n return self.playlist.mediaCount()\r\n\r\n\r\nclass MainWindow(QMainWindow, Ui_MainWindow):\r\n def __init__(self, *args, **kwargs):\r\n super(MainWindow, self).__init__(*args, **kwargs)\r\n self.setupUi(self)\r\n\r\n self.player = QMediaPlayer()\r\n\r\n self.player.play()\r\n\r\n self.playlist = QMediaPlaylist()\r\n self.player.setPlaylist(self.playlist)\r\n\r\n self.viewer = ViewerWindow(self)\r\n self.viewer.setWindowFlags(self.viewer.windowFlags() | Qt.WindowStaysOnTopHint)\r\n self.viewer.setMinimumSize(QSize(480,360))\r\n\r\n videoWidget = QVideoWidget()\r\n self.viewer.setCentralWidget(videoWidget)\r\n self.player.setVideoOutput(videoWidget)\r\n \r\n self.playButton.pressed.connect(self.play_ai2thor)\r\n\r\n self.model = PlaylistModel(self.playlist)\r\n # self.playlistView.setModel(self.model)\r\n # selection_model = self.playlistView.selectionModel()\r\n\r\n self.open_floor_action.triggered.connect(self.open_floor)\r\n self.setAcceptDrops(True)\r\n\r\n self.show()\r\n \r\n def play_ai2thor(self):\r\n print(f\"Playing AI2Thor - Floor plan code: {self.current_floor}\\n\")\r\n self.transDestTextEdit.setPlainText(\"\")\r\n self.mainDestTextEdit.setPlainText(\"\")\r\n self.mainDestTextEdit.insertPlainText(\"Logging into AI2Thor ...\\n\")\r\n self.mainDestTextEdit.insertPlainText(f\"Logging into floor {self.current_floor} ...\\n\")\r\n \r\n ai2thor_thread = Thread(target=launch_ai2thor, args=(self.current_floor, self.mainDestTextEdit, self.transDestTextEdit))\r\n ai2thor_thread.daemon = True\r\n ai2thor_thread.start()\r\n \r\n def open_floor(self):\r\n floor_code, ok = QInputDialog.getItem(self, \"Select floor\", \"List of floors\", FLOOR_CODES, 0, False)\r\n \r\n if ok and floor_code:\r\n self.current_floor = floor_code\r\n self.floorCodeDisplayer.setText(f\"Floor plan {self.current_floor}\")\r\n self.mainDestTextEdit.insertPlainText(f\"Choose floor {self.current_floor}\\n\")\r\n\r\nif __name__ == '__main__':\r\n app = QApplication([])\r\n app.setApplicationName(\"Failamp\")\r\n app.setStyle(\"Fusion\")\r\n\r\n # Fusion dark palette from https://gist.github.com/QuantumCD/6245215.\r\n palette = QPalette()\r\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\r\n palette.setColor(QPalette.WindowText, Qt.white)\r\n palette.setColor(QPalette.Base, 
QColor(25, 25, 25))\r\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\r\n palette.setColor(QPalette.ToolTipBase, Qt.white)\r\n palette.setColor(QPalette.ToolTipText, Qt.white)\r\n palette.setColor(QPalette.Text, Qt.white)\r\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\r\n palette.setColor(QPalette.ButtonText, Qt.white)\r\n palette.setColor(QPalette.BrightText, Qt.red)\r\n palette.setColor(QPalette.Link, QColor(42, 130, 218))\r\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\r\n palette.setColor(QPalette.HighlightedText, Qt.black)\r\n app.setPalette(palette)\r\n app.setStyleSheet(\"QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }\")\r\n\r\n window = MainWindow()\r\n app.exec_()"
},
{
"alpha_fraction": 0.4117647111415863,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 6.5,
"blob_id": "1f0cdca16bac6044388e203769b519732c47a3ed",
"content_id": "95cc55ea3a6c63054ea59057bdf0883eaa0b9a0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "PyQt5>=5.6\r\nsip\r\n"
},
{
"alpha_fraction": 0.595575213432312,
"alphanum_fraction": 0.6035398244857788,
"avg_line_length": 34.51612854003906,
"blob_id": "229193e39613490ca5f44041574077045c20aff2",
"content_id": "51e9b1e364d27db59a1a9ef966d24ecbc7e6da42",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1130,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 31,
"path": "/API_server/recognizer/recognizer.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "# Reference: https://realpython.com/python-speech-recognition/\r\n\r\nimport os, sys, glob\r\nimport speech_recognition as sr\r\n\r\nclass SpeechRecognizer:\r\n def __init__(self):\r\n self.r = sr.Recognizer()\r\n \r\n def _recognize(self, audio):\r\n try:\r\n text = self.r.recognize_google(audio)\r\n return text\r\n except sr.UnknownValueError:\r\n print(\"The recognizer could not understand audio\")\r\n except sr.RequestError as e:\r\n print(\"Could not request results from server service; {0}\".format(e))\r\n return \"\"\r\n \r\n def transcribe(self, path, offset=None, duration=None, savepath=None):\r\n with sr.AudioFile(path) as source:\r\n audio = self.r.record(source, offset=offset, duration=duration)\r\n text = self._recognize(audio)\r\n if text is not None and savepath is not None:\r\n with open(savepath, 'w') as f:\r\n f.write(text)\r\n return text\r\n\r\nif __name__ == \"__main__\":\r\n recognizer = SpeechRecognizer()\r\n text = recognizer.transcribe(\"sample_data/OSR_us_000_0012_8k.wav\", savepath=\"test.txt\")"
},
{
"alpha_fraction": 0.6115108132362366,
"alphanum_fraction": 0.6208633184432983,
"avg_line_length": 29.228260040283203,
"blob_id": "6305972a0fd0d13606fcb3ff25a4f00dc8e6c633",
"content_id": "a75b8016bab22b7ba68f88c2a6242315df561b07",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2780,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 92,
"path": "/API_server/app.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, render_template, send_from_directory, jsonify\nimport base64, binascii\nimport uuid\nimport os, sys, glob, time\nfrom threading import Thread\n\nfrom recognizer.recognizer import SpeechRecognizer\n\napp = Flask(__name__)\nrecognizer = SpeechRecognizer()\nqueue = []\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\ndef predict(id):\n global recognizer\n target = os.path.join(APP_ROOT, 'static/audio/')\n audio_path = \"/\".join([target, id, \"audio.wave\"])\n outfile = \"/\".join([target, id, \"result.txt\"])\n text = recognizer.transcribe(audio_path, savepath=outfile)\n return text\n\ndef serve_queue():\n global queue\n while True:\n if len(queue) > 0:\n id = queue[0]\n predict(id)\n del queue[0]\n print(f'Finished processing {id}')\n time.sleep(1)\n\[email protected](\"/\")\ndef main():\n return render_template('index.html')\n\[email protected](\"/upload\", methods=[\"POST\"])\ndef upload():\n global queue\n \n target = os.path.join(APP_ROOT, 'static/audio/')\n if not os.path.isdir(target):\n os.mkdir(target)\n upload = request.files.getlist(\"file\")[0]\n print(\"File name: {}\".format(upload.filename))\n filename = upload.filename\n \n ext = os.path.splitext(filename)[1]\n if (ext == \".wav\"):\n print(\"File accepted\")\n else:\n return render_template(\"error.html\", message=\"The selected file is not supported\"), 400\n \n id = str(uuid.uuid4())\n dir = \"/\".join([target, id])\n if not os.path.exists(dir):\n os.makedirs(dir)\n destination = \"/\".join([dir, \"audio.wave\"])\n print(\"File saved to to:\", destination)\n upload.save(destination)\n \n queue.append(id)\n return jsonify({\"id\": id, \"url-route\": \"/query/\"+id})\n\[email protected](\"/query/<id>\", methods=[\"GET\"])\ndef query(id):\n target = os.path.join(APP_ROOT, 'static/audio/')\n dir = \"/\".join([target, id])\n if not os.path.exists(dir):\n return render_template(\"error.html\", message=\"There's no audio uploaded\"), 400\n if id in queue:\n return render_template(\"error.html\", message=\"The uploaded audio is being processed\"), 400\n result_path = \"/\".join([dir, \"result.txt\"])\n with open(result_path, \"r\") as f:\n result = f.read()\n return render_template(\"processing.html\", dir=id, filename=\"audio.wave\", result=result)\n\[email protected]('/static/audio/<dir>/<filename>')\ndef send_audio(dir, filename):\n return send_from_directory(\"static/audio/\"+dir, filename)\n\nif __name__ == \"__main__\":\n predictor = Thread(target=serve_queue)\n server_app = Thread(target=app.run, kwargs={\n 'host': '0.0.0.0',\n 'port': 5000,\n 'threaded': True\n })\n predictor.start()\n server_app.start()\n predictor.join()\n server_app.join()"
},
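A minimal client sketch for the two routes above, assuming the server runs at http://localhost:5000 and that the third-party `requests` package is available (both assumptions). It uploads a local WAV file, then polls the returned query route, which answers 400 while the id is still queued and 200 once `result.txt` exists:

```python
import time
import requests

BASE = "http://localhost:5000"   # assumed server address

with open("sample.wav", "rb") as f:          # any local .wav file
    resp = requests.post(BASE + "/upload", files={"file": ("sample.wav", f)})
resp.raise_for_status()
route = resp.json()["url-route"]             # e.g. "/query/<uuid>"

# The server answers 400 while the id is still in the work queue,
# so poll until the result page comes back with status 200.
while True:
    page = requests.get(BASE + route)
    if page.status_code == 200:
        print(page.text)                     # rendered processing.html
        break
    time.sleep(1)
```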
{
"alpha_fraction": 0.61164790391922,
"alphanum_fraction": 0.6164788007736206,
"avg_line_length": 34.910892486572266,
"blob_id": "40bbdf979d6fef68aef7469c4a4a26d7871d93d6",
"content_id": "dae7a0e67ec072606be5d70d520271dc0ffb86f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3726,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 101,
"path": "/main.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import os, glob, sys\r\nimport time\r\ntry:\r\n import ai2thor.controller\r\nexcept ModuleNotFoundError as e:\r\n pass\r\nelse:\r\n from core.third_party_camera import ThirdPartyCameraController\r\n from core.robot import RobotController\r\n\r\nfrom core.recorder import GoogleRecorder\r\nfrom core.analyzer import Analyzer\r\n\r\nSAVE_RECORD = True\r\nSAVE_DIR = \"recorded_audio\"\r\n\r\nQUIT_CODE = 1\r\nNORMAL_CODE = 0\r\nERROR_CODE = -1\r\n\r\ndef get_controllers(floor=\"FloorPlan1\"):\r\n controller = ai2thor.controller.Controller()\r\n controller.start()\r\n controller.reset(floor)\r\n event = controller.step(dict(action='Initialize', gridSize=0.25))\r\n\r\n robot_controller = RobotController(controller, event)\r\n thirdparty_camera_controller = ThirdPartyCameraController(controller, event)\r\n return controller, robot_controller, thirdparty_camera_controller\r\n\r\ndef control_robot(robot_controller, irecorder, ianalyzer, main_text_box, trans_text_box):\r\n transcript = irecorder.get_transcripted_audio(SAVE_RECORD, SAVE_DIR, main_text_box)\r\n if trans_text_box is not None:\r\n trans_text_box.insertPlainText(f\"Transcript: {transcript}\\n\")\r\n else:\r\n print(\"Transcribed text:\", transcript)\r\n \r\n if transcript is None:\r\n return ERROR_CODE\r\n\r\n action = ianalyzer.analyze_transcript(transcript)\r\n if trans_text_box is not None:\r\n trans_text_box.insertPlainText(f\"Action: {action.__str__()}\\n\")\r\n else:\r\n print(action)\r\n \r\n if robot_controller is None:\r\n return ERROR_CODE\r\n if action[\"action\"] == \"move\":\r\n if action[\"direction\"] == \"ahead\":\r\n for i in range(action[\"n_steps\"]):\r\n robot_controller.move('MoveAhead')\r\n elif action[\"direction\"] == \"back\":\r\n for i in range(action[\"n_steps\"]):\r\n robot_controller.move('MoveBack')\r\n elif action[\"direction\"] == \"left\":\r\n for i in range(action[\"n_steps\"]):\r\n robot_controller.move('MoveLeft')\r\n elif action[\"direction\"] == \"right\":\r\n for i in range(action[\"n_steps\"]):\r\n robot_controller.move('MoveRight')\r\n elif action[\"action\"] == \"rotate\":\r\n if action[\"direction\"] == \"up\":\r\n robot_controller.rotate_camera('LookUp')\r\n elif action[\"direction\"] == \"down\":\r\n robot_controller.rotate_camera('LookDown')\r\n elif action[\"direction\"] == \"left\":\r\n robot_controller.rotate_camera('RotateLeft', degree=action[\"degree\"])\r\n elif action[\"direction\"] == \"right\":\r\n robot_controller.rotate_camera('RotateRight', degree=action[\"degree\"])\r\n elif action[\"action\"] == \"quit\":\r\n return QUIT_CODE\r\n return NORMAL_CODE\r\n\r\ndef launch_ai2thor(floor_id, main_text_box, trans_text_box):\r\n try:\r\n robot_controller = get_controllers(f\"FloorPlan{floor_id}\")[1]\r\n print(\"Successfully initialized AI2Thor ...\")\r\n except:\r\n robot_controller = None\r\n print(\"Unsuccessfully initialized AI2Thor ...\")\r\n \r\n if SAVE_RECORD and not os.path.exists(SAVE_DIR):\r\n os.makedirs(SAVE_DIR)\r\n \r\n irecorder = GoogleRecorder()\r\n ianalyzer = Analyzer()\r\n while True:\r\n ret = control_robot(robot_controller, irecorder, ianalyzer, main_text_box, trans_text_box)\r\n if ret == QUIT_CODE or ret == ERROR_CODE:\r\n if main_text_box is not None:\r\n main_text_box.insertPlainText(\"Quit AI2Thor ...\\n\")\r\n else:\r\n print(\"Quit AI2Thor\")\r\n if robot_controller is not None:\r\n robot_controller.controller.stop()\r\n break\r\n time.sleep(1)\r\n\r\nif __name__ == \"__main__\":\r\n launch_ai2thor(\"1\", None, None)"
},
{
"alpha_fraction": 0.5211678743362427,
"alphanum_fraction": 0.5386861562728882,
"avg_line_length": 33.17948532104492,
"blob_id": "5dcd515479ceb9ed5bdbe592a4c97fd8c42ed0f1",
"content_id": "66763f9effe2e34618c72e885580950148707321",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1370,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 39,
"path": "/core/third_party_camera.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import ai2thor.controller\r\n\r\nclass ThirdPartyCameraController:\r\n def __init__(self, controller, event):\r\n \"\"\"\r\n Input:\r\n param::controller\r\n Agent controller\r\n param::event\r\n Initial state event\r\n Output:\r\n None\r\n \"\"\"\r\n self.controller = controller\r\n self.event = event\r\n \r\n def add_camera(self, rotation, position):\r\n self.event = self.controller.step(dict(\r\n action='AddThirdPartyCamera',\r\n rotation=rotation, # e.g. dict(x=0, y=90, z=0),\r\n position=position # e.g. dict(x=-1.25, y=1.0, z=-1.5)\r\n ))\r\n \r\n def update_camera(self, thirdPartyCameraId, rotation, position):\r\n self.event = self.controller.step(dict(\r\n action='UpdateThirdPartyCamera',\r\n thirdPartyCameraId=0, # id is available in the metadata response\r\n rotation=rotation, # e.g. dict(x=0, y=90, z=0),\r\n position=position # e.g. dict(x=-1.25, y=1.0, z=-1.5)\r\n ))\r\n \r\n def get_cameras(self):\r\n return self.event.metadata['thirdPartyCameras']\r\n \r\n def get_camera_frames(self):\r\n return self.event.third_party_camera_frames\r\n \r\n def get_camera_depth(self):\r\n return self.event.third_party_depth_frames"
},
{
"alpha_fraction": 0.5423438549041748,
"alphanum_fraction": 0.5517536401748657,
"avg_line_length": 25.83333396911621,
"blob_id": "b0d9e1f165582c2f65c5f4baeca13ec64126d261",
"content_id": "9eb80be4746f8afb3eb19ac69da2507dd70dadbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 42,
"path": "/readme.md",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "# Run app\r\n`python mediaplayer.py`\r\n\r\n# Demo video\r\nClick [here](https://www.youtube.com/watch?v=36yvh2frr_I&feature=youtu.be)\r\n\r\n# Recognizer\r\n**Recorder**: a `.py` file of two classes\r\n* `Recorder`: \r\n * `get_transcripted_audio(save_record, save_dir)`:\r\n * Input:\r\n * `save_record`: whether to save the record or not\r\n * Type: `bool`\r\n * `save_dir`: the folder to save the record (if `save_record` is `True`)\r\n * Type: `str`\r\n * Output:\r\n * `transcript`: the transcription of the audio\r\n * Type: `str`\r\n\r\n**Analyzer**: a `.py` file of two classes\r\n* `Analyzer`: \r\n * `analyze_transcript(transcript)`\r\n * Input:\r\n * `transcript`: the transcription of a recorder\r\n * Type: `str`\r\n * Output:\r\n * `action`: a `dict` indicating the action\r\n\r\n# Usage\r\n**Move head**: \r\n* \"move ahead for 5 steps\"\r\n* \"move back for 5 steps\"\r\n* \"move left for 5 steps\"\r\n* \"move right for 5 steps\"\r\n\r\n**Rotate**: \r\n* \"rotate up\"\r\n* \"rotate down\"\r\n* \"rotate left for 50 degrees\"\r\n* \"rotate right for 45 degrees\"\r\n\r\n**Quit the program**: \"quit\"\r\n"
},
{
"alpha_fraction": 0.514675498008728,
"alphanum_fraction": 0.5155022740364075,
"avg_line_length": 31.625,
"blob_id": "3048f9463d704af744e3fb56496545dadc6b99c8",
"content_id": "823a62d9771e33647d54e9f56b2e9caf16be4714",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2419,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 72,
"path": "/core/analyzer.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import os, sys, glob\r\nimport uuid\r\n\r\nclass Analyzer:\r\n def __init__(self):\r\n pass\r\n \r\n def analyze_transcript(self, transcript):\r\n action = {}\r\n if transcript is None:\r\n return action\r\n action[\"action\"] = self.get_action(transcript)\r\n if action[\"action\"] is not None:\r\n if action[\"action\"] == \"move\":\r\n action[\"direction\"] = self.get_move_direction(transcript)\r\n action[\"n_steps\"] = self.get_n_steps(transcript)\r\n elif action[\"action\"] == \"rotate\":\r\n action[\"direction\"] = self.get_rotate_direction(transcript)\r\n action[\"degree\"] = self.get_rotate_degree(transcript)\r\n return action\r\n \r\n def get_action(self, transcript):\r\n action = None\r\n if \"move\" in transcript:\r\n action = \"move\"\r\n elif (\"rotate\" in transcript) or (\"turn\" in transcript):\r\n action = \"rotate\"\r\n elif (\"quit\" in transcript) or (\"exit\" in transcript):\r\n action = \"quit\"\r\n return action\r\n \r\n def get_n_steps(self, transcript):\r\n n_steps = 1\r\n for word in transcript.split():\r\n try:\r\n n_steps = int(word)\r\n except Exception:\r\n pass\r\n return n_steps\r\n \r\n def get_move_direction(self, transcript):\r\n direction = \"ahead\"\r\n if (\"ahead\" in transcript) or (\"forward\" in transcript) or (\"toward\" in transcript):\r\n direction = \"ahead\"\r\n if (\"back\" in transcript) or (\"backward\" in transcript):\r\n direction = \"back\"\r\n if \"left\" in transcript:\r\n direction = \"left\"\r\n if \"right\" in transcript:\r\n direction = \"right\"\r\n return direction\r\n \r\n def get_rotate_direction(self, transcript):\r\n direction = \"left\"\r\n if \"up\" in transcript:\r\n direction = \"up\"\r\n if (\"down\" in transcript) or (\"back\" in transcript):\r\n direction = \"down\"\r\n if \"left\" in transcript:\r\n direction = \"left\"\r\n if \"right\" in transcript:\r\n direction = \"right\"\r\n return direction\r\n \r\n def get_rotate_degree(self, transcript):\r\n degree = 0\r\n for word in transcript.split():\r\n try:\r\n degree = int(word)\r\n except Exception:\r\n pass\r\n return degree"
},
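The keyword matching in `Analyzer` is easiest to follow end to end. Assuming the repo layout makes the module importable as `core.analyzer`, a short driver:

```python
from core.analyzer import Analyzer

analyzer = Analyzer()
for transcript in ["move left for 5 steps",
                   "rotate right for 45 degrees",
                   "quit"]:
    print(transcript, "->", analyzer.analyze_transcript(transcript))

# Expected output:
#   move left for 5 steps -> {'action': 'move', 'direction': 'left', 'n_steps': 5}
#   rotate right for 45 degrees -> {'action': 'rotate', 'direction': 'right', 'degree': 45}
#   quit -> {'action': 'quit'}
```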
{
"alpha_fraction": 0.6777560114860535,
"alphanum_fraction": 0.6888453960418701,
"avg_line_length": 44.948978424072266,
"blob_id": "bd8a49f8909cc0b4a4635259c26c5d4e63362492",
"content_id": "87daf062300c0b64117a468c6f491b78ef3f5ea7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4599,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 98,
"path": "/MainWindow.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'mainwindow.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.10\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(484, 371)\r\n self.centralWidget = QtWidgets.QWidget(MainWindow)\r\n \r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())\r\n \r\n self.centralWidget.setSizePolicy(sizePolicy)\r\n self.centralWidget.setObjectName(\"centralWidget\")\r\n \r\n self.mainDestTextEdit = QtWidgets.QTextEdit(self.centralWidget)\r\n self.mainDestTextEdit.setReadOnly(True)\r\n self.mainDestTextEdit.setObjectName(\"mainDestTextEdit\")\r\n \r\n self.transDestTextEdit = QtWidgets.QTextEdit(self.centralWidget)\r\n self.transDestTextEdit.setReadOnly(True)\r\n self.transDestTextEdit.setObjectName(\"transDestTextEdit\")\r\n \r\n self.floorCodeDisplayer = QtWidgets.QLabel(self.centralWidget)\r\n self.floorCodeDisplayer.setMinimumSize(QtCore.QSize(80, 0))\r\n self.floorCodeDisplayer.setAlignment(QtCore.Qt.AlignCenter)\r\n self.floorCodeDisplayer.setObjectName(\"floorCodeDisplayer\")\r\n \r\n self.playButton = QtWidgets.QPushButton(self.centralWidget)\r\n self.playButton.setText(\"\")\r\n icon1 = QtGui.QIcon()\r\n icon1.addPixmap(QtGui.QPixmap(\"images/control.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.playButton.setIcon(icon1)\r\n self.playButton.setObjectName(\"playButton\")\r\n \r\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_4.setSpacing(6)\r\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\r\n self.horizontalLayout_4.addWidget(self.floorCodeDisplayer)\r\n self.horizontalLayout_4.addWidget(self.playButton)\r\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_4.addItem(spacerItem)\r\n \r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setSpacing(6)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.verticalLayout.addWidget(self.mainDestTextEdit)\r\n self.verticalLayout.addWidget(self.transDestTextEdit)\r\n # self.verticalLayout.addWidget(self.playlistView)\r\n self.verticalLayout.addLayout(self.horizontalLayout_4)\r\n \r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralWidget)\r\n self.horizontalLayout.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout.setSpacing(6)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.horizontalLayout.addLayout(self.verticalLayout)\r\n \r\n MainWindow.setCentralWidget(self.centralWidget)\r\n \r\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\r\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 484, 22))\r\n self.menuBar.setObjectName(\"menuBar\")\r\n \r\n self.menuFloor = QtWidgets.QMenu(self.menuBar)\r\n self.menuFloor.setObjectName(\"menuFloor\")\r\n MainWindow.setMenuBar(self.menuBar)\r\n \r\n self.statusBar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusBar.setObjectName(\"statusBar\")\r\n MainWindow.setStatusBar(self.statusBar)\r\n \r\n self.open_floor_action = QtWidgets.QAction(MainWindow)\r\n self.open_floor_action.setObjectName(\"open_floor_action\")\r\n 
self.menuFloor.addAction(self.open_floor_action)\r\n self.menuBar.addAction(self.menuFloor.menuAction())\r\n \r\n self.init_params()\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n \r\n def init_params(self):\r\n self.current_floor = \"1\"\r\n \r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Final Project\"))\r\n self.floorCodeDisplayer.setText(_translate(\"MainWindow\", f\"Floor plan {self.current_floor}\"))\r\n self.menuFloor.setTitle(_translate(\"MainWindow\", \"Floor\"))\r\n self.open_floor_action.setText(_translate(\"MainWindow\", \"Open floor...\"))"
},
{
"alpha_fraction": 0.5701087117195129,
"alphanum_fraction": 0.573913037776947,
"avg_line_length": 34.117645263671875,
"blob_id": "a8cee0825b817ad52d6ce6074d5aab4fd0dc764a",
"content_id": "11a5fe17a154be41153ad5a53146468d0765a1f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1840,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 51,
"path": "/core/robot.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import ai2thor.controller\r\n\r\nPOSSIBLE_MOVES = ['MoveAhead', 'MoveRight', 'MoveLeft', 'MoveBack']\r\nPOSSIBLE_HORIZONTAL_CAM_MOVES = ['RotateRight', 'RotateLeft']\r\nPOSSIBLE_VERTICAL_CAM_MOVES = ['LookUp', 'LookDown']\r\n\r\nclass RobotController:\r\n def __init__(self, controller, event):\r\n \"\"\"\r\n Input:\r\n param::controller\r\n Agent controller\r\n param::event\r\n Initial state event\r\n Output:\r\n None\r\n \"\"\"\r\n self.controller = controller\r\n self.event = event\r\n self.rotation = event.metadata['agent']['rotation']['y']\r\n \r\n def get_agent_state(self):\r\n return self.event.metadata['agent']\r\n \r\n def get_first_view_image_shape(self, ord='hw'):\r\n if ord == 'hw':\r\n return self.event.screen_height, self.event.screen_width \r\n return self.event.screen_width, self.event.screen_height\r\n \r\n def get_first_view_image(self, mode='BGR'):\r\n if mode == 'BGR':\r\n return self.event.cv2img\r\n return self.event.frame\r\n \r\n def get_first_view_depth(self):\r\n return self.event.depth_frame\r\n \r\n def rotate_camera(self, mode='LookUp', degree=0):\r\n if mode in POSSIBLE_HORIZONTAL_CAM_MOVES: \r\n if mode == \"RotateLeft\":\r\n self.rotation -= degree\r\n else:\r\n self.rotation += degree\r\n self.event = self.controller.step(dict(action=\"Rotate\", rotation=self.rotation))\r\n elif mode in POSSIBLE_VERTICAL_CAM_MOVES:\r\n self.event = self.controller.step(dict(action=mode)) \r\n\r\n def move(self, mode='MoveAhead', moveMagnitude=0.0):\r\n if mode not in POSSIBLE_MOVES:\r\n return None\r\n self.event = self.controller.step(dict(action=mode, moveMagnitude=0.0))"
},
{
"alpha_fraction": 0.5971302390098572,
"alphanum_fraction": 0.6052244305610657,
"avg_line_length": 33.78947448730469,
"blob_id": "f33aa256594249aeeb4c2928df7d70dcd63b00a8",
"content_id": "2e39a052fc41302ae95b4fe59e3dff6df36a09a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2718,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 76,
"path": "/core/recorder.py",
"repo_name": "vipghn4/speech_recognition_app",
"src_encoding": "UTF-8",
"text": "import os, sys, glob\r\nimport uuid\r\n\r\nimport sounddevice as sd\r\nimport soundfile as sf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport librosa\r\nimport hmmlearn.hmm as hmm\r\nfrom math import exp\r\n\r\nimport speech_recognition as sr\r\n\r\nDURATION = 7\r\n\r\ndef record_sound(filename, duration=1, fs=44100, play=False, text_box=None):\r\n if text_box is not None:\r\n text_box.insertPlainText(\"Be ready ...\\n\")\r\n else:\r\n print(\"Be ready ...\")\r\n sd.play( np.sin( 2*np.pi*940*np.arange(fs)/fs ) , samplerate=fs, blocking=True)\r\n \r\n if text_box is not None:\r\n text_box.insertPlainText(\"Start recording (start speaking 1 second after this statement) ...\\n\")\r\n else:\r\n print(\"Start recording (start speaking 1 second after this statement) ...\")\r\n \r\n data = sd.rec(frames=duration*fs, samplerate=fs, channels=1, blocking=True)\r\n \r\n if text_box is not None:\r\n text_box.insertPlainText(\"Stop recording ...\\n\")\r\n else:\r\n print(\"Stop recording ...\")\r\n if play:\r\n sd.play(data, samplerate=fs, blocking=True)\r\n sf.write(filename, data=data, samplerate=fs)\r\n\r\ndef record_data(prefix, n=25, duration=1):\r\n for i in range(n):\r\n print('{}_{}.wav'.format(prefix, i))\r\n record_sound('{}_{}.wav'.format(prefix, i), duration=duration)\r\n if i % 5 == 4:\r\n input(\"Press Enter to continue...\")\r\n\r\nclass GoogleRecorder:\r\n def __init__(self):\r\n self.recognizer = sr.Recognizer()\r\n \r\n def get_transcripted_audio(self, save_record=False, save_dir=None, text_box=None):\r\n # with sr.Microphone() as source:\r\n # audio = r.listen(source)\r\n path = os.path.join(save_dir, str(uuid.uuid4())+\".wav\")\r\n record_sound(filename=path, duration=DURATION, play=True, text_box=text_box)\r\n r = sr.Recognizer()\r\n with sr.AudioFile(path) as source:\r\n audio = r.record(source)\r\n if not save_record:\r\n # path = os.path.join(save_dir, str(uuid.uuid4())+\".wav\")\r\n # self.save_audio(audio, path)\r\n os.remove(path)\r\n transcript = self.transcribe_audio(audio)\r\n return transcript\r\n \r\n def save_audio(self, audio, path):\r\n with open(path, \"wb\") as f:\r\n f.write(audio.get_raw_data())\r\n\r\n def transcribe_audio(self, audio):\r\n try:\r\n return self.recognizer.recognize_google(audio)\r\n except sr.UnknownValueError:\r\n print(\"Google Speech Recognition could not understand audio\")\r\n return None\r\n except sr.RequestError as e:\r\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\r\n return None"
}
] | 11 |
BrandyBecker/RockPaperScissorsPython
|
https://github.com/BrandyBecker/RockPaperScissorsPython
|
9d514ea3351f7b927944a89741a3499abb01cdb8
|
a84475b6c4d4089742485d57f21aff1f4d87d53b
|
b85518d7c0c3e28c4781d9a77ac873fa922e0782
|
refs/heads/master
| 2022-09-20T12:40:22.481612 | 2020-06-02T14:53:51 | 2020-06-02T14:53:51 | 268,827,533 | 0 | 1 |
MIT
| 2020-06-02T14:44:52 | 2020-06-02T14:44:57 | 2020-06-02T14:53:52 | null |
[
{
"alpha_fraction": 0.5439965128898621,
"alphanum_fraction": 0.5587342977523804,
"avg_line_length": 28.21518898010254,
"blob_id": "4620c249943c26a3ead823ea129e6894034c578b",
"content_id": "4a1899f900f5f1acf415fb1945bb8a8884a3da3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2307,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 79,
"path": "/rock_paper_scissors.py",
"repo_name": "BrandyBecker/RockPaperScissorsPython",
"src_encoding": "UTF-8",
"text": "#import module we need\nimport random\n\n#file i/o functions for historical results\ndef load_results():\n text_file = open(\"history.txt\", \"r\")\n history = text_file.read().split(\",\")\n text_file.close()\n return history\n\ndef save_results( w, t, l):\n text_file = open(\"history.txt\", \"w\")\n text_file.write( str(w) + \",\" + str(t) + \",\" + str(l))\n text_file.close()\n\n#welcome message\nresults = load_results()\nwins = int(results[0])\nties = int( results[1])\nlosses = int(results[2])\nprint(\"Welcome to Rock, Paper, Scissors!\")\nprint(\"Wins: %s, Ties: %s, Losses: %s\" % (wins, ties, losses))\nprint(\"Please choose to continue...\")\n\n\n#initialize user, computer choices\ncomputer = random.randint(1,3)\nuser = int(input(\"[1] Rock [2] Paper [3] Scissors [9] Quit\\n\"))\n\n#gamplay loop\nwhile not user == 9:\n #user chooses ROCK\n if user == 1:\n if computer == 1:\n print(\"Computer chose rock...tie!\")\n ties += 1\n elif computer == 2:\n print(\"Computer chose paper...computer wins :(\")\n losses += 1\n else:\n print(\"Computer chose scissors...you wins :)\")\n wins += 1\n\n #user chooses PAPER\n elif user == 2:\n if computer == 1:\n print(\"Computer chose rock...you win :)\")\n wins += 1\n elif computer == 2:\n print(\"Computer chose paper...tie!\")\n ties += 1\n else:\n print(\"Computer chose scissors...computer wins :(\")\n losses += 1\n \n #user chooses SCISSORS\n elif user == 3:\n if computer == 1:\n print(\"Computer chose rock...computer wins :(\")\n losses += 1\n elif computer == 2:\n print(\"Computer chose paper...you win :)\")\n wins += 1\n else:\n print(\"Computer chose scissors...tie!\")\n ties += 1\n else:\n print(\"Invalid selection. Please try again.\")\n #print updated stats\n print(\"Wins: %s, Ties: %s, Losses: %s\" % (wins, ties, losses))\n\n #prompt user to make another selection\n print(\"Please choose to continue...\")\n #initialize user, computer choices\n computer = random.randint(1,3)\n user = int(input(\"[1] Rock [2] Paper [3] Scissors [9] Quit\\n\"))\n\n# #game over, save results\nsave_results(wins, ties, losses)"
},
{
"alpha_fraction": 0.8571428656578064,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 23.5,
"blob_id": "8f783c0dba03286949841b4750e509342363222b",
"content_id": "3985bb701f65b470569095960a0a817e8898cce7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 49,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 2,
"path": "/README.md",
"repo_name": "BrandyBecker/RockPaperScissorsPython",
"src_encoding": "UTF-8",
"text": "# RockPaperScissorsPython\ntext based python game\n"
}
] | 2 |
JaesungLeee/Hufs_mp
|
https://github.com/JaesungLeee/Hufs_mp
|
085e963a4fd7c8d5595ccecca82c0ca6b97ca38e
|
76d4f46278bd3a86dd3c42ea552cf1c2c0631b20
|
22808343401b508bb64a7f10642b092f75fdee0a
|
refs/heads/master
| 2022-11-18T04:19:47.307241 | 2020-07-24T14:57:42 | 2020-07-24T14:57:42 | 266,525,992 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.450092077255249,
"alphanum_fraction": 0.4699815809726715,
"avg_line_length": 30.929410934448242,
"blob_id": "84858d29ad2689ff95c9d1048faf993b96529aa6",
"content_id": "88788c34cbdc36bcf76cc8fe3a21b6df18ff1a75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2801,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 85,
"path": "/Flask/flask_websocket_sensor_control_HBE/app.py",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit\nimport wiringpi as wp\nimport datetime\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n// wiringPi 코드\nLED_PIN = 7\nSPI_CH = 0\nADC_CH_LIGHT = 0\nADC_CS = 29\nSPI_SPEED = 500000\nwp.wiringPiSetup() \nwp.wiringPiSPISetup(SPI_CH, SPI_SPEED)\nwp.pinMode(LED_PIN, 1)\nwp.pinMode(ADC_CS, 1) // light 센서를 쓰기 위한 pinMode\n\n#=======================================================================\n# index.html\n#=======================================================================\[email protected]('/') // / 디렉토리 접근 하면 index 함수 불림\ndef index():\n \"\"\"Serve the index HTML\"\"\"\n return render_template('index.html')\n\n#=======================================================================\n# led action + response\n#=======================================================================\[email protected](\"/led1/<led_state>\") // /led1/<led_state> 로 접근하면 밑에 함수 불림\ndef control_led_action(led_state):\n print(\"control_led_action\")\n \n if led_state == \"true\": \n print(\"action==true\")\n wp.digitalWrite(LED_PIN, 1)\n ledS=\"ON\" \n else: \n print(\"action==false\")\n wp.digitalWrite(LED_PIN, 0)\n ledS=\"OFF\"\n \n now = datetime.datetime.now()\n timeString = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n templateData = {\n 'time': timeString ,\n 'ledS' : ledS\n } \n return render_template('ajax_led_response.html',**templateData)\n\n\n\n\[email protected]('/iot_with_flask.js') // index.html에서 js 파일을 부르기 위해 함\ndef gauge(): \n return render_template('iot_with_flask.js') \n\n#=======================================================================\n# socketio ( websocket ) \n#=======================================================================\[email protected]('measurement_start')\ndef on_create(data):\n print(\"measurement_start\")\n it=data['iterations']\n for i in range(it):\n wp.digitalWrite(ADC_CS, 1)\n buf = bytearray(3)\n buf[0] = 0x06 | ((ADC_CH_LIGHT & 0x04)>>2)\n buf[1] = ((ADC_CH_LIGHT & 0x03)<<6)\n buf[2] = 0x00\n wp.digitalWrite(ADC_CS,0)\n ret_len, buf = wp.wiringPiSPIDataRW(SPI_CH, bytes(buf))\n buf = bytearray(buf)\n buf[1] = 0x0F & buf[1]\n #value=(buf[1] << 8) | buf[2]\n light_val = int.from_bytes(bytes(buf), byteorder='big')\n wp.digitalWrite(ADC_CS,1)\n print(\"light value=\", light_val)\n socketio.sleep(.2)\n emit('msg_from_server', {'lightVal': light_val})\n\n#======================================================================= \nif __name__ == '__main__':\n socketio.run(app, host='0.0.0.0', port=5555, debug=False)\n\n"
},
{
"alpha_fraction": 0.6145833134651184,
"alphanum_fraction": 0.6354166865348816,
"avg_line_length": 16.545454025268555,
"blob_id": "070adddfe1c6447168d2ed2565135d92334184d9",
"content_id": "920a3d9364a84b06ab38b4ed3f5254472399d095",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/Homework/HW#10/laserOff.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n#define PIN 22\n\nint main(void){\n if(wiringPiSetup() == -1) return 1;\n pinMode(PIN,OUTPUT);\n\n digitalWrite(PIN,LOW);\n printf(\"Laser Off\");\n}"
},
{
"alpha_fraction": 0.6216931343078613,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 17.899999618530273,
"blob_id": "35e3f6de4adb550588cce5d7da9b71ce60e370f5",
"content_id": "a75b33a27a4d3d1d7f115ff496177f14daa29b21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 40,
"path": "/Homework/HW#03/##3_Polling_Counting_Touch_Sensor_and_LED.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "// Polling 방식 \n// touch sensor가 몇번 눌렸는지 정확하게 알기 위해 interrupt\n// led가 몇번 켜지는지는 polling \n#include <stdio.h>\n#include <wiringPi.h>\n\n#define TOUCH_PIN 6\n#define LED_PIN 7\n\nint touch_cnt = 0\n\nvoid touchPressed() {\n\ttouch_cnt++;\n}\n \nint main(void) {\n\tint touch;\n\tint prev_touch = 0;\n\tint led_cnt = 0;\n\t\n\tif(wiringPiSetup() == -1) return -1;\n\tpinMode(TOUCH_PIN, INPUT);\n\tpinMode(LED_PIN, OUTPUT);\n\t\n\twiringPiISR(TOUCH_PIN, INT_EDGE_RISING, touchPressed);\n\t\n\twhile(true) {\n\t\ttouch = digitalRead(TOUCH_PIN);\n\t\t\n\t\tif(prev_touch == 0 && touch == 1) {\n\t\t\tdigitalWrite(LED_PIN, HIGH);\n\t\t\tled_cnt++;\n\t\t\tdelay(1000);\n\t\t\tdigitalWrite(LED_PIN, LOW);\n\t\t\tprintf(\"Pressed %d times and Lightened %d times\\n\", touch_cnt, led_cnt);\n\t\t}\n\t\tprev_touch = touch;\n\t\tdelay(100);\n\t} \n}\n"
},
{
"alpha_fraction": 0.5627118349075317,
"alphanum_fraction": 0.5830508470535278,
"avg_line_length": 14.578947067260742,
"blob_id": "9f1ed4b6d3abdf41cd65a58306c1b1edc5b9dfa7",
"content_id": "99bfade65e304259a3a7e11871e5cabea13e335f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 19,
"path": "/Homework/HW#11/home_pi_jslee/switch_alert_php.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <wiringPi.h>\n#include <stdio.h>\n\n#define PIN 3\n\nvoid get_switch(void) {\n printf(\"Pressed!!\");\n exit(0);\n}\n\nint main(void){\n if(wiringPiSetup() == -1) return 1;\n pinMode(PIN,INPUT);\n\n wiringPiISR(PIN, INT_EDGE_FALLING, get_switch);\n while(1) {\n sleep(1);\n }\n}"
},
{
"alpha_fraction": 0.5327102541923523,
"alphanum_fraction": 0.5700934529304504,
"avg_line_length": 16.29032325744629,
"blob_id": "3e654b3e09cbc678f8a304b475c87e9658da5443",
"content_id": "822a51ad06516a5130b1585efd5534fe8a0bb610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 31,
"path": "/Homework/HW#10/US.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n\n#define TRIG 28\n#define OUT 29\n\nint main(void) {\n int dis = 0, i;\n long start, travel;\n\n if(wiringPiSetup() == -1) return 1;\n pinMode(TRIG, OUTPUT);\n pinMode(OUT, INPUT);\n\n digitalWrite(TRIG, 0);\n usleep(2);\n digitalWrite(TRIG, 1);\n usleep(20);\n digitalWrite(TRIG, 0);\n\n while(digitalRead(OUT) == 0);\n start = micros();\n \n while(digitalRead(OUT) == 1);\n travel = micros() - start;\n dis = travel / 58;\n\n printf(\"%d\\n\", dis);\n\n delay(500);\n}"
},
{
"alpha_fraction": 0.6671035289764404,
"alphanum_fraction": 0.6854521632194519,
"avg_line_length": 22.121212005615234,
"blob_id": "82607e4a3a3a59fbf6544d1c8e764378230516d2",
"content_id": "3752788451eaa8ccc38351690724b64c6f5fb2fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 833,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 33,
"path": "/Homework/HW#02/##2_Interrupt_Touch_Sensor_and_LED.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n\n#define TOUCH_PIN 6\n#define LED_PIN 7\n\nvoid edge_rise(void);\n\nvoid edge_fall(void);\n\nint main(void) {\n\tif(wiringPiSetup() == -1) return 1;\n\tpinMode(TOUCH_PIN, INPUT);\n\tpinMode(LED_PIN, OUTPUT);\n\t\n\twiringPiISR(TOUCH_PIN, INT_EDGE_RISING, edge_rise);\n\twiringPiISR(TOUCH_PIN, INT_EDGE_FALLING, edge_fall);\n\t\n\tdelay(10000);\t\t// 위에가 interrupt 신호 같은 거여서 delay 해줘야 함 \n}\n\nvoid edge_rise(void) {\n\tdigitalWrite(LED_PIN, HIGH);\n\tdelay(1000);\t// 1초 동안 켜짐 \n\twiringPiISR(TOUCH_PIN, INT_EDGE_FALLING, edge_fall);\t\t// edge_fall 함수로 가서 LED가 꺼짐 \n\tprintf(\"Edge_rised\\n\");\n} \n\nvoid edge_fall(void) {\n\tdigitalWrite(LED_PIN, LOW); \n\twiringPiISR(TOUCH_PIN, INT_EDGE_RISING, edge_rise);\t\t// edge_rise 함수로 가서 LED가 켜짐 \n\tprintf(\"Edge_falled\\n\");\n}\n"
},
{
"alpha_fraction": 0.4496268630027771,
"alphanum_fraction": 0.46082088351249695,
"avg_line_length": 30.52941131591797,
"blob_id": "48d5d9a06864e42a5af13fe245c966e2e87284fc",
"content_id": "8e6b7c00cd577d864227c6e45c13f72188ce4673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 17,
"path": "/Homework/HW#11/var_www_html/remote_con_quiz11.php",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "<?php\n\tif(isset($_GET['lightLog'])) {\n $value = shell_exec(\"/home/pi/jslee/php_lightLog 2>&1\");\n echo $value;\n } \n else if(isset($_GET['touchAlert'])) {\n $value = shell_exec(\"/home/pi/jslee/php_touchAlert 2>&1\");\n echo $value;\n }\n else if(isset($_GET['switchAlert'])) {\n $value = shell_exec(\"home/pi/jslee/switch_alert_php 2>&1\");\n echo $value;\n }\n else {\n echo \"No Such Argument!!! by jaesung.\";\n }\n?>\n"
},
{
"alpha_fraction": 0.5746705532073975,
"alphanum_fraction": 0.5878477096557617,
"avg_line_length": 24.296297073364258,
"blob_id": "a25ec5aeefef8a6a4f951369d77d5e2f53e83162",
"content_id": "228dd829b1c3370cbffecda34d738a1f68309d1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1366,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 54,
"path": "/Homework/HW#11/var_www_html/iot_quiz11.js",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "var remoteCon = 'remote_con_quiz11.php';\n\n$(document).ready(function(){\n $.ajax({\n type: \"GET\",\n timeout: 100000 // sets timeout to 100 seconds\n });\n $(\"#GET_LIGHT\").click(function(){\n $.get(remoteCon+\"?lightLog\", function(data, status){\n //alert(\"Data: \" + data + \"\\nStatus: \" + status);\n //$(\"#LIGHT_val\").val(data);\n lightGraph = document.getElementById('lightGraph');\n //console.log(data);\n drawGraph(lightGraph, data);\n });\n });\n \n $(\"#GET_TOUCH\").click(function(){\n document.TOUCH.src='img/touch_off.png';\n touchAlert();\n });\n\n $(\"GET_SWITCH\").click(function(){\n document.SWITCH.src = \"img/sw_off.png\";\n switchAlert();\n });\n});\n\nfunction drawGraph(target, inputData) {\n var xAxis = [1, 2, 3, 4, 5];\n //console.log(inputData);\n var yData = inputData.split(\",\").map(Number);\n var trace0 = {\n x: xAxis,\n y: yData\n };\n Plotly.plot(target, [trace0]);\n}\n\nfunction touchAlert() {\n $.get(remoteCon+\"?touchAlert\", function(data, status){\n console.log(data);\n document.TOUCH.src='img/touch_on.png';\n //$('TOUCH').attr(\"src\", \"img/touch_on.png\");\n //alert(\"Touch button has been touched!! \\nData: \" + data);\n });\n}\n\nfunction switchAlert() {\n $.get(remoteCon+\"?switchAlert\", function(data, status) {\n console.log(data);\n document.SWITCH.src=\"img/sw_on.png\";\n })\n}\n"
},
{
"alpha_fraction": 0.562809944152832,
"alphanum_fraction": 0.5685950517654419,
"avg_line_length": 19.879310607910156,
"blob_id": "b24f69a060fdaed3ff6dae8cda224713048f781d",
"content_id": "e2eee95a7f396d562cb04b76049f41f3355cd961",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1210,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 58,
"path": "/Homework/HW#10/quiz10.js",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "function XHR_write(data) {\n var xhr = new XMLHttpRequest();\n\n xhr.open(\"GET\",\"remote_con.php?\" + data);\n xhr.send();\n}\n\nfunction XHR_read(data) {\n var xhr = new XMLHttpRequest();\n\n xhr.open(\"GET\",\"remote_con.php?\" + data, false);\n xhr.send();\n \n return xhr.responseText;\n}\n\nfunction repeat() {\n ultraSonic();\n}\nsetInterval(function(){repeat();});\n\n//--------------------------------------------------------//\n\n\n\nfunction ultraSonic(){\nvar val=XHR_read('ultraSonic');\n\nif(val>=20){\n document.ultraSonic.src='img/ultrasonic3.png';\n}else if(val>=10){\n document.ultraSonic.src='img/ultrasonic2.png';\n}else{\n document.ultraSonic.src='img/ultrasonic1.png';\n}\n\ndocument.getElementById(\"US_val\").value=val+\"cm\";\n}\n\n\nfunction LASERON(){\n var state = document.getElementById(\"LASER_ON\").value;\n\n if (state = \"ON\") {\n \n XHR_write('LASERON');\n document.LASER.src='img/laser_on.png';\n // document.getElementById(\"LASER_ON\").value = \"OFF\"\n state = \"OFF\"\n }\n else {\n \n XHR_write('LASEROFF');\n document.LASER.src='img/laser_off.png';\n state = \"ON\"\n // document.getElementById(\"LASER_ON\").value = \"ON\"\n }\n}"
},
{
"alpha_fraction": 0.5196602940559387,
"alphanum_fraction": 0.5696759819984436,
"avg_line_length": 17.67058753967285,
"blob_id": "a88182061f324710411a5f323448aa2463817ead",
"content_id": "0f2c1aefd47c87353aa2323c77b1f0333adde6d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3415,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 170,
"path": "/Homework/HW#06/##6_stepmotor_ultrasonic_led_buzzer.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "(예) 25cm < 30cm, Step Motor Stopped.\nStep motor가 중지되어 있다가 거리 x가 30cm 이상이 되면 'xcm >= 30cm, Step Motor Resumed.' 라고 출력하고, 다시 step motor가 돌아간다. \n(예) 40cm >= 30cm, Step Motor Resumed.\nx가 20cm 미만이면 'xcm < 20cm, LED On.' 라고 출력하고, LED를 켠다.\n(예) 17cm < 20cm, LED On.\nLED가 켜져 있다가 x가 20cm 이상이 되면 'xcm >= 20cm, LED Off.' 라고 출력하고, 다시 LED를 끈다.\n(예) 24cm >= 20cm, LED Off.\nx가 10cm 미만이면 'xcm < 10cm, Buzzer On.' 라고 출력하고, buzzer를 울린다.\n(예) 3cm < 10cm, Buzzer On.\nBuzzer가 울리다가 x가 10cm 이상이 되면 'xcm >= 10cm, Buzzer Off.' 라고 출력하고, 다시 buzzer를 끈다.\n(예) 12cm >= 10cm, Buzzer Off.\n \n\n#include <stdio.h>\n#include <wiringPi.h>\n\n#define LED_PIN 7\n\n#define BUZZER_PIN 15\n\n#define TRIG 28\n#define OUT 29\n\n#define PIN_1A 27\n#define PIN_1B 0\n#define PIN_2A 1\n#define PIN_2B 24\n\nvoid motor() {\n\tdigitalWrite(PIN_1A,HIGH);\n digitalWrite(PIN_1B,LOW);\n\tdigitalWrite(PIN_2A,LOW);\n\tdigitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,HIGH);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,HIGH);\n digitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,HIGH);\n usleep(1000);\n}\n\nvoid buzzer() {\n\tint i;\n\tfor(i=0; i<10; i++) {\n\t digitalWrite(BUZZER_PIN,HIGH);\n \tdelay(200);\n \tdigitalWrite(BUZZER_PIN,LOW);\n \tdelay(200);\n\t} \n}\n\nvoid led_pin() {\n\tint i;\n\tfor(i=0; i< 10; i++) {\n\n digitalWrite(LED_PIN,HIGH);\n\n delay(500);\n\n digitalWrite(LED_PIN,LOW);\n\n delay(500);\n\n } \n}\n\nint ultrasonic() {\n\tdigitalWrite(TRIG,0);\n\tusleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n\n while(digitalRead(OUT) == 0);\n\n start = micros();\n\n while(digitalRead(OUT) == 1);\n\n travel = micros() - start;\n\n dis = travel / 58;\n\n if (dis < 30) {\n printf(\"%dcm < 30cm, Step Mottor Stopped\", dis);\n if (dis < 20) {\n \tled_pin();\n \tprintf(\"%dcm < 20cm. LED On\", dis);\n\n \tif(dis < 10) {\n \t\tbuzzer();\n \t\tprintf(\"%dcm < 10cm. Buzzer On\", dis);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tprintf(\"%dcm >= 10cm. Buzzer Off\", dis);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tprintf(\"%dcm >= 20cm. LED Off\", dis);\n\t\t}\n // motor()를 안부르면 안돌아감 \n \t\n\t}\n\telse {\n\t\tmotor();\t\n\t\tprint(\"%dcm >= 30cm, Step Motter Resumed\", dis);\n\t}\n delay(100);\n\t\t\n}\n\n\n\nint main(void){\n int dis=0, i;\n long start,travel;\n int i=0;\n\n if(wiringPiSetup() == -1) return 1;\n \n pinMode(LED_PIN,OUTPUT);\n \n pinMode(BUZZER_PIN,OUTPUT);\n\n pinMode(TRIG,OUTPUT);\n pinMode(OUT,INPUT);\n \n pinMode(PIN_1A,OUTPUT);\n pinMode(PIN_1B,OUTPUT);\n pinMode(PIN_2A,OUTPUT);\n pinMode(PIN_2B,OUTPUT);\n\n\tultrasonic();\n\t\n\t\n}\n\n\n\n/*\nint ultrasonic() {\ndigitalWrite(TRIG,0);\n usleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n\n while(digitalRead(OUT) == 0);\n\n start = micros();\n\n while(digitalRead(OUT) == 1);\n\n travel = micros() - start;\n\n dis = travel / 58;\n \n return dis;\n }\n */\n\n \n \n"
},
{
"alpha_fraction": 0.6558441519737244,
"alphanum_fraction": 0.7207792401313782,
"avg_line_length": 27,
"blob_id": "41412d295fe361d85b604d8fea2910c63552e20b",
"content_id": "594e346a4c1ec8fdc40eaccfc7de8cc952d82218",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/Flask/README.md",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "# Hufs_mp\n> 2020-1 Hufs Microprocessor Design & Application <br>\n> 2020-1 마이크로프로세서 설계 및 응용 Project입니다. <br>\n\n### Flask\n* 설치 프로그램\n> 1. sudo apt-get install python3-dev\n> 2. sudo apt-get install python3-pip\n> 3. sudo pip3 install wiringpi\n> 4. sudo pip3 install flask\n> 5. sudo pip3 install flask-socketio\n"
},
{
"alpha_fraction": 0.6553910970687866,
"alphanum_fraction": 0.6948555111885071,
"avg_line_length": 31.272727966308594,
"blob_id": "d8083a477479af532dd1c0ab5d06bda3e951a643",
"content_id": "cb624d65a529b2ee9085f2b8efcfbe60e06dbe64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2185,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 44,
"path": "/Homework/README.md",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "# Hufs_mp\n## 2020-1 Hufs Microprocessor Design & Application <br>\n## 2020-1 마이크로프로세서 설계 및 응용 Project입니다. <br>\n## Edge Computing과 IoT Smart Server를 다룹니다. <br>\n\n<br>\n\n[4. Quiz 4](#Quiz-4) \n[10. Quiz 10](#Quiz-10) \n[11. Quiz 11](#Quiz-11) \n\n\n\n<br>\n\n### Quiz 4\n* Polling 방식으로 Switch Sensor를 0.1초 간격으로 센싱하여 Switch Sensor가 눌리면 \"Pressed.\\n\"를 출력하고, Buzzer를 1초 동안 울린다. (Pressed는 눌릴 때만 출력한다)\n* 수행을 시작한 이후로, 스위치 센서가 눌린 횟수와 Buzzer가 울린 횟수를 Buzzer가 꺼질 때 마다 출력하시오.\n* 스위치 센서가 눌린 횟수는 interrupt handler에서 센다.\n* 예를 들어, 스위치 센서가 5번 눌리고, Buzzer가 3번 울렸다면, 다음과 같이 출력된다:\n* \"Pressed 5 times and Buzzed 3 times.\"\n<br>\n\n### Quiz 10\n* 웹브라우저에서 Lazer의 상태를 나타내는 이미지와 Ultrasonic Sensor의 상태를 나타내는 이미지를 표시 (HTML)\n* Lazer 상태 이미지 밑에는 버튼이 하나 있다(HTML). 처음에 버튼에는 On이라고 쓰여 있다. On 버튼을 누르면, Lazer가 켜지고, 버튼의 글씨가 Off로 바뀐다. Off 버튼을 누르면 Lazer가 꺼지고 버튼의 글씨가 On으로 바뀐다.(Javascript, XHR, PHP, C executable)\n* Lazer 상태 이미지는 처음에 꺼진 이미지 였다가, Lazer가 켜지면 Lazer가 켜진 이미지로, 꺼지면 꺼진 이미지로 바뀐다 (Javascript, HTML).\n* Ultrasonic 상태 이미지는 측정한 거리 값(cm)이 20cm이상이면 3번 이미지로, 10cm 이상~20cm 미만이면 2번 이미지, 10cm 미만이면 1번 이미지로 바뀐다 (Javascript)\n* Ultrasonic 센싱은 0.5초 간격으로 주기적으로 한다 (Javascript, XHR, PHP, C executable).\n<br>\n\n### Quiz 11\n* 참조: ksaehwa_jQueryAjaxPlotlyEx.zip\n - Web Push (Alert) with jQuery Ajax\n* Touch 센서 구현을 참조하여 switch 센서 alert를 구현(Jquery Ajax 이용)한다.\n* 소스 코드 확장자 이름이 제대로 되어 있어야 함. \n\n - quiz11.html\n\n - iot_quiz11.js\n\n - remote_con_quiz11.php\n\n - switch_alert_php.c"
},
{
"alpha_fraction": 0.5559566617012024,
"alphanum_fraction": 0.570397138595581,
"avg_line_length": 20.30769157409668,
"blob_id": "412686125ba0fd7769748883c56b2027b76dc3eb",
"content_id": "fdf7e830134648e2316f5ab844a399ace143c1e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 13,
"path": "/Homework/HW#11/home_pi_jslee/php_lightLog.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n\n#define BUF_SIZE 100\n\nint main(void){\n FILE *fp;\n char str[BUF_SIZE];\n fp = fopen(\"/home/pi/ksaehwa/log/lightLog.txt\", \"r\");\n if (fp == NULL) printf(\"file open error!!\");\n fread(str, 1, BUF_SIZE, fp);\n printf(\"%s\", str);\n fclose(fp);\n}\n"
},
{
"alpha_fraction": 0.6419270634651184,
"alphanum_fraction": 0.66796875,
"avg_line_length": 20.94285774230957,
"blob_id": "66cd00de1c29a741ca1bf61e8dad637943165023",
"content_id": "f8a91a4dcb4966cc4d7c5b1bfae7c536512e8351",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 35,
"path": "/Homework/HW#03/##3_2_Interrupt_Counting_Touch_Sensor_and_LED.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "#include <wiringPi.h>\n#include <stdio.h>\n\n#define TOUCH_PIN 6\n#define LED_PIN 7\n\n#interrupt, touch_cnt == led_cnt\n\nvoid edge_rise(void);\n\nint touch_cnt = 0, led_cnt = 0;\n\nint main(void){\n if(wiringPiSetup() == -1) return 1;\n pinMode(TOUCH_PIN,INPUT);\n pinMode(LED_PIN, OUTPUT);\n\twiringPiISR(TOUCH_PIN, INT_EDGE_RISING, edge_rise);\n\t\n\twhile(1){\n\t\tdelay(10000);\n\t}\n}\n\n\nvoid edge_rise(void){\n\tprintf(\"Pressed.\\n\");\n\ttouch_cnt++;\n\tdigitalWrite(LED_PIN,HIGH);\n\tdelay(500);\n\tdigitalWrite(LED_PIN,LOW);\n\tled_cnt++;\n\tprintf(\"Pressed %d times and Lightened %d times\\n\", touch_cnt, led_cnt); //I/O buffer 개행문자 만나야 찍기 시작(어느정도 찰때 까지) \n\t//fflush(0) 32번째줄에 개행 안쓰고 싶으면. fflush(버퍼를 비워라) , printf부른다음 바로 비워줘야함. 바로바로output하려면 fflush. \n\t//file descripter : 0(화면), 1(keyboard)\n}\n"
},
{
"alpha_fraction": 0.6145833134651184,
"alphanum_fraction": 0.6354166865348816,
"avg_line_length": 16.545454025268555,
"blob_id": "b097d171698ae9253797ffc5952eb2f6a1a13c4f",
"content_id": "72174d3f9d6116f719c7dc2bc9e26d001ad5b132",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/Homework/HW#10/laserOn.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n#define PIN 22\n\nint main(void){\n if(wiringPiSetup() == -1) return 1;\n pinMode(PIN,OUTPUT);\n\n digitalWrite(PIN,HIGH);\n printf(\"Laser On\");\n}"
},
{
"alpha_fraction": 0.6321614384651184,
"alphanum_fraction": 0.6751301884651184,
"avg_line_length": 27.98113250732422,
"blob_id": "1c6fa0a0630e5085b8d294383e0f120388ab1e51",
"content_id": "d3d129ffba13109e2afd442ffe66120db1779c47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1536,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 53,
"path": "/Flask/flask_websocket_sensor_control_HBE/templates/gauge.js",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "Gauge = function(id) {\n\t//canvas initialization\n\tvar canvas=document.getElementById(id);\n\tvar ctx = canvas.getContext(\"2d\");\n\t\n\t//dimensions\n\tvar W = canvas.width;\n\tvar H = canvas.height;\n\t\n\t//Variables\n\tvar data =30;\n\tvar degrees = 45;\n\tvar new_degrees = 0;\n\tvar difference = 0;\n\tvar color = \"lightgreen\"; //green looks better to me\n\tvar bgcolor = \"lightgrey\";\n\tvar text;\n\n\tthis.draw=function(data) {\n\t\tvar d=parseInt(data)\n\t//Clear the canvas everytime a chart is drawn\n\tctx.clearRect(0, 0, W, H);\n\t\t\n\t//Background 360 degree arc\n\tctx.beginPath();\n\tctx.strokeStyle = bgcolor;\n\tctx.lineWidth = 20;\n\tctx.arc(W/2, H/2, 70, -225/180*Math.PI, 45/180*Math.PI, false); //you can see the arc now\n\tctx.stroke();\n\t\t\n\t//gauge will be a simple arc\n\t//Angle in radians = angle in degrees * PI / 180\n\tctx.beginPath();\n\tctx.strokeStyle = color;\n\tctx.lineWidth = 20;\n\t//The arc starts from the rightmost end. If we deduct 90 degrees from the angles\n\t//the arc will start from the topmost end\n val_max = 10; //1500\n\tctx.arc(W/2, H/2, 70, - 225*Math.PI/180, (-225+270/val_max*d)/180*Math.PI, false); \n\t//you can see the arc now\n\tctx.stroke();\n\t\n\t\tctx.fillStyle = color;\n\t\tctx.font = \"15px Arial\";\n\t\ttext = data + \" Level\";\n\t\t//Lets center the text\n\t\t//deducting half of text width from position x\n\t\ttext_width = ctx.measureText(text).width;\n\t\t//adding manual value to position y since the height of the text cannot\n\t\t//be measured easily. There are hacks but we will keep it manual for now.\n\t\tctx.fillText(text, W/2 - text_width/2, H/2);\n\t};\n};\n"
},
{
"alpha_fraction": 0.4819934666156769,
"alphanum_fraction": 0.5209167003631592,
"avg_line_length": 18.359155654907227,
"blob_id": "76fdb62cb55dea6c7b5044e86bc218467a3b89d4",
"content_id": "1fc27f76646057638f0a517c74d30dbf78884728",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2749,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 142,
"path": "/Homework/HW#06/##6_stepmotor_ultrasonic_led_buzzer (2).c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n\n#define LED_PIN 7\n\n#define BUZZER_PIN 15\n\n#define TRIG 28\n#define OUT 29\n\n#define PIN_1A 27\n#define PIN_1B 0\n#define PIN_2A 1\n#define PIN_2B 24\n\nvoid buzzer_on() {\n\tdigitalWrite(BUZZER_PIN,HIGH);\n} \n\nvoid buzzer_off() {\n\tdigitalWrite(BUZZER_PIN,LOW);\n}\n\nvoid led_on() {\n\tdigitalWrite(LED_PIN,HIGH);\n}\n\nvoid led_off() {\n\tdigitalWrite(LED_PIN,LOW);\n}\n\n\nvoid stepD() {\n \tdigitalWrite(PIN_1A,HIGH);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,LOW);\n delay(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,HIGH);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,LOW);\n delay(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,HIGH);\n digitalWrite(PIN_2B,LOW);\n delay(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,HIGH);\n delay(1000); \n}\n\nint get_distance(){\n\tint dis=0, i;\n long start,travel; \n \n digitalWrite(TRIG,0);\n usleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n \n\twhile(digitalRead(OUT) == 0);\n \n\tstart = micros();\n \n\twhile(digitalRead(OUT) == 1);\n \n\ttravel = micros() - start;\n \n\tdis = travel / 58;\n \n\treturn dis;\n}\n\nint main(void){\n int motor_running=1;\n int buzzerOn = 0;\n int ledOn = 0;\n \n if(wiringPiSetup() == -1) return 1;\n \n pinMode(LED_PIN,OUTPUT);\n \n pinMode(BUZZER_PIN,OUTPUT);\n\n pinMode(TRIG,OUTPUT);\n pinMode(OUT,INPUT);\n \n pinMode(PIN_1A,OUTPUT);\n pinMode(PIN_1B,OUTPUT);\n pinMode(PIN_2A,OUTPUT);\n pinMode(PIN_2B,OUTPUT);\n \n stepD();\n while(1) {\n int dis=get_distance();\n if(dis >= 20 && ledOn == 1) {\n \tprintf(\"%dcm >= 20cm. LED Off.\", dis);\n \tledOn = 0;\n \tled_off();\n\t\t}\n\t\t\n\t\tif(dis >= 10 && buzzerOn == 1) {\n \tprintf(\"%dcm >= 10cm. Buzzer Off.\", dis);\n \tBuzzerOn = 0;\n \tBuzzer_off();\n\t\t}\n\t\t\n if(dis < 30){\n if(motor_running==1){\n printf(\"%dcm < 30cm, Step Moteor Stopped.\\n\", dis);\n motor_running = 0;\n }\n if(dis < 20) {\n \tif(ledOn == 0) {\n \t\tprintf(\"%dcm < 30cm, LED On.\", dis);\n \t\tledOn = 1;\n \t\tled_on();\n\t\t\t\t}\n\t\t\t\tif(dis < 10) {\n\t\t\t\t\tif(buzzerOn == 0) {\n\t\t\t\t\t\tprintf(\"%dcm < 10cm. Buzzer On\", dis);\n\t\t\t\t\t\tbuzzerOn = 1;\n\t\t\t\t\t\tbuzzer_on();\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n }\n else{ \n if(motor_running==0){\n printf(\"%dcm >= 30cm, Step Moteor Resumed.\\n\", dis);\n motor_running = 1;\n }\n stepD(); \n }\n delay(100);\n }\n}\n"
},
{
"alpha_fraction": 0.6565445065498352,
"alphanum_fraction": 0.6586387157440186,
"avg_line_length": 21.738094329833984,
"blob_id": "139249f96d041eecd4600c2dea43e7263fa86005",
"content_id": "a5888702c0cc17c126f5026876e090cbc1c6d21a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 42,
"path": "/Homework/HW#12/templates/iot_with_flask.js",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "function measurement_start() {\n\tconsole.log('start...');\n\tmeasurementCnt = document.getElementById(\"measurementCnt\");\n\tsocket.emit('measurement_start', {iterations: parseInt(measurementCnt.value)});\n}\n\nvar socket = io.connect('http://' + document.domain + ':' + location.port);\n\nsocket.on('connect', function() {\n\tconsole.log('Websocket connected!');\n});\n\nvar arr_time_vals = [];\nvar arr_sound_vals = [];\n\nsocket.on('msg_from_server', function(msg) {\n\tconsole.log(msg);\n\t\n\tvar logArea = document.getElementById(\"log\"); \n\tvar graphArea = document.getElementById(\"mydiv\");\n\n\tif(msg==null) { \n\t\tlogArea.textContent='';\n\t} \n\telse { \n\t\tlogArea.textContent='time value='+ msg.timeVal + \n\t\t\t\t\t\t\t'\\nsound value=' + msg.soundVal + \n\t\t\t\t\t\t\t'\\n'+logArea.textContent; \n\t}\n\n\tarr_sound_vals.push(msg.soundVal);\n\tarr_time_vals.push(msg.timeVal);\n\n\tvar trace1 = {\n\t\tx : arr_time_vals,\n\t\ty : arr_sound_vals\n\t}\n\n\tvar data = [trace1]\n\n\tPlotly.newPlot('myDiv', data)\n});\n"
},
{
"alpha_fraction": 0.624495267868042,
"alphanum_fraction": 0.6514132022857666,
"avg_line_length": 19.08108139038086,
"blob_id": "ebee5907a73db712b03380165bdf7b9c806219a0",
"content_id": "84889d27418cd667d85341ae2341fbe8fa31275c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 37,
"path": "/Homework/HW#04/##4_Counting_Switch_Sensor_and_Buzzer.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "//polling switch&buzzer\n//Switch -> 0 : pressed, 1 : unpressed\n#include <wiringPi.h>\n#include <stdio.h>\n\n#define BUZZER_PIN 15\n#define SWITCH_PIN 3\n\nint switchCount = 0;\n\nvoid switchPressed(){\n\tswitchCount++;\t\n}\n\nint main(void){\n\tint buzzerCount = 0;\n\tint sw;\n int prev_sw = 1;\n \n if(wiringPiSetup() == -1) return 1;\n pinMode(SWITCH_PIN,INPUT);\n\tpinMode(BUZZER_PIN,OUTPUT);\n\twiringPiISR(SWITCH_PIN, INT_EDGE_FALLING, switchPressed);\n\n\twhile(1){\n\t\tsw = digitalRead(SWITCH_PIN);\n\t\tif(sw == 0 && prev_sw == 1){\n\t\t\tdigitalWrite(BUZZER_PIN,HIGH);\n\t\t\tdelay(1000);\n\t\t\tdigitalWrite(BUZZER_PIN,LOW);\n\t\t\tbuzzerCount++;\n\t\t\tprintf(\"Pressed %d times and Buzzed %d times\\n\", switchCount, buzzerCount);\n\t\t}\n\t\tdelay(100);\n\t\tprev_sw = sw;\n\t}\n}\n"
},
{
"alpha_fraction": 0.6320754885673523,
"alphanum_fraction": 0.6792452931404114,
"avg_line_length": 18.18181800842285,
"blob_id": "d79a42ee2ff0c3aba6b946520a0ea3647b689d39",
"content_id": "8c8c012a1ce7a08aea9ca3e6663aaa033998d9ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 256,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/README.md",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "# Hufs_mp\n> 2020-1 Hufs Microprocessor Design & Application <br>\n> 2020-1 마이크로프로세서 설계 및 응용 Project입니다. <br>\n> Edge Computing과 IoT Smart Server를 다룹니다. <br>\n\n<br>\n\n### Flask\n* \n### Sensor & Actuator control\n* \n"
},
{
"alpha_fraction": 0.5375722646713257,
"alphanum_fraction": 0.5578034520149231,
"avg_line_length": 14,
"blob_id": "39749edc31bc9b3f39536390f7eec17c24801fa3",
"content_id": "edd1a22c5c31ea7c368e5cd75cae126dc78d3d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 23,
"path": "/Homework/HW#11/home_pi_jslee/php_touchAlert.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <wiringPi.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define PIN 6\nvoid edge_rise(void);\n\nint main(void){\n \n if(wiringPiSetup() == -1) return 1;\n pinMode(PIN,INPUT);\n\twiringPiISR(PIN, INT_EDGE_RISING, edge_rise);\n while(1) {\n \t sleep(1);\n\t}\n\n}\n\n\nvoid edge_rise(void){\t\n printf(\"1\");\n exit(0);\n}\n\n"
},
{
"alpha_fraction": 0.5083159804344177,
"alphanum_fraction": 0.5519750714302063,
"avg_line_length": 17.631067276000977,
"blob_id": "8727cadfaa864299cef0f599fc9ba259ab07e49f",
"content_id": "e44ece0ba176aa9d9d2899096420188e2adc7f0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1942,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 103,
"path": "/Homework/HW#05/##5_stepmotor_ultrasonic.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n\n#define TRIG 28\n#define OUT 29\n\n#define PIN_1A 27\n#define PIN_1B 0\n#define PIN_2A 1\n#define PIN_2B 24\n\nvoid motor() {\n\tdigitalWrite(PIN_1A,HIGH);\n digitalWrite(PIN_1B,LOW);\n\tdigitalWrite(PIN_2A,LOW);\n\tdigitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,HIGH);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,HIGH);\n digitalWrite(PIN_2B,LOW);\n usleep(1000);\n digitalWrite(PIN_1A,LOW);\n digitalWrite(PIN_1B,LOW);\n digitalWrite(PIN_2A,LOW);\n digitalWrite(PIN_2B,HIGH);\n usleep(1000);\n}\n\nint main(void){\n int dis=0, i;\n long start,travel;\n int i=0;\n\n if(wiringPiSetup() == -1) return 1;\n\n pinMode(TRIG,OUTPUT);\n pinMode(OUT,INPUT);\n \n pinMode(PIN_1A,OUTPUT);\n pinMode(PIN_1B,OUTPUT);\n pinMode(PIN_2A,OUTPUT);\n pinMode(PIN_2B,OUTPUT);\n\n\t\t\t\n while(1) {\t\n \tdigitalWrite(TRIG,0);\n usleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n\n while(digitalRead(OUT) == 0);\n\n start = micros();\n\n while(digitalRead(OUT) == 1);\n\n travel = micros() - start;\n\n dis = travel / 58;\n\n if (dis < 30) {\n printf(\"%dcm < 30cm, Step Mottor Stopped\", dis);\n // motor()를 안부르면 안돌아감 \n \t\n\t\t}\n\t\telse {\n\t\t\tmotor();\t\n\t\t\tprint(\"%dcm >= 30cm, Step Motter Resumed\", dis);\n\t\t}\n delay(100);\n } \n}\n\n\n\n/*\nint ultrasonic() {\ndigitalWrite(TRIG,0);\n usleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n\n while(digitalRead(OUT) == 0);\n\n start = micros();\n\n while(digitalRead(OUT) == 1);\n\n travel = micros() - start;\n\n dis = travel / 58;\n \n return dis;\n }\n */\n\n \n \n"
},
{
"alpha_fraction": 0.4558434784412384,
"alphanum_fraction": 0.48175567388534546,
"avg_line_length": 28.546875,
"blob_id": "c2e1baa0feae76ddc39af5c570532964a3188576",
"content_id": "23df4c1de4ed4d98766728dd7966af04dfaca69c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1893,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 64,
"path": "/Homework/HW#12/app.py",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit\nimport wiringpi as wp\nimport datetime\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\n\nSPI_CH = 0\nADC_CH_SOUND = 0\nADC_CS = 29\nSPI_SPEED = 500000\nwp.wiringPiSetup()\nwp.wiringPiSPISetup(SPI_CH, SPI_SPEED)\nwp.pinMode(ADC_CS, 1)\n\n#=======================================================================\n#\t\t\t\tindex.html\n#=======================================================================\[email protected]('/')\ndef index():\n \"\"\"Serve the index HTML\"\"\"\n return render_template('index.html')\n\n\[email protected]('/iot_with_flask.js')\ndef gauge():\n return render_template('iot_with_flask.js')\n\n#=======================================================================\n#\t\t\t\tsocketio ( websocket ) \n#=======================================================================\[email protected]('measurement_start')\ndef on_create(data):\n print(\"measurement_start\")\n it=data['iterations']\n\n # sound.c -> .py\n for i in range(it):\n wp.digitalWrite(ADC_CS, 1)\n buf = bytearray(3)\n buf[0] = 0x06 | ((ADC_CH_SOUND & 0x04)>>2)\n buf[1] = ((ADC_CH_SOUND & 0x03)<<6)\n buf[2] = 0x00\n wp.digitalWrite(ADC_CS,0)\n ret_len, buf = wp.wiringPiSPIDataRW(SPI_CH, bytes(buf))\n buf = bytearray(buf)\n buf[1] = 0x0F & buf[1]\n #value=(buf[1] << 8) | buf[2]\n sound_val = int.from_bytes(bytes(buf), byteorder='big')\n wp.digitalWrite(ADC_CS,1)\n print(\"sound value=\", sound_val)\n\n\n now = datetime.datetime.now()\n time_val = now.strftime(\"%H:%M:%S\")\n\n socketio.sleep(1) # 1초 sleep\n emit('msg_from_server', {'soundVal': sound_val, 'timdVal' : time_val})\n\n#======================================================================= \nif __name__ == '__main__':\n socketio.run(app, host='0.0.0.0', port=5555, debug=False)\n"
},
{
"alpha_fraction": 0.5406162738800049,
"alphanum_fraction": 0.5910364389419556,
"avg_line_length": 22.799999237060547,
"blob_id": "048387aea49de63b4119427b574396646287a021",
"content_id": "1febecc3efad50ec146f1c84b552f6d2a2723710",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 798,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 30,
"path": "/Homework/HW#01/##1_Touch_Sensor_and_LED.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "// Polling 방식\n// delay(100) : 0.1초 : 10^-3 \n#include <stdio.h>\n#include <wiringPi.h>\n\n#define PIN_LED 7\n#define PIN_TOUCH 6\n\nint main(void) {\n\tint touch = 0, prev_touch = 0, i, j;\n\t\n\tif (wiringPiSetup() == -1) return 1;\n\tpinMode(PIN_TOUCH, INPUT);\n\tpinMode(PIN_LED, OUTPUT);\n\t\n\twhile(1) {\n\t\ttouch = digitalRead(PIN_TOUCH);\t\t// 누르면 1, 안누르면 0 -> 이거 자체가 Polling \n\t\tif(touch == 1 && prev_touch == 0) {\n\t\t\tprintf(\"Pressed\\n\", touch)\n\t\t\tfor(j = 0; j < 3; j++) {\n\t\t\t\tdigitalWrite(PIN_LED, HIGH);\n\t\t\t\tdelay(500);\n\t\t\t\tdigitalWrite(PIN_LED, LOW);\n\t\t\t\tdelay(500);\n\t\t\t}\n\t\t}\n\t\tprev_touch = touch;\t\t// 때면 touch가 0이되서 prev_touch가 0 -> 다시 while문 돌때 touch = 1, prev_touch = 0 \n\t\tdelay(100);\t\t// 0.1초 간격으로 touch 값을 주기적으로 읽게 됨 \n\t}\n}\n"
},
{
"alpha_fraction": 0.3811053931713104,
"alphanum_fraction": 0.4190231263637543,
"avg_line_length": 27.290908813476562,
"blob_id": "315af0f24ae8fe17aab9154cf31c6ae93dc51c50",
"content_id": "02ad851a85aa00a434a99d6e29d8fd5e072cddde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1556,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 55,
"path": "/Homework/HW#11/home_pi_jslee/lightLogDaemon.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <wiringPi.h>\n#include <string.h>\n\n#define SPI_CH 0\n#define ADC_CH 0\n#define ADC_CS 29\n#define SPI_SPEED 500000\n#define LOG_SIZE 5\n#define BUF_SIZE 100\n\nint main(void){\n FILE *fp;\n int value=0, i=0, j;\n unsigned char buf[3];\n char log[LOG_SIZE];\n char str[BUF_SIZE];\n char str_temp[BUF_SIZE];\n\n if(wiringPiSetup() == -1) return 1;\n if(wiringPiSPISetup() == -1) return -1;\n pinMode(ADC_CS,OUTPUT);\n for(i=0; i<BUF_SIZE; i++) str[i]='\\0';\n i=0;\n\n while(1){\n buf[0] = 0x06 | ((ADC_CH & 0x04)>>2);\n buf[1] = ((ADC_CH & 0x03)<<6);\n buf[2] = 0x00;\n digitalWrite(ADC_CS,0);\n wiringPiSPIDataRW(SPI_CH,buf,3);\n buf[1]=0x0F & buf[1];\n value=(buf[1] << 8) | buf[2];\n digitalWrite(ADC_CS,1);\n log[i] = value;\n printf(\"log[%d]=%d\\n\", i, log[i]);\n i = (i+1) % LOG_SIZE;\n \n if(i==0) {\n sprintf(str, \"\");\n for(j=0; j<LOG_SIZE;j++) { \n sprintf(str_temp, \"%d\", log[j]);\n strcat(str, str_temp);\n if (j != LOG_SIZE-1)\n strcat(str, \", \");\n }\n fp = fopen(\"./log/lightLog.txt\", \"w\");\n if (fp == NULL) printf(\"File Open Error!\\n\");\n fprintf(fp, \"%s\", str);\n fclose(fp);\n printf(\"%s\\n\", str);\n }\n\t delay(500);\n }\n}\n"
},
{
"alpha_fraction": 0.5243847966194153,
"alphanum_fraction": 0.5731543898582458,
"avg_line_length": 23.29347801208496,
"blob_id": "d3e872901b7d1b8e846ccfea2c364168e1dd2f21",
"content_id": "9ac7da68bfb98a12b9a66533dd979bc61cb6d8f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2429,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 92,
"path": "/Homework/HW#05/##5_stepmotor_ultrasonic (2).c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UHC",
"text": "//mp5\n//프로그램을 수행하기 시작하면 step motor는 정상 회전으로 계속 돌아가야 한다. \n//Ultrasonic Sensor로 주변과의 거리 x를 0.1s 간격으로 측정한다. \n//측정된 거리 x가 30cm 미만이면 'xcm < 30cm, Step Motor Stopped.' 라고 출력하고, step motor를 중지시킨다.\n//(예) 25cm < 30cm, Step Motor Stopped.\n//Step motor가 중지되어 있다가 거리 x가 30cm 이상이 되면 'xcm >= 30cm, Step Motor Resumed.' 라고 출력하고, 다시 step motor가 돌아간다. \n//(예) 40cm >= 30cm, Step Motor Resumed.\n\n#include <stdio.h>\n#include <wiringPi.h>\n#define TRIG 28\n#define OUT 29\n#define PIN_1A 27\n#define PIN_1B 0\n#define PIN_2A 1\n#define PIN_2B 24\n#define DELAY 8000 //7000 //2000\n\nvoid stepMotor() {\n\tdigitalWrite(PIN_1A, HIGH);\n\tdigitalWrite(PIN_1B, LOW);\n\tdigitalWrite(PIN_2A, LOW);\n digitalWrite(PIN_2B, LOW);\n delay(1000);\n digitalWrite(PIN_1A, LOW);\n digitalWrite(PIN_1B, HIGH);\n digitalWrite(PIN_2A, LOW);\n digitalWrite(PIN_2B, LOW);\n delay(1000);\n digitalWrite(PIN_1A, LOW);\n digitalWrite(PIN_1B, LOW);\n digitalWrite(PIN_2A, HIGH);\n digitalWrite(PIN_2B, LOW);\n delay(1000);\n digitalWrite(PIN_1A, LOW);\n digitalWrite(PIN_1B, LOW);\n digitalWrite(PIN_2A, LOW);\n digitalWrite(PIN_2B, HIGH);\n delay(1000);\n}\n\nint ultrasonic(){\n int dis = 0;\n long start, travel; \n \n digitalWrite(TRIG,0);\n usleep(2);\n digitalWrite(TRIG,1);\n usleep(20);\n digitalWrite(TRIG,0);\n \n while(digitalRead(OUT) == 0);\n start = micros();\n while(digitalRead(OUT) == 1);\n \n\ttravel = micros() - start;\n dis = travel / 58;\n \n\treturn dis;\n}\n\nint main(void){\n int motor_running = 1;\n \n if(wiringPiSetup() == -1) return 1;\n pinMode(TRIG,OUTPUT);\n pinMode(OUT,INPUT);\n pinMode(PIN_1A,OUTPUT);\n pinMode(PIN_1B,OUTPUT);\n pinMode(PIN_2A,OUTPUT);\n pinMode(PIN_2B,OUTPUT);\n \n stepMotor();\n while(1) {\n dis = ultrasonic();\n \n if(dis < 30) {\n if(motor_running == 1) {\n printf(\"%dcm < 30cm, Step Moteor Stopped.\\n\", dis);\n motor_running = 0;\n }\n }\n else { \n \tif(motor_running == 0) {\n \tprintf(\"%dcm >= 30cm, Step Moteor Resumed.\\n\", dis);\n motor_running = 1;\n }\n stepMotor(); \n }\n delay(100);\n }\n}\n"
},
{
"alpha_fraction": 0.4834054708480835,
"alphanum_fraction": 0.5108225345611572,
"avg_line_length": 20.65625,
"blob_id": "da6c66edf3de728820bccd7e2cb2908d11669a4e",
"content_id": "13be09370dbc3074996ba940f9073359bdbc2788",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 32,
"path": "/Homework/HW#01/##1_1_Touch_Sensor_and_LED.c",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "// Polling ��� \n#include <stdio.h>\n#include <wiringPi.h>\n\n#define TOUCH_PIN 6\n#define LED_PIN 7\n\nint main(void) {\n\tint prev_touch = 0, touch, i, led_state = 0\n\t\n\tif(wiringPiSetup() == -1) return 1;\n\tpinMode(TOUCH_PIN, INPUT);\n\tpinMode(LED_PIN, OUTPUT);\n\t\n\twhile(1) {\n\t\ttouch = digitalRead(TOUCH_PIN);\t\t // digitalRead�� �� ���� ������ 1, ���� 0 \n\t\tif(touch && !prev_touch) {\t\t\t // touch�� 1�̰� prev_touch�� 1�̸� �� ������ �ִ°� \n\t\t\tprintf(\"PRESSED\\n\");\n\t\t\tif(led_state == 0) {\n\t\t\t\tdigitalWrite(LED_PIN, HIGH);\n\t\t\t\tled_state == 1;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tdigitalWrite(LED_PIN, LOW);\n\t\t\t\tled_state == 0;\n\t\t\t}\n\t\t}\n\t\t// ���⼭ ���� 0.1�� �������� �ֱ������� ����\t\t\n\t\tprev_touch = touch;\n\t\tdelay(100);\n\t}\n}\n"
},
{
"alpha_fraction": 0.49559471011161804,
"alphanum_fraction": 0.5088105797767639,
"avg_line_length": 22.947368621826172,
"blob_id": "962ac7ccdd98de66497e2bc3263c2f96a2e05d60",
"content_id": "0025544f582bf8796814df8fb963fbbabc738fae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 454,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 19,
"path": "/Homework/HW#10/remote_con.php",
"repo_name": "JaesungLeee/Hufs_mp",
"src_encoding": "UTF-8",
"text": "<?php\n if(isset($_GET['ultraSonic'])) {\n $ret = shell_exec('/home/pi/jaesung/US 2>&1');\n echo $ret;\n }\n\n else if(isset($_GET['LASERON'])) {\n $ret = shell_exec('/home/pi/jaesung/laserOn 2>&1');\n echo $ret;\n }\n\n else if(isset($_GET['LASEROFF'])) {\n $ret = shell_exec('/home/pi/jaesung/laserOff 2>&1');\n echo $ret;\n }\n\n else {\n echo \"You entered wrong parameter. Please try again.\"\n }"
}
] | 28 |
ara-software/authorlist
|
https://github.com/ara-software/authorlist
|
a2cb8795a19556fcbdee70d6cb8bd993b15b42cf
|
2274bdee9ad41fb2dbd75737b165b2426946d733
|
f855286bbd8862b142f0def9a56766862476d5af
|
refs/heads/master
| 2023-08-19T00:21:44.569025 | 2023-08-16T18:12:11 | 2023-08-16T18:12:11 | 208,102,633 | 0 | 0 | null | 2019-09-12T17:05:27 | 2019-09-24T02:55:53 | 2019-09-24T15:16:35 |
Python
|
[
{
"alpha_fraction": 0.7058081030845642,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 34.772727966308594,
"blob_id": "e6870c22246000ef2e6991e5f22ba72ac15dc9b9",
"content_id": "98c5838a5d91ff8db25d81316297619bfd9c5192",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 22,
"path": "/Makefile",
"repo_name": "ara-software/authorlist",
"src_encoding": "UTF-8",
"text": "\n.PHONY: all \n\ntgts=output/ara_authors.html output/ara_authors.txt output/ara_authors_revtex.tex output/ara_institutes_revtex.tex output/ara_elsarticle_authors.tex output/ara_icrc_authors.tex output/ara_pos_authors.tex\n\nall: index.html \n\nclean: \n\t@rm -rf output \n\t@rm -f index.html \n\n$(tgts): authors_in.yaml institutes_in.yaml ara_author_tool.py | output\n\t@echo Running ara_author_tool.py\n\t@./ara_author_tool.py output/ara_ \n\noutput: \n\t@mkdir -p $@\n\nindex.html: output/ara_authors.html \n\t@echo \"<!DOCTYPE html><html><head><title>ara Author List</title></head> <body><h1 align='center'>ara Author List</h1><hr/>\" > $@\n\t@cat $^ >> $@ \n\t@echo \"</body></html>\" >> $@\n\t@echo \"Please considering committing/pushing your index.html if it differs from https://ara-software.github.io/authorlist\" \n\t\n\n"
},
{
"alpha_fraction": 0.6654943227767944,
"alphanum_fraction": 0.6693311333656311,
"avg_line_length": 36.09738540649414,
"blob_id": "02b40d0c25b2dc6a361b642dd4c36a829c37758f",
"content_id": "0412f72489750e873062ded44891c45daae8dc78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15638,
"license_type": "no_license",
"max_line_length": 325,
"num_lines": 421,
"path": "/ara_author_tool.py",
"repo_name": "ara-software/authorlist",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n## ARA Author Tool to save time on author lists... \n# Cosmin Deaconu <[email protected]>\n# apologies for the semicolons, it's a reflex at this point... \n# This is about as brute force as it gets :)\n\nimport sys\nimport datetime\ntry:\n import yaml\nexcept ImportError:\n raise ImportError('yaml is not installed')\n\nprefix = \"ara_\" #prefix for all output files (first argument overrideS) \ncollaboration = \"ARA\" # (second argument overrides) \n\n\nif len(sys.argv) > 1: \n prefix = sys.argv[1] \n\nif len(sys.argv) > 2: \n collaboration = sys.argv[2] \n\n\n## may need to do more here! \ndef tex_escape(string): \n\n return string.replace(\"&\",\"\\&\")\n\ndef html_escape(string):\n\n return string.replace(\"&\",\"&\") \n\n\n\n\n# Start by opening the institute list (institutes_in.yaml)\n# this is now loaded as a yaml file, which is more hierarchical \n# and requires less manual parsing\ninstitutes = {} \n\nfinst = open(\"institutes_in.yaml\")\nparsed_finst = yaml.safe_load(finst)\nfor inst in parsed_finst:\n\n inst_id = parsed_finst[inst]['instituteid']\n inst_addr = parsed_finst[inst]['address']\n inst_short = parsed_finst[inst]['shortid']\n\n if inst_id in institutes: \n print( \"WARNING: duplicate ID \\\"%s\\\" found! Replacing existing.\" % (inst_id))\n\n institutes[inst_id] = (inst_addr, inst_short) \n\n\n\n# Then open the authors list (authors_in.yaml)\n\nauthors = [] \nsorted_institutes = [] \ninstitute_numbers = {}\n\nfauth = open('authors_in.yaml')\nparsed_fauth = yaml.safe_load(fauth)\nfor author in parsed_fauth:\n authlistname = parsed_fauth[author]['authlistname']\n affiliations = []\n for aff in parsed_fauth[author]['affiliations']:\n if aff not in institutes:\n print(\" WARNING, no key for %s found in institutes.in\" % (aff))\n else:\n if aff not in sorted_institutes: \n sorted_institutes.append(aff)\n institute_numbers[aff] = len(sorted_institutes)\n affiliations.append(aff)\n authorids = {}\n if 'orcid' in parsed_fauth[author]:\n authorids['orcid'] = parsed_fauth[author]['orcid']\n if 'inspireid' in parsed_fauth[author]:\n authorids['inspireid'] = parsed_fauth[author]['inspireid']\n authors.append((authlistname,affiliations,authorids)) \n\n\n# authors.txt \n\nf_authors_txt = open(prefix +\"authors.txt\",\"w\") \n\nfirst = True\nfor author in authors: \n\n if not first: \n f_authors_txt.write(\", \"); \n f_authors_txt.write(author[0] + \" \"); \n\n for aff in author[1]:\n f_authors_txt.write(\"[%d]\" % (institute_numbers[aff]) ); \n\n first = False\n\nf_authors_txt.write(\"\\n\\n\"); \nfor i in range(len(sorted_institutes)): \n f_authors_txt.write(\"%d: %s\\n\"%( i+1, institutes[sorted_institutes[i]][0])) \n\n\nf_authors_txt.close()\n\n\n# authors.html \n\nf_authors_html = open(prefix +\"authors.html\",\"w\") \n\nf_authors_html.write(\"<p align='center'>\") \nfirst = True\nfor author in authors: \n\n if not first: \n f_authors_html.write(\", \\n\"); \n f_authors_html.write(author[0]); \n\n f_authors_html.write(\"<sup>\"); \n first_aff = True\n for aff in author[1]:\n if not first_aff:\n f_authors_html.write(\",\"); \n f_authors_html.write(\"<a href='#%s'>%d</a>\" % (aff, institute_numbers[aff]) ); \n\n first_aff = False \n f_authors_html.write(\"</sup>\"); \n\n first = False\n\nf_authors_html.write(\"<br>(<b>%s Collaboration</b>)\\n\" % (collaboration)); \nf_authors_html.write(\"</p>\\n\\n\"); \nfor i in range(len(sorted_institutes)): \n f_authors_html.write(\"<br> <a name='%s'\\\\> <sup>%d</sup> %s\\n\"%(sorted_institutes[i], i+1, 
html_escape(institutes[sorted_institutes[i]][0]))) \n\n\nf_authors_html.close()\n\n\n# revtex_authors.tex \nf_revtex_authors = open(prefix + \"revtex_authors.tex\",\"w\")\nf_revtex_authors.write(\"%% Collaboration author file for %s in revtex format\\n\" % (collaboration)) \nf_revtex_authors.write(\"%% \\\\input this file in main body (make sure you also do the institutes file in the preamble!) \\n\\n\" ) \n\nfor author in authors: \n name = author[0].replace(\" \",\"~\")\n f_revtex_authors.write(\" \\\\author{%s}\" % (name)) \n if author[1] is not None: \n for aff in author[1]: \n f_revtex_authors.write(\"\\\\at%s\" % (aff)) \n f_revtex_authors.write(\"\\n\") \n\nf_revtex_authors.write(\"\\\\collaboration{%s Collaboration}\\\\noaffiliation\\n\" % (collaboration)); \n\nf_revtex_authors.close()\n\n\n# revtex_institutes.tex \nf_revtex_institutes = open(prefix + \"revtex_institutes.tex\",\"w\")\nf_revtex_institutes.write(\"%% Collaboration institute file for %s in revtex format\\n\" % (collaboration)) \nf_revtex_institutes.write(\"%% \\\\input this file in the preamble (make sure you also do the author file in the body!) \\n\\n\") \n\nfor key in sorted_institutes: \n addr = tex_escape(institutes[key][0]) ; \n f_revtex_institutes.write(\"\\\\newcommand{\\\\at%s}{\\\\affiliation{%s}}\\n\" % (key, addr)); \n\nf_revtex_institutes.close()\n\n\n\n#elsarticle_authors.tex \n\nf_elsarticle_authors = open(prefix + \"elsarticle_authors.tex\",\"w\"); \n\nf_elsarticle_authors.write(\"%% authorlist for elsarticle publications for %s collaboration\\n\\n\" % (collaboration) ); \n\nf_elsarticle_authors.write(\"\\\\collaboration{%s Collaboration}\\n\\n\" % (collaboration)); \n\nfor key in sorted_institutes: \n num = institute_numbers[key]; \n addr = tex_escape(institutes[key][0]) ; \n f_elsarticle_authors.write(\"\\\\address[%d]{%s}\\n\" % (num, addr)); \n\nf_elsarticle_authors.write(\"\\n\\n\"); \n\nfor author in authors: \n name = author[0].replace(\" \",\"~\")\n affs = \"\" \n for aff in author[1]: \n if affs != \"\": \n affs += \",\"\n affs += str(institute_numbers[aff])\n f_elsarticle_authors.write(\"\\\\author[%s]{%s}\\n\" % (affs,name)) \n\nf_elsarticle_authors.close()\n\n\n# pos_authors.tex \n\nf_pos_authors = open(prefix +\"pos_authors.tex\",\"w\") \nf_pos_authors.write(\"%% PoS list for %s Collaboration\\n\\n\" % (collaboration)); \nfirst = True\n\nf_pos_authors.write(\"\\\\author{\\n\"); \n\nf_pos_authors.write(\" (%s Collaboration)\\n\" % (collaboration)); \n\nfor author in authors: \n name = author[0].replace(\" \",\"~\")\n if not first: \n f_pos_authors.write(\",\\n\"); \n f_pos_authors.write(\" %s\" % (name)); \n affs = \"\" \n for aff in author[1]: \n if affs != \"\": \n affs += \",\"\n affs += str(institute_numbers[aff])\n \n f_pos_authors.write(\"$^{%s}$\"%(affs))\n first = False\n\nf_pos_authors.write(\"\\n\\\\\\\\\\n\\\\\\\\\\n\");\nfirst = True\nfor i in range(len(sorted_institutes)): \n # if not first:\n # f_pos_authors.write(\",\\n\")\n f_pos_authors.write(\" $^{%d}$%s\\\\\\\\\\n\"%( i+1, tex_escape(institutes[sorted_institutes[i]][0])))\n first = False \n\nf_pos_authors.write(\"\\n}\\n\"); \nf_pos_authors.close()\n\n\n## ICRC authors\nf_icrc_authors = open(prefix + \"icrc_authors.tex\",\"w\"); \nf_icrc_authors.write(\"%% ICRC list for %s Collaboration\\n\\n\" % (collaboration)); \n\n\nfirst = True\nnum_institutes = 0 \nf_icrc_authors.write(\"\\\\noindent\\n\")\nfor author in authors: \n\n name = author[0].replace(\" \",\"~\")\n\n if not first: \n f_icrc_authors.write(\", \\n\"); \n 
f_icrc_authors.write(name); \n\n first_aff = True\n for aff in author[1]:\n if not first_aff:\n f_icrc_authors.write(\"\\\\textsuperscript{,}\"); \n if institute_numbers[aff] > num_institutes: \n #f_icrc_authors.write(\"\\\\footnote[%d]{%s\\label{inst%d}}\" % (institute_numbers[aff], institutes[aff][0], institute_numbers[aff]))\n f_icrc_authors.write(\"\\\\textsuperscript{%d}\" % (institute_numbers[aff]))\n num_institutes+=1 \n else:\n f_icrc_authors.write(\"\\\\textsuperscript{%d}\" % (institute_numbers[aff]) ); \n first_aff = False\n first = False\n\nf_icrc_authors.write(\"\\n\\\\\\\\\\n\\\\\\\\\\n\")\nfor i in range(len(sorted_institutes)): \n f_icrc_authors.write(\"\\\\textsuperscript{%d} %s\\\\\\\\\\n\"%( i+1, institutes[sorted_institutes[i]][0])) \n\nf_icrc_authors.close()\n\n## author XML file\nf_xml_authors = open(prefix + \"authors.xml\",\"w\")\n\n# initial header info (DO NOT CHANGE)\nf_xml_authors.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\\n')\n\nf_xml_authors.write('<!DOCTYPE collaborationauthorlist [\\n')\nf_xml_authors.write('<!ELEMENT collaborationauthorlist ( cal:creationDate, cal:publicationReference, cal:collaborations, cal:organizations, cal:authors ) >\\n')\nf_xml_authors.write('<!ATTLIST collaborationauthorlist\\n')\nf_xml_authors.write('\\txmlns:foaf CDATA #FIXED \"http://xmlns.com/foaf/0.1/\"\\n')\nf_xml_authors.write('\\txmlns:cal CDATA #FIXED \"http://inspirehep.net/info/HepNames/tools/authors_xml/\"\\n')\nf_xml_authors.write('>\\n\\n')\n\nf_xml_authors.write('<!ELEMENT cal:creationDate ( #PCDATA ) >\\n')\nf_xml_authors.write('<!ELEMENT cal:publicationReference ( #PCDATA ) >\\n')\n\nf_xml_authors.write('<!-- **************** COLLABORATIONS ********************* -->\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:collaborations ( cal:collaboration+ ) >\\n')\nf_xml_authors.write('\\t\\t<!ELEMENT cal:collaboration ( foaf:name, cal:experimentNumber?, cal:group? ) >\\n')\nf_xml_authors.write('\\t\\t<!ATTLIST cal:collaboration\\n')\nf_xml_authors.write('\\t\\t\\tid ID #REQUIRED\\n')\nf_xml_authors.write('\\t\\t>\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:experimentNumber ( #PCDATA ) >\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:group ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t\\t<!ATTLIST cal:group\\n')\nf_xml_authors.write('\\t\\twith IDREF #IMPLIED\\n')\nf_xml_authors.write('\\t\\t>\\n\\n')\n\nf_xml_authors.write('<!-- ORGANIZATIONS -->\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:organizations ( foaf:Organization+ ) >\\n')\nf_xml_authors.write('\\t\\t<!ELEMENT foaf:Organization ( cal:orgDomain?, foaf:name, cal:orgName*, cal:orgStatus*, cal:orgAddress?, cal:group? 
) >\\n')\nf_xml_authors.write('\\t\\t<!ATTLIST foaf:Organization\\n')\nf_xml_authors.write('\\t\\t\\tid ID #REQUIRED\\n')\nf_xml_authors.write('\\t\\t>\\n\\n')\n\nf_xml_authors.write('\\t\\t<!ELEMENT cal:orgAddress ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t\\t<!ELEMENT cal:orgDomain ( #PCDATA ) >\\n\\n')\n\nf_xml_authors.write('\\t\\t<!ELEMENT cal:orgName ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t\\t<!ATTLIST cal:orgName\\n')\nf_xml_authors.write('\\t\\t\\tsource CDATA \"INTERNAL\"\\n')\nf_xml_authors.write('\\t\\t>\\n\\n')\n\nf_xml_authors.write('\\t\\t<!ELEMENT cal:orgStatus ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t\\t<!ATTLIST cal:orgStatus\\n')\nf_xml_authors.write('\\t\\t\\tcollaborationid IDREF #IMPLIED\\n')\nf_xml_authors.write('\\t\\t>\\n\\n')\n\nf_xml_authors.write('<!-- AUTHORS -->\\n')\nf_xml_authors.write('<!ELEMENT cal:authors ( foaf:Person+ ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT foaf:Person ( foaf:name?, cal:authorNameNative?, foaf:givenName?, foaf:familyName, cal:authorSuffix?, cal:authorStatus?, cal:authorNamePaper, cal:authorNamePaperGiven?, cal:authorNamePaperFamily?, cal:authorCollaboration?, cal:authorAffiliations?, cal:authorids?, cal:authorFunding? ) >\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT foaf:familyName ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT foaf:givenName ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT foaf:name ( #PCDATA ) >\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:authorNameNative ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorNamePaper ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorNamePaperGiven ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorNamePaperFamily ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorStatus ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorSuffix ( #PCDATA ) >\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:authorCollaboration EMPTY >\\n')\nf_xml_authors.write('\\t<!ATTLIST cal:authorCollaboration\\n')\nf_xml_authors.write('\\t\\tcollaborationid IDREF \"c1\"\\n')\nf_xml_authors.write('\\t\\tposition CDATA #IMPLIED\\n')\nf_xml_authors.write('\\t>\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:authorAffiliations ( cal:authorAffiliation* ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorAffiliation EMPTY >\\n')\nf_xml_authors.write('\\t<!ATTLIST cal:authorAffiliation\\n')\nf_xml_authors.write('\\t\\torganizationid IDREF #REQUIRED\\n')\nf_xml_authors.write('\\t\\tconnection CDATA \"Affiliated with\"\\n')\nf_xml_authors.write('\\t>\\n\\n')\n\nf_xml_authors.write('\\t<!ELEMENT cal:authorids ( cal:authorid* ) >\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorid ( #PCDATA ) >\\n')\nf_xml_authors.write('\\t<!ATTLIST cal:authorid\\n')\nf_xml_authors.write('\\t\\tsource CDATA #REQUIRED\\n')\nf_xml_authors.write('\\t>\\n')\nf_xml_authors.write('\\t<!ELEMENT cal:authorFunding ( #PCDATA ) >\\n\\n')\n\n\nf_xml_authors.write('\\n]>\\n')\nf_xml_authors.write('<!--\\n')\nf_xml_authors.write('\\tARA author list for INSPIRE.\\n')\nf_xml_authors.write('-->\\n')\n\nf_xml_authors.write('<collaborationauthorlist\\n')\nf_xml_authors.write('\\txmlns:foaf=\"http://xmlns.com/foaf/0.1/\"\\n')\nf_xml_authors.write('\\txmlns:cal=\"http://inspirehep.net/info/HepNames/tools/authors_xml/\">\\n\\n')\n\n# publication specific info - it's expected that the publication reference is entered by hand after generation\nnow = datetime.datetime.now()\nf_xml_authors.write('\\t<cal:creationDate>%s</cal:creationDate>\\n' % 
now.strftime(\"%Y-%m-%d_%H:%M\"))\nf_xml_authors.write('\\t<cal:publicationReference>ENTER ARXIV URL HERE</cal:publicationReference>\\n\\n')\n\n# collaboration info\nf_xml_authors.write('\\t<cal:collaborations>\\n')\nf_xml_authors.write('\\t\\t<cal:collaboration id=\"c1\">\\n')\nf_xml_authors.write('\\t\\t\\t<foaf:name>ARA Collaboration</foaf:name>\\n')\nf_xml_authors.write('\\t\\t</cal:collaboration>\\n')\nf_xml_authors.write('\\t</cal:collaborations>\\n\\n')\n\n# institution info\nf_xml_authors.write('\\t<cal:organizations>\\n')\nfor aff in sorted_institutes:\n f_xml_authors.write('\\t\\t<foaf:Organization id=\"a%d\">\\n' % institute_numbers[aff])\n f_xml_authors.write('\\t\\t\\t<cal:orgDomain>http://</cal:orgDomain>\\n')\n f_xml_authors.write('\\t\\t\\t<foaf:name>%s</foaf:name>\\n' % institutes[aff][1])\n f_xml_authors.write('\\t\\t\\t<cal:orgName source=\"INTERNAL\">%s</cal:orgName>\\n' % institutes[aff][1])\n f_xml_authors.write('\\t\\t\\t<cal:orgStatus collaborationid=\"c1\">member</cal:orgStatus>\\n')\n f_xml_authors.write('\\t\\t\\t<cal:orgAddress> %s</cal:orgAddress>\\n' % institutes[aff][0])\n f_xml_authors.write('\\t\\t</foaf:Organization>\\n')\nf_xml_authors.write('\\t</cal:organizations>\\n\\n')\n\n# author info\nf_xml_authors.write('\\t<cal:authors>\\n')\nfor author in authors:\n f_xml_authors.write('\\t\\t<foaf:Person>\\n')\n f_xml_authors.write('\\t\\t\\t<foaf:name>%s</foaf:name>\\n' % author[0])\n f_xml_authors.write('\\t\\t\\t<cal:authorNameNative/>\\n')\n f_xml_authors.write('\\t\\t\\t<foaf:givenName>%s</foaf:givenName>\\n' % author[0].split(' ')[0])\n f_xml_authors.write('\\t\\t\\t<foaf:familyName>%s</foaf:familyName>\\n' % author[0].split('. ')[1])\n f_xml_authors.write('\\t\\t\\t<cal:authorSuffix/>\\n')\n f_xml_authors.write('\\t\\t\\t<cal:authorStatus/>\\n')\n f_xml_authors.write('\\t\\t\\t<cal:authorNamePaper>%s</cal:authorNamePaper>\\n' % author[0])\n f_xml_authors.write('\\t\\t\\t<cal:authorNamePaperGiven>%s</cal:authorNamePaperGiven>\\n' % author[0].split(' ')[0])\n f_xml_authors.write('\\t\\t\\t<cal:authorNamePaperFamily>%s</cal:authorNamePaperFamily>\\n' % author[0].split('. ')[1])\n f_xml_authors.write('\\t\\t\\t<cal:authorCollaboration collaborationid=\"c1\" position=\"\"/>\\n')\n f_xml_authors.write('\\t\\t\\t<cal:authorAffiliations>\\n')\n for aff in author[1]:\n f_xml_authors.write('\\t\\t\\t\\t<cal:authorAffiliation organizationid=\"a%d\" connection=\"\"/>\\n' % institute_numbers[aff])\n f_xml_authors.write('\\t\\t\\t</cal:authorAffiliations>\\n')\n if len(author[2]) > 0:\n f_xml_authors.write('\\t\\t\\t<cal:authorids>\\n')\n for key in author[2]:\n if key =='orcid':\n f_xml_authors.write('\\t\\t\\t\\t<cal:authorid source=\"ORCID\">%s</cal:authorid>\\n' % author[2]['orcid'])\n if key == 'inspireid':\n f_xml_authors.write('\\t\\t\\t\\t<cal:authorid source=\"INSPIRE\">%s</cal:authorid>\\n' % author[2]['inspireid'])\n f_xml_authors.write('\\t\\t\\t</cal:authorids>\\n')\n f_xml_authors.write('\\t\\t</foaf:Person>\\n')\nf_xml_authors.write('\\t</cal:authors>\\n')\n\n# end of xml file\nf_xml_authors.write('</collaborationauthorlist>')\nf_xml_authors.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7078260779380798,
"alphanum_fraction": 0.7310144901275635,
"avg_line_length": 32.588233947753906,
"blob_id": "14e0be8f2f12558bd594855dfbed73decd544554",
"content_id": "abdc104d427b537fb14c59fbcc9c0d67d73dea65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1725,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 51,
"path": "/README.md",
"repo_name": "ara-software/authorlist",
"src_encoding": "UTF-8",
"text": "# ARA Author List\n\nThis is a centralized store for ARA author lists. \n\nThere are two files used as input, authors_in.yaml and institutions_in.yaml\n\nRunning `make` will then generate the other files (using a python script). \n\ninstitutions_in.yml defines a mapping of institution id's to addresses, including an optional short name (used for PoS). Use standard YAML syntax, for example:\n\n```yaml\nUC:\n instituteid: UC\n shortid: University of Chicago\n address: Dept. of Physics, Enrico Fermi Institue, Kavli Institute for Cosmological Physics, University of Chicago, Chicago, IL 60637 \n```\n\nThe format of authors_in.yaml is \n\n```yaml\nC. Deaconu:\n authlistname: C. Deaconu\n affiliations: \n - UC\n orcid: 0000-0002-4953-6397\n```\n\nIf the orcid of the author is known, it can also be added. Currently the only\nsupported fields are `authlistname`, `affiliations`, and `orcid`.\nAn author can have more than one affiliation; for example:\n\n```yaml\nD.Z. Besson:\n authlistname: D.Z. Besson\n affiliations: \n - KU\n - Moscow\n orcid: 0000-0001-6733-963X\n```\n\n\n\nOutput is generated in several formats: \n\n - `ara_revtex_authors.tex` and `ara_revtex_institutes.txt` for use with revtex journals\n - `ara_elsarticle_authors.tex` for use with elsevier journals\n - `ara_pos_authors.tex` for use with PoS (a sort of raw format)\n - `ara_icrc_authors.tex` for use with the 2019 ICRC authorlist format. \n - `ara_authors.html` for web display, this is used to generate an index.html that we can use for gh-pages (you should commit this if it changed!) \n - `ara_authors.txt` for text\n - `ara_authors.xml` format for arxiv/inspirehep -- NOTE: the xml file generated uses a placeholder publicationReference which should be changed by hand\n\n\n\n\n\n\n\n\n\n\n\n\n"
}
] | 3 |
GradyLee/learn-python
|
https://github.com/GradyLee/learn-python
|
945b4fa5f185eedc4a36611a072fddd52449abb4
|
0312d9a7885ca3c139b2e8c235d37c092c3afb63
|
93727d8c1ef9473498ea2ae1eca4b73066adf065
|
refs/heads/master
| 2020-09-23T14:03:28.700538 | 2019-12-03T02:52:24 | 2019-12-03T02:52:24 | 225,517,237 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47833332419395447,
"alphanum_fraction": 0.5,
"avg_line_length": 18.387096405029297,
"blob_id": "97bc3bfde573f62922dec3d421f47dea846e7a4e",
"content_id": "686501ff907d7ee6084e95454d42e952aba61475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 666,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 31,
"path": "/createcounter.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n利用闭包返回一个计数器函数,每次调用它返回递增整数\n'''\n\ndef createCounter():\n def auto_add():\n n = 0\n while True:\n n = n + 1\n yield n\n \n x = auto_add()\n\n def counter():\n return next(x)\n \n return counter\n\n\n\nif __name__ == \"__main__\":\n counterA = createCounter()\n print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5\n counterB = createCounter()\n if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:\n print('测试通过!')\n else:\n print('测试失败!')"
},
{
"alpha_fraction": 0.3046683073043823,
"alphanum_fraction": 0.3734643757343292,
"avg_line_length": 17.545454025268555,
"blob_id": "a28ccd2e8cb71c9cfe289e7f97285ef94373e2de",
"content_id": "868403bc6d81b01a89cc4d3f11f90f0b5bc347f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 22,
"path": "/testfuncparam.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n\n\n'''\n测试函数参数组合\n'''\n\ndef f1(a, b, c=0, *args, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)\n\n\nif __name__ == \"__main__\":\n f1(1, 2)\n f1(1, 2, c=3)\n f1(1, 2, 3, 'a', 'b')\n f1(1, 2, 3, 'a', 'b', x=99)\n f2(1, 2, d=99, ext=None)"
},
{
"alpha_fraction": 0.5380200743675232,
"alphanum_fraction": 0.559540867805481,
"avg_line_length": 17.864864349365234,
"blob_id": "67b417f9c50633f537dd672e8a8e44eab201e20b",
"content_id": "a5ccaab355218ab9e8232c37b7f8659042eb4a2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 37,
"path": "/property.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n设计一个decorator,它可作用于任何函数上,并打印该函数的执行时间\n'''\nclass Screen(object):\n \n @property\n def width(self):\n return self.__width\n\n @width.setter\n def width(self, value):\n self.__width = value\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, value):\n self.__height = value\n\n @property\n def resolution(self):\n return self.__height * self.__width\n\nif __name__ == \"__main__\":\n s = Screen()\n s.width = 1024\n s.height = 768\n print('resolution =', s.resolution)\n if s.resolution == 786432:\n print('测试通过!')\n else:\n print('测试失败!')"
},
{
"alpha_fraction": 0.3961038887500763,
"alphanum_fraction": 0.42694804072380066,
"avg_line_length": 18.28125,
"blob_id": "29ca54f97eedee7c5d54a910fa413a30159bcaca",
"content_id": "f80ee72f252077fb4f2e2fc18dcbae6d40dd81a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 32,
"path": "/findMinAndMax.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n使用迭代查找一个list中最小和最大值\n'''\n\ndef findMinAndMax(L):\n if len(L) == 0:\n return (None, None)\n h=L[0]\n l=L[0]\n for i in L:\n if (i >= h):\n h = i\n if (i <= l):\n l = i\n return (l, h)\n \n \n\nif __name__ == \"__main__\":\n if findMinAndMax([]) != (None, None):\n print('测试失败!')\n elif findMinAndMax([7]) != (7, 7):\n print('测试失败!')\n elif findMinAndMax([7, 1]) != (1, 7):\n print('测试失败!')\n elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):\n print('测试失败!')\n else:\n print('测试成功!')"
},
{
"alpha_fraction": 0.39498433470726013,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 28.045454025268555,
"blob_id": "0d6503a25a3bc6470d1e8e3728339f78e8348d2f",
"content_id": "8e2d4af634545cf0250cb619790162d0feb32dce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 22,
"path": "/str2float.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n利用map和reduce编写一个str2float函数,把字符串'123.456'转换成浮点数123.456\n'''\n\nfrom functools import reduce\n\ndef str2float(s):\n digit = {'1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '0':0}\n\n return reduce(lambda x, y: x * 10 + y, map(lambda a : digit[a], s[0:s.index('.')])) \\\n + 0.1 * reduce(lambda x, y: x * 0.1 + y, map(lambda a : digit[a],s[-1:s.index('.'):-1]))\n \n\nif __name__ == \"__main__\":\n print('str2float(\\'123.456\\') =', str2float('123.456'))\n if abs(str2float('123.456') - 123.456) < 0.00001:\n print('测试成功!')\n else:\n print('测试失败!')"
},
{
"alpha_fraction": 0.5024271607398987,
"alphanum_fraction": 0.5242718458175659,
"avg_line_length": 16.95652198791504,
"blob_id": "cf74184f1ad50a7d7b9ad22acc9b053d8d9a62b1",
"content_id": "be71666c3120131dc249716ea4406cab8bb27528",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/normalize.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n利用map()函数,把用户输入的不规范的英文名字,变为首字母大写,其他小写的规范名字\n'''\n\n\ndef normalize(name):\n if len(name) == 0:\n return None\n s = str.upper(name[0]) + \"\".join(list(map(str.lower, name[1:]))) \n return s\n '''\n return s.capitalize(name)\n '''\n \n \n\nif __name__ == \"__main__\":\n L1 = ['adam', 'LISA', 'barT']\n L2 = list(map(normalize, L1))\n print(L2)"
},
{
"alpha_fraction": 0.41046276688575745,
"alphanum_fraction": 0.48490944504737854,
"avg_line_length": 16.785715103149414,
"blob_id": "fd54830e5856157b4976090973fea1407dccb5a3",
"content_id": "401edadb67c73010150ee1255e3dff1be60c44da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 28,
"path": "/quadratic.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n\n\n'''\n求解一元二次方程ax2+bx+c=0的解\n'''\n\nimport math\n\ndef quadratic(a, b, c):\n t = math.sqrt(b * b - 4 * a * c)\n x1 = (-b + t) / (2 * a)\n x2 = (-b - t) / (2 * a)\n return x1, x2\n\n\nif __name__ == \"__main__\":\n\tprint('quadratic(2, 3, 1) =', quadratic(2, 3, 1))\n\tprint('quadratic(1, 3, -4) =', quadratic(1, 3, -4))\n\n\tif quadratic(2, 3, 1) != (-0.5, -1.0):\n\t\tprint('测试失败')\n\telif quadratic(1, 3, -4) != (1.0, -4.0):\n\t\tprint('测试失败')\n\telse:\n\t\tprint('测试成功')"
},
{
"alpha_fraction": 0.40303030610084534,
"alphanum_fraction": 0.4121212065219879,
"avg_line_length": 20.322580337524414,
"blob_id": "3a7bf462252dfb08aa6073900354e3847809529e",
"content_id": "7436dc09530f99636a6fb08cf1a21a88ef5151c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 31,
"path": "/mytrim.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n利用slice来实现字符串去除前后的空格\n'''\n\ndef trim(s):\n if s == '':\n return s\n while s != '' and s[0] == ' ':\n s = s[1:]\n while s != '' and s[-1] == ' ':\n s = s[:-1]\n return s\n\nif __name__ == \"__main__\":\n if trim('hello ') != 'hello':\n print('测试失败!')\n elif trim(' hello') != 'hello':\n print('测试失败!')\n elif trim(' hello ') != 'hello':\n print('测试失败!')\n elif trim(' hello world ') != 'hello world':\n print('测试失败!')\n elif trim('') != '':\n print('测试失败!')\n elif trim(' ') != '':\n print('测试失败!')\n else:\n print('测试成功!')"
},
{
"alpha_fraction": 0.447988897562027,
"alphanum_fraction": 0.49237170815467834,
"avg_line_length": 18,
"blob_id": "f21b7d67f3b83ea55686981dab973a0ea42a307c",
"content_id": "02f373afa15a28ec5dfd80dbc83f35784ee87abb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 799,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 38,
"path": "/metric.py",
"repo_name": "GradyLee/learn-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding utf-8 -*-\n\n'''\n设计一个decorator,它可作用于任何函数上,并打印该函数的执行时间\n'''\n\nimport functools,time\n\ndef metric(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kw):\n t1=time.time()\n ret = fn(*args, **kw)\n t2=time.time()\n print('%s executed in %s ms' % (fn.__name__, t2-t1))\n return ret\n return wrapper\n\nif __name__ == \"__main__\":\n @metric\n def fast(x, y):\n time.sleep(0.0012)\n return x + y;\n\n @metric\n def slow(x, y, z):\n time.sleep(0.1234)\n return x * y * z;\n\n f = fast(11, 22)\n s = slow(11, 22, 33)\n if f != 33:\n print('测试失败!')\n elif s != 7986:\n print('测试失败!')\n else:\n print('测试成功!')"
}
] | 9 |
febacc103/luminarpythonprograms
|
https://github.com/febacc103/luminarpythonprograms
|
22eafa916b5d21f093ec9b88ea1259456598336b
|
4d7286a9d76efe967eff504e329b1583fd529e8b
|
6d886a7553360142e8cb07b84c3d0a412ddb0e5b
|
refs/heads/master
| 2023-04-17T05:57:05.192337 | 2021-04-20T04:14:09 | 2021-04-20T04:14:09 | 359,679,296 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.805084764957428,
"alphanum_fraction": 0.805084764957428,
"avg_line_length": 18.75,
"blob_id": "1880c1e8ad8f24fb80140b16532bcec965340105",
"content_id": "bb9b345f464fd8f309551b60345ec140a0d7e4c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 12,
"path": "/identifiers/demo1.py",
"repo_name": "febacc103/luminarpythonprograms",
"src_encoding": "UTF-8",
"text": "print(\"hello luminr technolab\")\ncname=\"luminar technolab\"\nlocation=\"kakkanadu\"\nprint(cname)\nprint(location)\n\n#luminar technolab kakkanadu\nprint(cname,location)\n\n\n#luminar technolab located in kakkanadu\nprint(cname,\"located in\",location)"
},
{
"alpha_fraction": 0.5923566818237305,
"alphanum_fraction": 0.7229299545288086,
"avg_line_length": 14.75,
"blob_id": "b63d30fa18083c89108bd98c3e56e83fa275f4fc",
"content_id": "08afe71b5a5d8f602e2ca5e0f1e6ec48bc51c640",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 20,
"path": "/identifiers/swapping.py",
"repo_name": "febacc103/luminarpythonprograms",
"src_encoding": "UTF-8",
"text": "num1=10\nnum2=20\n\n#num1=20\n#num2=10\nprint(\"num1=\",num2)\nprint(\"num2=\",num1)\nprint(value before swapping)\nprint(\"number1 is\",num1)\nprint(\"number2 is\",num2)\n\nprint(\"value after swapping\")\ntemp=num1\nnum1=num2\nnum2=temp\nprint(\"number1 is\",num1)\nprint(\"number2 is\",num2)\n\n#num1=num1-num2 #10+20=30\n#num2=num1-num2 #num2"
},
{
"alpha_fraction": 0.6836734414100647,
"alphanum_fraction": 0.704081654548645,
"avg_line_length": 11.375,
"blob_id": "ad1d00a512157b44177ac9ab70a3e1d14ed76a83",
"content_id": "226ba446ef91255f5c0347ca642b2c18c6c92700",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/identifiers/demo3.py",
"repo_name": "febacc103/luminarpythonprograms",
"src_encoding": "UTF-8",
"text": "#name\n#age\n#color\n\nname=\"feba\"\nage=28\ncolor=\"rose\"\nprint(name,age,\"years old like\",color,\"colour\")"
},
{
"alpha_fraction": 0.692556619644165,
"alphanum_fraction": 0.7411003112792969,
"avg_line_length": 17.235294342041016,
"blob_id": "c87d0cf363e1524dd346d2f3dd7d94f70bc889c2",
"content_id": "ab8f0405d75f84d32097dd57b8bf6096eb62144d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 17,
"path": "/identifiers/input.py",
"repo_name": "febacc103/luminarpythonprograms",
"src_encoding": "UTF-8",
"text": "#input function\n#num1=10\n#num2=20\nnum1=input(\"enter first number\")\nnum2=input(\"enter number2\")\nprint(num1)\nprint(num2)\n\nname=input(\"what is your name\")\nage=input(\"your age\")\ncolor=input(\"which is your favorite colour\")\nprint(name)\nprint(age)10\n20\n\nprint(color)\nprint(name,age,\"yearse old like\",color,\"colour\")"
}
] | 4 |
kingrichard2005/spring2015_cs612_a1
|
https://github.com/kingrichard2005/spring2015_cs612_a1
|
08842dacf973be2bde09a0b2bbfd9196946a5be1
|
5c66e71520ea777da5d80b4521880da8e31dc2c0
|
7ddf29ad96d6fa48796d17d7cf5c82248444044b
|
refs/heads/master
| 2020-04-05T05:48:39.365464 | 2015-02-21T21:07:00 | 2015-02-21T21:07:00 | 31,097,182 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5725826025009155,
"alphanum_fraction": 0.5960832238197327,
"avg_line_length": 35.15044403076172,
"blob_id": "0bc9c426393c3fb136be5f5af059221fadce20f7",
"content_id": "9bb43e1e2198b884b76ce5f632676bb0c59bff62",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4085,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 113,
"path": "/a1.py",
"repo_name": "kingrichard2005/spring2015_cs612_a1",
"src_encoding": "UTF-8",
"text": "#-------------------------------------------------------------------------------\n# Name: cs612-a1\n# Description: Assignment 1 methods for loading data with Numpy\n#\n# Author: kingrichard2005\n#\n# Created: 2015-02-20\n# Copyright: (c) kingrichard2005 2015\n# Licence: MIT\n#-------------------------------------------------------------------------------\nimport numpy as np\nimport unittest\nfrom tempfile import TemporaryFile\n\nclass TestHelper():\n # A test helper class\n def __init__(self, numberOfNumbers = 100, rangeOfEachNumber = 100 ):\n self.tempTestFile = TemporaryFile();\n self.aBunchOfIntegers = np.random.randint(rangeOfEachNumber, size=numberOfNumbers);\n self.writeABunchOfNumbersToATempFile();\n\n def writeABunchOfNumbersToATempFile(self):\n try:\n # Writes a bunch of numbers to a temp test file\n np.save(self.tempTestFile, self.aBunchOfIntegers)\n self.tempTestFile.seek(0);\n except:\n print \"error in writeABunchOfNumbersToAFile(...)\"\n\nclass DataManager():\n # Assignment 1 class\n def __init__(self, rangeOfRandInts = 100, square_dim = 5 ):\n self.OneD = [];\n self.TwoD = np.random.random_integers(rangeOfRandInts, size=(square_dim,square_dim));\n self.array1 = [[]];\n self.array2 = [[]];\n self.array3 = [[]];\n self.File1Handle = TemporaryFile();\n self.File2Handle = TemporaryFile();\n self.File3Handle = TemporaryFile();\n\n def method1LoadAFileToOneD( self, aFileObj ):\n try:\n # Method 1\n self.OneD = np.load(aFileObj);\n except:\n print \"error in method1LoadAFile(...)\"\n\n def method2CreateNewTwoD( self, rangeOfRandInts = 100, square_dim = 5 ):\n try:\n # Method 2\n self.TwoD = np.random.random_integers(rangeOfRandInts, size=(square_dim,square_dim));\n except:\n print \"error in method2PlaceRandomIntsInSquareMatrix(...)\"\n\n def method3SortOneD( self ):\n try:\n # Method 3\n self.SortOneD = np.sort(self.OneD, kind='mergesort');\n except:\n print \"error in method3SortOneD()\"\n\n def method4PartitionTwoD( self, splitNdx1 = 0, splitNdx2 = 2, splitNdx3 = 4 ):\n try:\n # Method 4\n horizontalColumnAxisNdx = 1\n splitArrays = np.split(self.TwoD, [splitNdx1, splitNdx2, splitNdx3], axis = horizontalColumnAxisNdx)\n self.array1 = splitArrays[0];\n self.array2 = splitArrays[1];\n self.array3 = splitArrays[2];\n except:\n print \"error in method4PartitionTwoD(...)\"\n\n def method5PlacePartitionsInFiles( self ):\n try:\n np.save(self.File1Handle, self.array1)\n self.File1Handle.seek(0);\n\n np.save(self.File2Handle, self.array2)\n self.File2Handle.seek(0);\n\n np.save(self.File3Handle, self.array3)\n self.File3Handle.seek(0);\n except:\n print \"error in method5PlacePartitionsInFiles()\"\n\nclass TestUM(unittest.TestCase):\n # Data Manager tester\n def setUp(self):\n # Arrange: Create a test helper that provides\n # a file with a bunch of integers\n self.ts = TestHelper();\n \n def test_method1_load_a_file(self):\n # Arrange: Create a DataMananger\n dataManager = DataManager();\n # Act: Load a bunch of integers \n dataManager.method1LoadAFileToOneD(self.ts.tempTestFile); \n # Assert: a bunch of integers from file are equal to OneD\n expected = self.ts.aBunchOfIntegers;\n result = dataManager.OneD;\n self.assertEqual( np.array_equal(expected,result), True ) \n\nif __name__ == \"__main__\":\n unittest.main();\n print \"testing complete\"\n #ts = TestHelper();\n #dataManager = DataManager();\n #dataManager.method1LoadAFileToOneD(ts.tempTestFile);\n #dataManager.method2CreateTwoD();\n #dataManager.method3SortOneD();\n #dataManager.method4PartitionTwoD();\n 
#dataManager.method5PlacePartitionsInFiles()\n"
}
] | 1 |
AniolSala/Visualizing-differential-equations
|
https://github.com/AniolSala/Visualizing-differential-equations
|
5d410d5be4f81981ba2ada0354ab41b63be99f48
|
333b8475fd690a3328692e1b6cb57a1a71149065
|
15b7eef7f0b0395351291abbbace39e3388b544c
|
refs/heads/master
| 2020-04-21T22:06:28.489747 | 2019-07-11T15:14:48 | 2019-07-11T15:14:48 | 169,862,438 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5392809510231018,
"alphanum_fraction": 0.5541500449180603,
"avg_line_length": 28.070968627929688,
"blob_id": "9353a2d50bedc9d6c43fe0f6307833b45774de96",
"content_id": "0dc1aa01b80d77b1515975ec39fbec27959ebacd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4506,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 155,
"path": "/dif_eq_solver.py",
"repo_name": "AniolSala/Visualizing-differential-equations",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nfrom pandas import DataFrame\n\n\nclass SolutionObject(DataFrame):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def fast_plot(self, x_col, y_col, z_col=None, fig_size=(9, 6),\n *args, **kwargs):\n \"\"\"\n The arguments x_col, y_col and z_col must be an integer or an array. If\n the argument is an integer then the data for that column will be takan\n as the column number corresponding to this integer (for example, if\n x_col=1 then the data for the x label will be the second column of the\n solution).\n\n If an array is passed, the data for that lebel will be that array.\n\n The plot will be a 2D plot but default, unless the data for the z\n label is passed.\n\n \"\"\"\n\n data = []\n for coord in (x_col, y_col, z_col):\n if isinstance(coord, int):\n data.append(self[coord])\n else:\n data.append(coord)\n\n # 2D plot\n if z_col is None:\n fig, ax = plt.subplots(figsize=fig_size)\n ax.plot(data[0], data[1], *args, **kwargs)\n\n return ax\n\n # 3D plot\n fig = plt.figure(frameon=0)\n proj_3D = fig.gca(projection='3d')\n proj_3D.plot(data[0], data[1], data[2])\n\n return proj_3D\n\n\nclass DiffEqSolver:\n\n def __init__(self, dif_equation, method='rk4'):\n \"\"\"\n\n dif_equation must take by the arguments the time t and vector y\n and return a numpy like array with the same length as\n y.\n\n \"\"\"\n\n self.dy = dif_equation\n self.method = method\n self._solution = None\n\n @property\n def method(self):\n return self._method\n\n @method.setter\n def method(self, m):\n valid_methods = ['rk4', 'rk2', 'newton']\n\n try:\n valid_method = m.lower() in valid_methods\n except AttributeError:\n raise AttributeError(\"Argument method must be a string\")\n\n if valid_method is False:\n msg = \"Unknown method. 
Valid methods are: 'rk4', 'rk2', 'newton'\"\n raise TypeError(msg)\n\n self._method = m\n\n def next_step_newton(self, t, y):\n raise AttributeError('This method is not implemented!')\n\n def next_step_rk2(self, t, y):\n k1 = self.dy(t, y)\n k2 = self.dy(t + self.h, y + self.h * k1)\n\n return y + (self.h / 2.0) * (k1 + k2)\n\n def next_step_rk4(self, t, y):\n\n k1 = self.dy(t, y)\n k2 = self.dy(t + 0.5 * self.h, y + 0.5 * self.h * k1)\n k3 = self.dy(t + 0.5 * self.h, y + 0.5 * self.h * k2)\n k4 = self.dy(t + self.h, y + self.h * k3)\n\n return y + (self.h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)\n\n def get_trajectory(self, y0, next_step):\n '''\n Here it is performed the main loop to find the trajectory\n\n '''\n\n # Number of steps\n n_steps = len(self.time_steps)\n\n # Initial conditions\n sol_eqs = np.empty((n_steps, len(y0)))\n sol_eqs[0] = y0\n\n # Loop for solving the equations\n for i, t in enumerate(self.time_steps[1:], 1):\n sol_eqs[i] = next_step(t, sol_eqs[i-1])\n\n return sol_eqs\n\n def solve_eqs(self, initial_conditions, ti, tf, h, index=None, column_names=None,\n numpy_array=False, save=False, file_name=\"sol_eqs.csv\"):\n\n # Initial conditions\n self.initial_conditions = initial_conditions\n self.initial_time, self.final_time = ti, tf\n self.h = h\n\n # Choose the method\n if self.method.lower() == 'rk4':\n next_step = self.next_step_rk4\n elif self.method.lower() == 'rk2':\n next_step = self.next_step_rk2\n elif self.method.lower() == 'newton':\n next_step = self.next_step_newton\n\n # Time vector\n n_steps = int((tf - ti) / h)\n self.time_steps = np.linspace(ti, tf, n_steps)\n\n # Get the trajectory:\n sol_eqs = self.get_trajectory(initial_conditions, next_step)\n\n # Save the file if save is True:\n if save is True:\n np.savetxt(file_name, sol_eqs)\n\n # Create a dataframe\n self._solution = SolutionObject(\n sol_eqs, index=None, columns=column_names)\n\n # Return a dataframe unless numpy_array is not False\n if numpy_array:\n return sol_eqs\n\n return self._solution\n"
}
] | 1 |
rob-deans/pysc2-bot
|
https://github.com/rob-deans/pysc2-bot
|
8a33d757cacb77fcece35e35ce76d7f9a6b5accc
|
0e3801e122611e0c392fe9b799bf979324c94821
|
6c558ccaad412bb49d00f707406765260d7f8e06
|
refs/heads/master
| 2021-05-04T22:19:40.671868 | 2018-04-30T03:40:25 | 2018-04-30T03:40:25 | 120,027,906 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6145716309547424,
"alphanum_fraction": 0.6233415603637695,
"avg_line_length": 37.66086959838867,
"blob_id": "e5c758ad5a9c74e21bb837fe4de9430ab52dea9f",
"content_id": "ee26e80096ce0fab473efca61fb27d7330f31801",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4447,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 115,
"path": "/DQN_PARAM.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom collections import deque\nimport random\n\n\nclass Model:\n def __init__(self, input_size, input_flat, action_size, learning_rate, memory):\n self.wh = input_size\n self.input_flat = input_flat\n self.num_actions = action_size\n self.memory = memory\n\n self.screen_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='input')\n self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n self.rewards = tf.placeholder(tf.float32, shape=[None], name='rewards')\n\n x_image = tf.reshape(self.screen_input, [-1, self.wh, self.wh, 1])\n\n init = tf.truncated_normal_initializer()\n\n # create the network\n net = x_image\n\n net = tf.layers.conv2d(inputs=net, filters=16, kernel_size=5, padding='same', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=32, kernel_size=5, padding='same', activation=tf.nn.relu)\n\n net = tf.contrib.layers.flatten(net)\n\n net = tf.layers.dense(inputs=net, units=64, activation=tf.nn.relu, kernel_initializer=init, name='dense1')\n net = tf.layers.dense(inputs=net, units=32, activation=tf.nn.relu, kernel_initializer=init, name='dense2')\n\n net = tf.layers.dense(inputs=net, units=self.num_actions, activation=None, kernel_initializer=init)\n\n self.output = net\n\n q_reward = tf.reduce_sum(tf.multiply(self.output, self.actions), 1)\n loss = tf.reduce_mean(tf.squared_difference(self.rewards, q_reward))\n self.optimiser = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n self.saver = tf.train.Saver()\n\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session.run(tf.global_variables_initializer())\n try:\n self.saver.restore(self.session, '/home/rob/Documents/uni/fyp/sc2/model_param.ckpt')\n self.loaded_model = True\n except:\n print('No model found - training new one')\n self.loaded_model = False\n\n def train(self):\n if len(self.memory.memory) < self.memory.batch_size:\n return\n states, actions, rewards = self.memory.get_batch(self, self.memory.batch_size)\n self.session.run(self.optimiser, feed_dict={\n self.screen_input: states,\n self.actions: actions,\n self.rewards: rewards}\n )\n\n def get_action(self, state, epsilon):\n if random.random() < epsilon:\n action = random.randint(0, self.num_actions - 1)\n else:\n temp = self.session.run(self.output, feed_dict={self.screen_input: [state]})[0]\n action = np.argmax(temp)\n\n return action\n\n def get_batch_action(self, states):\n return self.session.run(self.output, feed_dict={self.screen_input: states})\n\n def save(self):\n self.saver.save(self.session, '/home/rob/Documents/uni/fyp/sc2/model_param.ckpt')\n\n\nclass ReplayMemory:\n def __init__(self, num_actions, batch_size, max_memory_size, gamma):\n self.num_actions = num_actions\n self.batch_size = batch_size\n self.memory = deque(maxlen=max_memory_size)\n self.gamma = gamma\n\n # Add the current state, the action we took, the reward we got for it and\n # whether it was the terminal (done) state for the ep\n def add(self, state, action, reward, done):\n actions = np.zeros(self.num_actions)\n actions[action] = 1\n self.memory.append([state, actions, reward, done, None])\n self.update(state)\n\n # Update the memory to include the next state\n def update(self, next_state):\n if len(self.memory) > 0:\n self.memory[-1][4] = next_state\n\n def get_batch(self, model, batch_size=50):\n mini_batch = random.sample(self.memory, batch_size)\n states = [item[0] for item in mini_batch]\n actions = [item[1] for item 
in mini_batch]\n rewards = [item[2] for item in mini_batch]\n done = [item[3] for item in mini_batch]\n next_states = [item[4] for item in mini_batch]\n\n q_values = model.get_batch_action(next_states)\n y_batch = []\n\n for i in range(batch_size):\n if done[i]:\n y_batch.append(rewards[i])\n else:\n y_batch.append(rewards[i] + self.gamma * np.max(q_values[i]))\n\n return states, actions, y_batch\n\n"
},
{
"alpha_fraction": 0.5548835396766663,
"alphanum_fraction": 0.5660842061042786,
"avg_line_length": 33.07633590698242,
"blob_id": "f8d89877f60e682f8739a4552644fc2807b773c5",
"content_id": "b6d7b18bad81f6c9b8ee0f32d0efafd0008bd007",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4464,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 131,
"path": "/param_agent.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom ac_param import *\nimport matplotlib.pyplot as plt\nfrom collections import deque\nimport numpy as np\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nimport pickle\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n\nclass MoveToBeacon(base_agent.BaseAgent):\n\n def __init__(self):\n super(MoveToBeacon, self).__init__()\n self.wh = 64\n self.num_actions = self.wh ** 2\n self.input_flat = self.wh ** 2 # Size of the screen\n\n self.batch_size = 32\n self.max_memory_size = 5000\n\n self.gamma = .99\n self.actor_lr = 1e-3\n self.critic_lr = 5e-3\n\n self.total_rewards = deque(maxlen=100)\n self.log_rewards = []\n self.current_reward = 0\n\n self.allow_pick = True\n self.action = 0\n\n self.targets = []\n self.beacons = []\n self.beacons_store = True\n\n self.memory = ReplayMemory(self.batch_size, self.max_memory_size)\n self.model = ActorCriticModelCont(self.wh, self.input_flat, self.num_actions,\n self.actor_lr, self.critic_lr, self.memory, self.gamma)\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n\n def step(self, obs):\n\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n current_state = player_relative.flatten()\n # current_state = [1 if c == 3 else 0 for c in current_state]\n\n if len(self.memory.memory) > 0:\n self.memory.update(current_state)\n self.model.train()\n\n super(MoveToBeacon, self).step(obs)\n\n done = False\n\n if _MOVE_SCREEN in obs.observation[\"available_actions\"]:\n\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()\n if self.beacons_store:\n self.beacons.append([neutral_x, neutral_y])\n self.beacons_store = False\n\n self.action = self.model.run(current_state)\n try:\n action_ = np.argmax(np.random.multinomial(1, self.action))\n except ValueError:\n action_ = np.argmax(np.random.multinomial(1, self.action/(1+1e-6)))\n\n self.allow_pick = False\n target_x = action_ // 64\n target_y = action_ % 64\n\n target = [target_y, target_x]\n self.targets.append(target)\n\n reward = obs.reward\n if reward == 1:\n print(target)\n self.beacons_store = True\n self.allow_pick = True\n # self.model.train()\n\n # STATS ONLY\n self.current_reward += reward\n if obs.last():\n done = True\n self.allow_pick = True\n self.total_rewards.append(self.current_reward)\n self.log_rewards.append(self.current_reward)\n if self.episodes % 100 == 0 and self.episodes > 0:\n self.model.save()\n pickle.dump(self.log_rewards, open('./results.pkl', 'wb'))\n print('Highest: {} | Lowest: {} | Average: {} | Timesteps: {}'.format(\n max(self.total_rewards),\n min(self.total_rewards),\n np.mean(self.total_rewards),\n self.steps)\n )\n del self.targets[:]\n del self.beacons[:]\n self.current_reward = 0\n\n if not neutral_y.any():\n return actions.FunctionCall(_NO_OP, [])\n\n actions_oh = np.zeros(self.num_actions)\n actions_oh[action_] = 1\n self.memory.add(current_state, actions_oh, reward, done)\n # self.model.train()\n\n return 
actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])\n"
},
{
"alpha_fraction": 0.5657839179039001,
"alphanum_fraction": 0.5756620168685913,
"avg_line_length": 34.50746154785156,
"blob_id": "40923d8d7a4cf86b01695ffe361227592e749048",
"content_id": "2d665d9950537a1dea899fd187d73129d8484031",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4758,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 134,
"path": "/policy_param_agent.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom policy_param import ReplayMemory\nfrom policy_param import Model\nimport numpy as np\nfrom collections import deque\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nimport pickle\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n\nclass MoveToBeacon(base_agent.BaseAgent):\n\n def __init__(self):\n super(MoveToBeacon, self).__init__()\n self.wh = 64\n self.num_actions = self.wh ** 2\n self.input_flat = self.wh ** 2 # Size of the screen\n\n self.batch_size = 1\n self.gamma = .99\n self.learning_rate = 1e-2\n\n self.actions = []\n self.states = []\n self.rewards = []\n\n # Stat count\n self.log_rewards = []\n self.total_rewards = deque(maxlen=100)\n self.total_actions = []\n self.current_reward = 0\n\n self.memory = ReplayMemory()\n self.model = Model(self.wh, self.input_flat, self.num_actions, self.learning_rate, self.memory)\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n\n def step(self, obs):\n\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n current_state = player_relative.flatten()\n # current_state = [1 if c == 3 else 0 for c in current_state]\n\n super(MoveToBeacon, self).step(obs)\n\n if obs.first():\n del self.states[:]\n del self.actions[:]\n del self.rewards[:]\n\n if _MOVE_SCREEN in obs.observation[\"available_actions\"]:\n\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()\n if not neutral_y.any():\n return actions.FunctionCall(_NO_OP, [])\n\n feed_dict = {self.model.screen_input: [current_state]}\n\n output = self.model.session.run(self.model.output, feed_dict)[0]\n try:\n action_ = np.argmax(np.random.multinomial(1, output))\n except ValueError:\n action_ = np.argmax(np.random.multinomial(1, output/(1+1e-6)))\n\n target_x = action_ // 64\n target_y = action_ % 64\n\n target = [target_y, target_x]\n\n self.states.append(current_state)\n actions_oh = np.zeros(self.num_actions)\n actions_oh[action_] = 1\n self.actions.append(actions_oh)\n\n reward = obs.reward\n self.rewards.append(reward)\n\n if reward == 1:\n print(target)\n\n if obs.last():\n rewards_discounted = self.discount_rewards(self.rewards)\n self.memory.add(self.states, self.actions, rewards_discounted)\n # Delete all the actions and states ready for more to be appended\n del self.states[:]\n del self.actions[:]\n del self.rewards[:]\n if self.episodes % self.batch_size == 0 and self.episodes > 0:\n self.model.train()\n\n # STATS ONLY\n self.current_reward += reward\n if obs.last():\n self.total_rewards.append(self.current_reward)\n self.log_rewards.append(self.current_reward)\n if self.episodes % 100 == 0 and self.episodes > 0:\n self.model.save()\n pickle.dump(self.log_rewards, open('./policy_results.pkl', 'wb'))\n print('Highest: {} | Lowest: {} | Average: {} | Timesteps: {}'.format(\n max(self.total_rewards),\n min(self.total_rewards),\n np.mean(self.total_rewards),\n self.steps)\n )\n self.current_reward = 0\n\n return 
actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])\n\n def discount_rewards(self, r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r, dtype=float)\n running_add = 0\n for t in reversed(range(0, len(r))):\n running_add = running_add * self.gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n"
},
{
"alpha_fraction": 0.6126047968864441,
"alphanum_fraction": 0.6221451163291931,
"avg_line_length": 35.98930358886719,
"blob_id": "6b169e2107961a8894274f138c0705eed84435c4",
"content_id": "13eede1011ff4d09fccb2920cd7708e242a3aa52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6918,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 187,
"path": "/actor_critic.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom collections import deque\nimport random\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nfrom pysc2.agents import base_agent\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_MM_PLAYER_RELATIVE = features.MINIMAP_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_CAMERA = actions.FUNCTIONS.move_camera.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_SELECT_RECT = actions.FUNCTIONS.select_rect.id\n_SELECT_CONTROL_GROUP = actions.FUNCTIONS.select_control_group.id\n_STOP_QUICK = actions.FUNCTIONS.Stop_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_MOVE_MINIMAP = actions.FUNCTIONS.Move_minimap.id\n_PATROL_SCREEN = actions.FUNCTIONS.Patrol_screen.id\n_PATROL_MINIMAP = actions.FUNCTIONS.Patrol_minimap.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HOLD_POSITION_QUICK = actions.FUNCTIONS.HoldPosition_quick.id\n_SMART_SCREEN = actions.FUNCTIONS.Smart_screen.id\n_SMART_MINIMAP = actions.FUNCTIONS.Smart_minimap.id\n\n# All the available actions\navailable_actions = [\n _NO_OP,\n _MOVE_CAMERA,\n _SELECT_POINT,\n _SELECT_RECT,\n _SELECT_CONTROL_GROUP,\n _STOP_QUICK,\n _SELECT_ARMY,\n _ATTACK_SCREEN,\n _MOVE_SCREEN,\n _MOVE_MINIMAP,\n _PATROL_SCREEN,\n _PATROL_MINIMAP,\n _ATTACK_MINIMAP,\n _HOLD_POSITION_QUICK,\n _SMART_SCREEN,\n _SMART_MINIMAP\n]\n\n\nclass ActorCriticModel:\n def __init__(self, input_size, input_flat, minimap_size, minimap_flat,\n action_size, actor_lr, critic_lr, memory, gamma):\n self.wh = input_size\n self.input_flat = input_flat\n self.mm_wh = minimap_size\n self.minimap_flat = minimap_flat\n self.num_actions = action_size\n self.gamma = gamma\n self.memory = memory\n\n # Generic\n init = tf.truncated_normal_initializer(0, 0.01)\n\n # ===================================== #\n # Actor #\n # ===================================== #\n\n self.input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='screen_input')\n self.minimap_input = tf.placeholder(tf.float32, shape=[None, self.minimap_flat], name='mini_input')\n self.army_selected = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='army_input')\n self.actor_actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n self.td_error = tf.placeholder(tf.float32, shape=[None, 1], name='rewards')\n\n self.actor_lr = actor_lr\n\n image = tf.reshape(self.army_selected, [-1, self.wh, self.wh, 3])\n\n net = tf.contrib.layers.conv2d(inputs=image, num_outputs=16, kernel_size=5, padding='same',\n activation_fn=tf.nn.relu)\n conv_net = tf.contrib.layers.conv2d(inputs=net, num_outputs=32, kernel_size=3, padding='same',\n activation_fn=tf.nn.relu)\n\n logits = tf.contrib.layers.conv2d(conv_net, num_outputs=1, kernel_size=1, activation_fn=None)\n\n self.output = tf.nn.softmax(tf.contrib.layers.flatten(logits))\n\n loss = tf.log(tf.reduce_sum(tf.multiply(self.output, self.actor_actions))) * self.td_error\n # entropy = tf.reduce_sum(tf.multiply(self.output, tf.log(self.output)))\n # loss += 0.001 * entropy\n\n self.optimiser = tf.train.AdamOptimizer(self.actor_lr).minimize(-loss)\n\n # ===================================== #\n # Critic #\n # 
===================================== #\n\n self.critic_td_target = tf.placeholder(tf.float32, shape=[None, 1], name='rewards')\n self.critic_lr = critic_lr\n\n critic_net = tf.layers.dense(inputs=conv_net, units=256, activation=tf.nn.relu, kernel_initializer=init)\n self.critic_output = tf.layers.dense(inputs=critic_net, units=1, activation=None, kernel_initializer=init)\n\n self.critic_loss = tf.reduce_mean(tf.squared_difference(self.critic_output, self.critic_td_target))\n self.critic_optimise = tf.train.AdamOptimizer(self.critic_lr).minimize(self.critic_loss)\n\n self.saver = tf.train.Saver()\n\n self.session = tf.Session()\n self.session.run(tf.global_variables_initializer())\n try:\n self.saver.restore(self.session, '/home/rob/Documents/uni/fyp/sc2/ac_model.ckpt')\n except:\n print('No model found - training new one')\n\n def train(self):\n if len(self.memory.memory) < self.memory.batch_size:\n return\n\n mini_batch = self.memory.get_batch()\n\n td_targets = []\n td_errors = []\n\n army = [item[0] for item in mini_batch]\n actions = [item[1] for item in mini_batch]\n rewards = [item[2] for item in mini_batch]\n done = [item[3] for item in mini_batch]\n next_states = [item[4] for item in mini_batch]\n\n values = self.batch_predict(army)\n\n for i in range(len(done)):\n if done[i]:\n td_targets.append([rewards[i]])\n else:\n td_targets.append(rewards[i] + self.gamma * self.predict(next_states[i]))\n\n td_errors.append(td_targets[-1] - values[i])\n\n # Training the critic\n self.session.run(self.critic_optimise, feed_dict={\n self.army_selected: army,\n self.critic_td_target: td_targets\n })\n\n # Training the actor\n self.session.run(self.optimiser, feed_dict={\n self.army_selected: army,\n self.actor_actions: actions,\n self.td_error: td_errors\n })\n\n def run(self, army):\n return self.session.run(self.output, feed_dict={self.army_selected: [army]})[0]\n\n def batch_predict(self, army):\n return self.session.run(self.critic_output, feed_dict={self.army_selected: army})\n\n def predict(self, next_army):\n return self.session.run(self.critic_output, feed_dict={self.army_selected: [next_army]})[0]\n\n def save(self):\n self.saver.save(self.session, '/home/rob/Documents/uni/fyp/sc2/ac_model.ckpt')\n\n\nclass ReplayMemory:\n def __init__(self, batch_size, max_memory_size=2000):\n self.batch_size = batch_size\n self.memory = deque(maxlen=max_memory_size)\n\n def add(self, state, action, reward, done):\n self.memory.append([state, action, reward, done, None])\n\n def update(self, next_state):\n if len(self.memory) > 0:\n if not self.memory[-1][3]:\n self.memory[-1][4] = next_state\n\n def get_batch(self):\n return random.sample(self.memory, self.batch_size)\n\n"
},
{
"alpha_fraction": 0.6046027541160583,
"alphanum_fraction": 0.6131606101989746,
"avg_line_length": 37.083702087402344,
"blob_id": "eb3660dc0e73b6af4ae19c5296fc17600f3fed00",
"content_id": "f4cf918319e9457c16fb0ebbb026300cd8de8549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8647,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 227,
"path": "/policy_agent.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\nfrom policy import ReplayMemory\nfrom policy import Model\n\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_MM_PLAYER_RELATIVE = features.MINIMAP_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_CAMERA = actions.FUNCTIONS.move_camera.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_SELECT_RECT = actions.FUNCTIONS.select_rect.id\n_SELECT_CONTROL_GROUP = actions.FUNCTIONS.select_control_group.id\n_STOP_QUICK = actions.FUNCTIONS.Stop_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_MOVE_MINIMAP = actions.FUNCTIONS.Move_minimap.id\n_PATROL_SCREEN = actions.FUNCTIONS.Patrol_screen.id\n_PATROL_MINIMAP = actions.FUNCTIONS.Patrol_minimap.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HOLD_POSITION_QUICK = actions.FUNCTIONS.HoldPosition_quick.id\n_SMART_SCREEN = actions.FUNCTIONS.Smart_screen.id\n_SMART_MINIMAP = actions.FUNCTIONS.Smart_minimap.id\n\n# All the available actions\navailable_actions = [\n _NO_OP,\n _MOVE_CAMERA,\n _SELECT_POINT,\n _SELECT_RECT,\n _SELECT_CONTROL_GROUP,\n _STOP_QUICK,\n _SELECT_ARMY,\n _ATTACK_SCREEN,\n _MOVE_SCREEN,\n _MOVE_MINIMAP,\n _PATROL_SCREEN,\n _PATROL_MINIMAP,\n _ATTACK_MINIMAP,\n _HOLD_POSITION_QUICK,\n _SMART_SCREEN,\n _SMART_MINIMAP\n]\n\n\nclass MoveToBeacon(base_agent.BaseAgent):\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n def __init__(self):\n super(MoveToBeacon, self).__init__()\n self.num_actions = len(available_actions)\n # Screen sizes\n self.input_flat = 84*84 # Size of the screen\n self.wh = 84\n # Minimap sizes\n self.mm_input_flat = 64*64\n self.mm_wh = 64\n\n self.batch_size = 15\n self.gamma = .99\n self.learning_rate = 1e-2\n\n self.actions = []\n self.states = []\n self.minimap_states = []\n self.army_state = []\n\n # Stat count\n self.total_rewards = []\n self.total_actions = []\n self.current_reward = 0\n self.actions_taken = np.zeros(self.num_actions)\n self.rewards = []\n\n self.memory = ReplayMemory()\n self.model = Model(self.wh, self.input_flat, self.mm_wh, self.mm_input_flat,\n 1, self.num_actions, self.learning_rate, self.memory)\n\n def discount_rewards(self, r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r, dtype=float)\n running_add = 0\n for t in reversed(range(0, len(r))):\n running_add = running_add * self.gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n def step(self, obs):\n # Current observable state\n screen_player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n current_state = screen_player_relative.flatten()\n # The minimap\n mm_player_relative = obs.observation['minimap'][_MM_PLAYER_RELATIVE]\n minimap_state = mm_player_relative.flatten()\n\n army_selected = np.array([1]) if 1 in obs.observation['screen'][_SELECT] else np.array([0])\n\n super(MoveToBeacon, self).step(obs)\n\n if obs.first():\n del self.states[:]\n del self.minimap_states[:]\n 
del self.actions[:]\n del self.army_state[:]\n del self.rewards[:]\n\n legal_actions = obs.observation['available_actions']\n\n feed_dict = {self.model.screen_input: [current_state],\n self.model.minimap_input: [minimap_state],\n self.model.army_input: [army_selected]}\n\n output = self.model.session.run(self.model.output, feed_dict)[0]\n out = redistribute(output, legal_actions)\n try:\n action = int(np.argmax(np.random.multinomial(1, out)))\n except ValueError:\n action = int(np.argmax(np.random.multinomial(1, out / (1 + 1e-6))))\n\n self.actions_taken[int(action)] += 1\n self.total_actions.append(action)\n\n self.states.append(current_state)\n self.minimap_states.append(minimap_state)\n self.army_state.append(army_selected)\n\n actions_oh = np.zeros(self.num_actions)\n actions_oh[action] = 1\n self.actions.append(actions_oh)\n\n # reward = obs.reward\n self.rewards.append(obs.reward)\n self.current_reward += obs.reward\n if obs.last():\n\n # if self.current_reward == 0:\n # reward = -1\n\n # rewards = [reward] * len(self.actions)\n rewards_discounted = self.discount_rewards(self.rewards)\n self.memory.add(self.states, self.minimap_states, self.army_state, self.actions, rewards_discounted)\n # Delete all the actions and states ready for more to be appended\n del self.states[:]\n del self.actions[:]\n del self.army_state[:]\n del self.rewards[:]\n if self.episodes % self.batch_size == 0 and self.episodes > 0:\n self.model.train()\n\n if obs.last():\n\n # Printing out the stats\n self.total_rewards.append(self.current_reward)\n self.current_reward = 0\n if self.episodes % 100 == 0 and self.episodes > 0:\n print('Highest: {} | Lowest: {} | Average: {} | Time steps: {}'.format(\n max(self.total_rewards[-100:]),\n min(self.total_rewards[-100:]),\n np.mean(self.total_rewards[-100:]),\n self.steps\n )\n )\n print(self.actions_taken)\n if self.episodes % 500 == 0 and self.episodes > 0:\n pickle.dump(self.total_actions, open('/home/rob/Documents/uni/fyp/sc2/policy_actions5.pkl', 'wb'))\n pickle.dump(self.total_rewards, open('/home/rob/Documents/uni/fyp/sc2/policy_rewards5.pkl', 'wb'))\n exit(0)\n # End stats #\n\n # The group of actions to take\n if available_actions[action] == _NO_OP:\n return actions.FunctionCall(_NO_OP, [])\n elif available_actions[action] == _SELECT_ARMY:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])\n elif available_actions[action] == _ATTACK_SCREEN \\\n or available_actions[action] == _MOVE_SCREEN \\\n or available_actions[action] == _PATROL_SCREEN \\\n or available_actions[action] == _SMART_SCREEN:\n # This is the scripted one\n neutral_y, neutral_x = (screen_player_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n elif available_actions[action] == _STOP_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _HOLD_POSITION_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _ATTACK_MINIMAP \\\n or available_actions[action] == _MOVE_MINIMAP \\\n or available_actions[action] == _PATROL_MINIMAP \\\n or available_actions[action] == _SMART_MINIMAP:\n neutral_y, neutral_x = (mm_player_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_NO_OP, [])\n\n\n# 
Defines if we have the potential of picking an illegal action and how we redistribute the probabilities\ndef redistribute(output, legal_actions):\n for i, action in enumerate(available_actions):\n if action not in legal_actions:\n output[i] = 0\n if sum(output) == 0:\n for i, a in enumerate(available_actions):\n if a in legal_actions:\n output[i] = float(1/len(legal_actions))\n else:\n output /= sum(output)\n return output\n\n\n"
},
{
"alpha_fraction": 0.5816382765769958,
"alphanum_fraction": 0.6107751727104187,
"avg_line_length": 23.567567825317383,
"blob_id": "4b64442ed2a2863ffa84570150cb2a6b3915e9bb",
"content_id": "9ef193f8aee3288368d46ff5a6c6037479652575",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1819,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 74,
"path": "/run_loop.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport time\nfrom pysc2.env import sc2_env\nfrom absl import flags\n\nfrom actor_critic import ActorCriticModel\n\nFLAGS = flags.FLAGS\n\n\ndef run_loop(agent, env, max_frames=60):\n \"\"\"A run loop to have agents and an environment interact.\"\"\"\n start_time = time.time()\n current_state = env.reset()\n rollout = []\n try:\n for i in range(max_frames):\n # Take an action\n action = agent.get_action(current_state)\n next_state, reward, done = env.step(action)\n rollout.append([current_state, action, reward, done, next_state])\n current_state = next_state\n yield rollout\n except KeyboardInterrupt as e:\n print(e)\n finally:\n elapsed_time = time.time() - start_time\n print(\"Took %.3f seconds\" % elapsed_time)\n\n\ndef collect_rollout(agent, env):\n rollout = run_loop(agent, env, 60)\n # agent.train(rollout)\n\n\ndef train(agent, env):\n for i in range(1000):\n collect_rollout(agent, env)\n\n\nif __name__ == '__main__':\n FLAGS(sys.argv)\n sc_env = sc2_env.SC2Env(\n map_name=\"MoveToBeacon\",\n visualize=False,\n screen_size_px=(84, 84),\n minimap_size_px=(64, 64),\n )\n # --agent\n # simple_agent.MoveToBeacon - -map\n # MoveToBeacon - -max_agent_steps = 1000000 - -norender\n\n num_actions = 16\n # Screen sizes\n input_flat = 84 * 84 # Size of the screen\n wh = 84\n # Minimap sizes\n mm_input_flat = 64 * 64\n mm_wh = 64\n\n batch_size = 8\n max_memory_size = 2000\n\n gamma = .99\n actor_lr = 1e-3\n critic_lr = 5e-3\n\n ac_agent = ActorCriticModel(wh, input_flat, num_actions, actor_lr, critic_lr, gamma)\n\n train(ac_agent, sc_env)\n\n"
},
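run_loop above is a generator that yields the growing rollout each frame. A compact non-generator variant of the same collection pattern, assuming a gym-style env whose step() returns (state, reward, done) as the file sketches (real pysc2 environments return TimeStep tuples instead):

def collect_episode(agent, env, max_frames=60):
    state = env.reset()
    transitions = []
    for _ in range(max_frames):
        action = agent.get_action(state)
        next_state, reward, done = env.step(action)
        transitions.append((state, action, reward, done, next_state))
        state = next_state
        if done:
            break
    return transitions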
{
"alpha_fraction": 0.6218522191047668,
"alphanum_fraction": 0.6318976283073425,
"avg_line_length": 39.149169921875,
"blob_id": "5f7bcd40236919b094722b1dc735c312ae45f38d",
"content_id": "863167ea7589abdddc7f755fd3f235ded4651d33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7267,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 181,
"path": "/simple_agent.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\nfrom DQN import ReplayMemory\nfrom DQN import Model\n\nimport random\nimport numpy as np\nimport pickle\nfrom collections import deque\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_MM_PLAYER_RELATIVE = features.MINIMAP_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_CAMERA = actions.FUNCTIONS.move_camera.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_SELECT_RECT = actions.FUNCTIONS.select_rect.id\n_SELECT_CONTROL_GROUP = actions.FUNCTIONS.select_control_group.id\n_STOP_QUICK = actions.FUNCTIONS.Stop_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_MOVE_MINIMAP = actions.FUNCTIONS.Move_minimap.id\n_PATROL_SCREEN = actions.FUNCTIONS.Patrol_screen.id\n_PATROL_MINIMAP = actions.FUNCTIONS.Patrol_minimap.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HOLD_POSITION_QUICK = actions.FUNCTIONS.HoldPosition_quick.id\n_SMART_SCREEN = actions.FUNCTIONS.Smart_screen.id\n_SMART_MINIMAP = actions.FUNCTIONS.Smart_minimap.id\n\n# All the available actions\navailable_actions = [\n _NO_OP,\n _MOVE_CAMERA,\n _SELECT_POINT,\n _SELECT_RECT,\n _SELECT_CONTROL_GROUP,\n _STOP_QUICK,\n _SELECT_ARMY,\n _ATTACK_SCREEN,\n _MOVE_SCREEN,\n _MOVE_MINIMAP,\n _PATROL_SCREEN,\n _PATROL_MINIMAP,\n _ATTACK_MINIMAP,\n _HOLD_POSITION_QUICK,\n _SMART_SCREEN,\n _SMART_MINIMAP\n]\n\n\nclass MoveToBeacon(base_agent.BaseAgent):\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n def __init__(self):\n super(MoveToBeacon, self).__init__()\n\n self.num_actions = len(available_actions)\n self.input_flat = 84*84 # Size of the screen\n self.wh = 84\n # Minimap sizes\n self.mm_input_flat = 64*64\n self.mm_wh = 64\n\n self.batch_size = 32\n self.max_memory_size = 2000\n\n self.gamma = .99\n self.learning_rate = 1e-4\n self.epsilon = 1.\n self.final_epsilon = .05\n self.epsilon_decay = 0.999\n\n self.total_rewards = deque(maxlen=100)\n self.current_reward = 0\n self.actions_taken = np.zeros(self.num_actions)\n self.rewards = []\n\n self.total_actions = []\n\n self.memory = ReplayMemory(self.num_actions, self.batch_size, self.max_memory_size, self.gamma)\n self.model = Model(self.wh, self.input_flat, self.mm_wh, self.mm_input_flat,\n 1, self.num_actions, self.learning_rate, self.memory)\n if self.model.loaded_model:\n self.epsilon = 0.05\n\n def step(self, obs):\n # Current observable state\n screen_player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n current_state = screen_player_relative.flatten()\n mm_player_relative = obs.observation['minimap'][_MM_PLAYER_RELATIVE]\n minimap_state = mm_player_relative.flatten()\n\n army_state = obs.observation['screen'][_SELECT].flatten()\n # army_selected = np.array([1]) if 1 in obs.observation['screen'][_SELECT] else np.array([0])\n\n if len(self.memory.memory) > 0:\n self.memory.update([current_state, minimap_state, army_state])\n self.model.train()\n\n super(MoveToBeacon, self).step(obs)\n\n legal_actions = obs.observation['available_actions']\n\n if random.random() < self.epsilon:\n action = 
legal_actions[random.randint(0, len(legal_actions)) - 1]\n action = available_actions.index(action)\n else:\n # feed_dict = {self.model.screen_input: [current_state], self.model.minimap_input: [minimap_state],\n # self.model.army_input: [army_selected]}\n feed_dict = {self.model.army_input: [army_state]}\n output = self.model.session.run(self.model.output, feed_dict)[0]\n output = [value if action in legal_actions else -9e10 for action, value in zip(available_actions, output)]\n action = np.argmax(output)\n self.actions_taken[int(action)] += 1\n self.total_actions.append(action)\n\n # print('Action taken: {}'.format(action))\n reward = obs.reward\n\n self.current_reward += reward\n if obs.last():\n self.total_rewards.append(self.current_reward)\n self.rewards.append(self.current_reward)\n self.current_reward = 0\n if self.episodes % 100 == 0 and self.episodes > 0:\n self.model.save()\n print('Highest: {} | Lowest: {} | Average: {}'.format(\n max(self.total_rewards),\n min(self.total_rewards),\n np.mean(self.total_rewards))\n )\n print(self.actions_taken)\n if self.episodes % 1000 == 0 and self.episodes > 0:\n pickle.dump(self.total_actions, open('/home/rob/Documents/uni/fyp/sc2/actions8.pkl', 'wb'))\n pickle.dump(self.rewards, open('/home/rob/Documents/uni/fyp/sc2/rewards8.pkl', 'wb'))\n exit(0)\n\n if self.epsilon > self.final_epsilon:\n self.epsilon = self.epsilon * self.epsilon_decay\n\n self.memory.add([current_state, minimap_state, army_state], action, reward, obs.last())\n # self.model.train()\n\n if available_actions[action] == _NO_OP:\n return actions.FunctionCall(_NO_OP, [])\n elif available_actions[action] == _SELECT_ARMY:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])\n elif available_actions[action] == _ATTACK_SCREEN \\\n or available_actions[action] == _MOVE_SCREEN \\\n or available_actions[action] == _PATROL_SCREEN \\\n or available_actions[action] == _SMART_SCREEN:\n # This is the scripted one\n neutral_y, neutral_x = (screen_player_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n elif available_actions[action] == _STOP_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _HOLD_POSITION_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _ATTACK_MINIMAP \\\n or available_actions[action] == _MOVE_MINIMAP \\\n or available_actions[action] == _PATROL_MINIMAP \\\n or available_actions[action] == _SMART_MINIMAP:\n neutral_y, neutral_x = (mm_player_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_NO_OP, [])\n"
},
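The DQN agent above mixes epsilon-greedy exploration with masking of illegal actions and a multiplicative epsilon decay. The same selection rule in isolation, assuming only numpy (a sketch, not the repo's API):

import random
import numpy as np

def epsilon_greedy(q_values, legal_indices, epsilon):
    # Explore with probability epsilon, otherwise take the best legal action.
    if random.random() < epsilon:
        return random.choice(legal_indices)
    masked = np.full(len(q_values), -np.inf)
    masked[legal_indices] = np.asarray(q_values)[legal_indices]
    return int(np.argmax(masked))

epsilon = 1.0
for episode in range(10):
    # ... play one episode, calling epsilon_greedy at each step ...
    epsilon = max(0.05, epsilon * 0.999)  # decay toward the exploration floor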
{
"alpha_fraction": 0.6125084757804871,
"alphanum_fraction": 0.6233854293823242,
"avg_line_length": 36.227848052978516,
"blob_id": "1d8a65e889f6f4618841158977267c3f49560c33",
"content_id": "cf78fda054fecd20be755477d72369093a4303e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2942,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 79,
"path": "/policy_param.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport random\n\n\nclass Model:\n def __init__(self, wh, input_flat, action_size, learning_rate, memory):\n self.wh = wh\n self.input_flat = input_flat\n self.num_actions = action_size\n self.memory = memory\n\n self.screen_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='input')\n self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n\n init = tf.random_normal_initializer\n\n image = tf.reshape(self.screen_input, [-1, self.wh, self.wh, 1])\n\n net = tf.contrib.layers.conv2d(inputs=image, num_outputs=16, kernel_size=5, padding='same', activation_fn=tf.nn.relu)\n conv_net = tf.contrib.layers.conv2d(inputs=net, num_outputs=32, kernel_size=3, padding='same', activation_fn=tf.nn.relu)\n\n logits = tf.contrib.layers.conv2d(conv_net, num_outputs=1, kernel_size=1, activation_fn=None)\n\n self.output = tf.nn.softmax(tf.contrib.layers.flatten(logits))\n\n # training part of graph\n self._acts = tf.placeholder(tf.float32)\n self._advantages = tf.placeholder(tf.float32)\n\n # loss function\n loss = tf.log(tf.reduce_sum(tf.multiply(self._acts, self.output))) * self._advantages\n # entropy = tf.reduce_sum(tf.multiply(self.output, tf.log(tf.clip_by_value(self.output, 1e-12, 1.))))\n # loss += 0.001 * tf.reduce_mean(entropy)\n self._train = tf.train.AdamOptimizer(learning_rate).minimize(-loss)\n\n self.saver = tf.train.Saver()\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session.run(tf.global_variables_initializer())\n\n def train(self):\n states, actions, advantages = self.memory.get()\n self.session.run(self._train, feed_dict={\n self.screen_input: states,\n self._acts: actions,\n self._advantages: advantages\n })\n self.memory.delete()\n\n def get_action(self, state):\n return self.session.run(self.output, feed_dict={self.screen_input: [state]})\n\n def save(self):\n self.saver.save(self.session, './policy_model.ckpt')\n\n\nclass ReplayMemory:\n def __init__(self):\n self.states = []\n self.actions = []\n self.advantages = []\n\n # Add the run to the memory\n def add(self, states, actions, rewards):\n self.states.extend(states)\n self.actions.extend(actions)\n self.advantages.extend(rewards)\n\n # Get the full run\n def get(self):\n # Normalise the rewards\n self.advantages = (self.advantages - np.mean(self.advantages)) // (np.std(self.advantages) + 1e-10)\n return self.states, self.actions, self.advantages\n\n # Delete the runs after each training session\n def delete(self):\n del self.states[:]\n del self.actions[:]\n self.advantages = []\n\n"
},
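The memory in policy_param.py stores discounted, normalized advantages. A self-contained sketch of both steps (the discount_rewards method itself lives in the agent file, so this is an assumed reconstruction, not the repo's exact code):

import numpy as np

def discounted_returns(rewards, gamma=0.99):
    # Backward pass computing G_t = r_t + gamma * G_{t+1}.
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

def normalize(advantages):
    # True division: floor division here would quantize the advantages
    # to integers and destroy most of the gradient signal.
    advantages = np.asarray(advantages, dtype=np.float64)
    return (advantages - advantages.mean()) / (advantages.std() + 1e-10)

print(normalize(discounted_returns([0, 0, 1])))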
{
"alpha_fraction": 0.5870625376701355,
"alphanum_fraction": 0.5957581996917725,
"avg_line_length": 37.95867919921875,
"blob_id": "c8d3150684a737bc695a889a9030f6dd87b2bf83",
"content_id": "c3d5481bcb9a84f55900f761bcdf4dee737ff7ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4715,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 121,
"path": "/policy.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport random\n\n\nclass Model:\n def __init__(self, input_size, input_flat, minimap_size, minimap_flat,\n army_input, action_size, learning_rate, memory):\n self.wh = input_size\n self.input_flat = input_flat\n self.mm_wh = minimap_size\n self.minimap_flat = minimap_flat\n self.army_input = army_input\n self.num_actions = action_size\n self.memory = memory\n\n self.screen_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='input')\n self.minimap_input = tf.placeholder(tf.float32, shape=[None, self.minimap_flat], name='minimap_input')\n self.army_input = tf.placeholder(tf.float32, shape=[None, 1], name='army_input')\n self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n\n x_image = tf.reshape(self.screen_input, [-1, self.wh, self.wh, 1])\n mm_image = tf.reshape(self.minimap_input, [-1, self.mm_wh, self.mm_wh, 1])\n\n init = tf.random_normal_initializer\n\n # create the network\n net = x_image\n\n net = tf.layers.conv2d(inputs=net, filters=8, kernel_size=5, padding='same', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=16, kernel_size=5, padding='same', activation=tf.nn.relu)\n\n net = tf.contrib.layers.flatten(net)\n\n mm_image = tf.layers.conv2d(inputs=mm_image, filters=4, kernel_size=5, padding='same', activation=tf.nn.relu)\n mm_image = tf.layers.conv2d(inputs=mm_image, filters=8, kernel_size=5, padding='same', activation=tf.nn.relu)\n\n mm_image = tf.contrib.layers.flatten(mm_image)\n\n x_army = tf.layers.dense(inputs=self.army_input, units=9, activation=tf.nn.relu, kernel_initializer=init)\n\n dense_1 = tf.concat([net, mm_image, x_army], 1)\n\n hidden1 = tf.contrib.layers.fully_connected(\n inputs=dense_1,\n num_outputs=36,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.random_normal_initializer\n )\n\n logits = tf.contrib.layers.fully_connected(\n inputs=hidden1,\n num_outputs=self.num_actions,\n activation_fn=tf.nn.softmax\n )\n\n self.output = logits\n\n # training part of graph\n self._acts = tf.placeholder(tf.float32)\n self._advantages = tf.placeholder(tf.float32)\n\n # loss function\n loss = tf.log(tf.reduce_sum(tf.multiply(self._acts, self.output))) * self._advantages\n self._train = tf.train.AdamOptimizer(learning_rate).minimize(-loss)\n\n self.saver = tf.train.Saver()\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session.run(tf.global_variables_initializer())\n\n def train(self):\n print('== TRAINING ==')\n states, minimaps, army_selected, actions, advantages = self.memory.get()\n self.session.run(self._train, feed_dict={\n self.screen_input: states,\n self.minimap_input: minimaps,\n self.army_input: army_selected,\n self._acts: actions,\n self._advantages: advantages\n })\n self.memory.delete()\n\n def get_action(self, state, minimap, army):\n return self.session.run(self.output, feed_dict={self.screen_input: [state],\n self.minimap_input: [minimap],\n self.army_input: [army]}\n )\n\n def save(self):\n self.saver.save(self.session, './policy_model.ckpt')\n\n\nclass ReplayMemory:\n def __init__(self):\n self.states = []\n self.minimap_states = []\n self.army_selected = []\n self.actions = []\n self.advantages = []\n\n # Add the run to the memory\n def add(self, states, minimap_states, army_state, actions, rewards):\n self.states.extend(states)\n self.minimap_states.extend(minimap_states)\n self.army_selected.extend(army_state)\n self.actions.extend(actions)\n self.advantages.extend(rewards)\n\n # 
Get the full run\n    def get(self):\n        # Normalise the rewards (true division: floor division would quantize them)\n        self.advantages = (self.advantages - np.mean(self.advantages)) / (np.std(self.advantages) + 1e-10)\n        return self.states, self.minimap_states, self.army_selected, self.actions, self.advantages\n\n    # Delete the runs after each training session\n    def delete(self):\n        del self.states[:]\n        del self.minimap_states[:]\n        del self.army_selected[:]\n        del self.actions[:]\n        self.advantages = []\n\n"
},
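The policy-gradient loss in policy.py takes tf.log of the selected-action probability, which hits log(0) once a sampled action's probability underflows. A common guard is clipping inside the log; a per-example TF1-style sketch (the placeholder shapes are illustrative):

import tensorflow as tf

output = tf.placeholder(tf.float32, [None, 16])   # softmax policy output
acts = tf.placeholder(tf.float32, [None, 16])     # one-hot chosen actions
advantages = tf.placeholder(tf.float32, [None])

picked = tf.reduce_sum(output * acts, axis=1)     # prob of the taken action
log_prob = tf.log(tf.clip_by_value(picked, 1e-10, 1.0))
loss = -tf.reduce_mean(log_prob * advantages)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)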
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11,
"blob_id": "b0a3ab5dceb6ddd82255fcf28a9a5e9b79e03d7a",
"content_id": "d97038037b8477ac7e5a4ff6d9f0b20b61be6b7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 1,
"path": "/README.md",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "# pysc2-bot\n"
},
{
"alpha_fraction": 0.5722543597221375,
"alphanum_fraction": 0.5834749937057495,
"avg_line_length": 38.21333312988281,
"blob_id": "049b2d2b8f345ce4b2fed039f6fa66b23615164d",
"content_id": "03b06ca9df341ff73e0ed067e8725487043280de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5882,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 150,
"path": "/ac_param.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport random\nfrom collections import deque\n\n\nclass ActorCriticModelCont:\n def __init__(self, input_size, input_flat, action_size, actor_lr, critic_lr, memory, gamma):\n self.wh = input_size\n self.input_flat = input_flat\n self.num_actions = action_size\n self.gamma = gamma\n self.memory = memory\n\n # Generic\n init = tf.truncated_normal_initializer(0., .01)\n\n # ===================================== #\n # Actor #\n # ===================================== #\n\n self.input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='input')\n self.actor_actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n self.td_error = tf.placeholder(tf.float32, shape=[None, 1], name='rewards')\n\n self.actor_lr = actor_lr\n\n image = tf.reshape(self.input, [-1, self.wh, self.wh, 1])\n\n conv_net = tf.layers.conv2d(inputs=image, filters=16, kernel_size=5, padding='same', activation=tf.nn.relu)\n conv_net = tf.layers.conv2d(inputs=conv_net, filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)\n #\n conv_net = tf.contrib.layers.flatten(conv_net)\n\n net = tf.layers.dense(inputs=conv_net, units=256, activation=tf.nn.relu, kernel_initializer=init, name='dense1')\n # net = tf.layers.dense(inputs=net, units=128, activation=tf.nn.relu, kernel_initializer=init, name='dense2')\n\n self.output = tf.layers.dense(inputs=net, units=self.num_actions, activation=tf.nn.softmax)\n\n loss = tf.log(tf.reduce_sum(tf.multiply(self.output, self.actor_actions))) * self.td_error\n self.optimiser = tf.train.AdamOptimizer(self.actor_lr).minimize(-loss)\n\n # ===================================== #\n # Critic #\n # ===================================== #\n\n # self.critic_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='critic_input')\n self.critic_td_target = tf.placeholder(tf.float32, shape=[None, 1], name='rewards')\n self.critic_lr = critic_lr\n\n critic_net = tf.layers.dense(inputs=conv_net, units=256, activation=tf.nn.relu, kernel_initializer=init)\n self.critic_output = tf.layers.dense(inputs=critic_net, units=1, activation=None, kernel_initializer=init)\n\n self.critic_loss = tf.reduce_mean(tf.squared_difference(self.critic_output, self.critic_td_target))\n self.critic_optimise = tf.train.AdamOptimizer(self.critic_lr).minimize(self.critic_loss)\n\n self.saver = tf.train.Saver()\n\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session.run(tf.global_variables_initializer())\n try:\n self.saver.restore(self.session, '/home/rob/Documents/uni/fyp/sc2/ac_model_param.ckpt')\n except:\n print('No model found - training new one')\n\n def train(self):\n if len(self.memory.memory) < 16:\n return\n\n td_targets = []\n td_errors = []\n\n mini_batch = self.memory.get_batch(self.memory.batch_size)\n\n states = [item[0] for item in mini_batch]\n actions = [item[1] for item in mini_batch]\n rewards = [item[2] for item in mini_batch]\n done = [item[3] for item in mini_batch]\n next_states = [item[4] for item in mini_batch]\n\n values = self.batch_predict(states)\n # next_values = self.batch_predict(next_states)\n\n for i in range(len(mini_batch)):\n if done[i]:\n td_targets.append([rewards[i]])\n else:\n td_targets.append(rewards[i] + self.gamma * self.predict(next_states[i]))\n\n td_errors.append(td_targets[-1] - values[i])\n\n # Training the critic\n self.session.run(self.critic_optimise, feed_dict={\n self.input: states,\n self.critic_td_target: td_targets\n })\n\n # Training the 
actor\n        self.session.run(self.optimiser, feed_dict={\n            self.input: states,\n            self.actor_actions: actions,\n            self.td_error: td_errors\n        })\n        del self.memory.memory[:]\n\n    def run(self, state):\n        return self.session.run(self.output, feed_dict={self.input: [state]})[0]\n\n    def batch_predict(self, states):\n        return self.session.run(self.critic_output, feed_dict={self.input: states})\n\n    def predict(self, state):\n        return self.session.run(self.critic_output, feed_dict={self.input: [state]})[0]\n\n    def save(self):\n        self.saver.save(self.session, '/home/rob/Documents/uni/fyp/sc2/ac_model_param.ckpt')\n\n\nclass ReplayMemory:\n    def __init__(self, batch_size, max_memory_size):\n        self.batch_size = batch_size\n        self.memory = []\n\n    # Add the current state, the action we took, the reward we got for it and\n    # whether it was the terminal (done) state for the ep\n    def add(self, state, action, reward, done):\n        self.update(state)\n        if len(self.memory) == 15:\n            done = True\n        self.memory.append([state, action, reward, done, None])\n        if reward == 1.:\n            self.memory.reverse()\n            running_add = 0\n            for m, mem in enumerate(self.memory):\n                _, _, r, d, _ = mem\n                if d and m > 0:\n                    break\n                running_add = running_add * 0.99 + r\n                # write the discounted return back into the reward slot\n                mem[2] = running_add\n            self.memory.reverse()\n\n    # Update the memory to include the next state\n    def update(self, next_state):\n        if len(self.memory) > 0 and not self.memory[-1][3]:\n            self.memory[-1][4] = next_state\n\n    def get_batch(self, batch_size=50):\n        return self.memory\n        # temp = self.memory\n        # temp.pop()\n        # return random.sample(temp, batch_size - 1)\n"
},
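The critic update in ac_param.py builds one-step TD targets and TD errors transition by transition. The same computation vectorized with numpy, as a sketch (the array names are illustrative):

import numpy as np

def td_targets_and_errors(rewards, values, next_values, dones, gamma=0.99):
    # One-step TD: target = r + gamma * V(s'), with no bootstrap at terminals.
    targets = np.where(dones, rewards, rewards + gamma * next_values)
    errors = targets - values
    return targets, errors

targets, errors = td_targets_and_errors(
    rewards=np.array([0.0, 1.0]),
    values=np.array([0.2, 0.5]),
    next_values=np.array([0.4, 0.0]),
    dones=np.array([False, True]),
)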
{
"alpha_fraction": 0.6081297397613525,
"alphanum_fraction": 0.6183419823646545,
"avg_line_length": 39.26612854003906,
"blob_id": "8f2566d7485a8ee224b3acf49edf1e8163d87f3d",
"content_id": "2f9d27d709bdf0e0f27187a995868ceaddc8b967",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4994,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 124,
"path": "/DQN.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom collections import deque\nimport random\n\n\nclass Model:\n def __init__(self, input_size, input_flat, minimap_size, minimap_flat,\n army_input, action_size, learning_rate, memory):\n self.wh = input_size\n self.input_flat = input_flat\n self.mm_wh = minimap_size\n self.minimap_flat = minimap_flat\n self.army_input = army_input\n self.num_actions = action_size\n self.memory = memory\n\n self.screen_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='input')\n self.minimap_input = tf.placeholder(tf.float32, shape=[None, self.minimap_flat], name='mm_input')\n self.army_input = tf.placeholder(tf.float32, shape=[None, self.input_flat], name='army_input')\n\n self.actions = tf.placeholder(tf.float32, shape=[None, self.num_actions], name='actions')\n self.rewards = tf.placeholder(tf.float32, shape=[None], name='rewards')\n\n x_image = tf.reshape(self.army_input, [-1, self.wh, self.wh, 1])\n # mm_image = tf.reshape(self.minimap_input, [-1, self.mm_wh, self.mm_wh, 1])\n\n init = tf.truncated_normal_initializer(0, 0.01)\n\n # create the network\n net = x_image\n\n net = tf.layers.conv2d(inputs=net, filters=8, kernel_size=5, padding='same', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=16, kernel_size=5, padding='same', activation=tf.nn.relu)\n\n net = tf.contrib.layers.flatten(net)\n\n net = tf.layers.dense(inputs=net, units=64, activation=tf.nn.relu, kernel_initializer=init, name='dense1')\n\n net = tf.layers.dense(inputs=net, units=self.num_actions, activation=None, kernel_initializer=init)\n\n self.output = net\n\n q_reward = tf.reduce_sum(tf.multiply(self.output, self.actions), 1)\n loss = tf.reduce_mean(tf.squared_difference(self.rewards, q_reward))\n self.optimiser = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n self.saver = tf.train.Saver()\n\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n self.session.run(tf.global_variables_initializer())\n try:\n self.saver.restore(self.session, '/home/rob/Documents/uni/fyp/sc2/model.ckpt')\n self.loaded_model = True\n except:\n print('No model found - training new one')\n self.loaded_model = False\n\n def train(self):\n if len(self.memory.memory) - 1 < self.memory.batch_size:\n return\n states, minimaps, army_selected, actions, rewards = self.memory.get_batch(self, self.memory.batch_size)\n self.session.run(self.optimiser, feed_dict={\n self.army_input: army_selected,\n self.actions: actions,\n self.rewards: rewards}\n )\n\n def get_action(self, state):\n screen, mini_map, army = state\n feed_dict = {self.army_input: [army]}\n return self.session.run(self.output, feed_dict)[0]\n\n def get_batch_action(self, states, minimap, army_selected):\n return self.session.run(self.output, feed_dict={self.screen_input: states, self.minimap_input: minimap,\n self.army_input: army_selected})\n\n def save(self):\n self.saver.save(self.session, '/home/rob/Documents/uni/fyp/sc2/model.ckpt')\n\n\nclass ReplayMemory:\n def __init__(self, num_actions, batch_size, max_memory_size, gamma):\n self.num_actions = num_actions\n self.batch_size = batch_size\n self.memory = deque(maxlen=max_memory_size)\n self.gamma = gamma\n\n # Add the current state, the action we took, the reward we got for it and\n # whether it was the terminal (done) state for the ep\n def add(self, state, action, reward, done):\n actions = np.zeros(self.num_actions)\n actions[action] = 1\n self.update(state)\n self.memory.append([state, actions, reward, done, 
None])\n\n # Update the memory to include the next state\n def update(self, next_state):\n if len(self.memory) > 0:\n if not self.memory[-1][3]:\n self.memory[-1][4] = next_state\n\n def get_batch(self, model, batch_size=50):\n mini_batch = np.array(random.sample(self.memory, batch_size))\n\n states = [item[0][0] for item in mini_batch]\n minimap_states = [item[0][1] for item in mini_batch]\n army_selected = [item[0][2] for item in mini_batch]\n\n actions = [item[1] for item in mini_batch]\n rewards = [item[2] for item in mini_batch]\n done = [item[3] for item in mini_batch]\n\n next_states = mini_batch[:, 4]\n\n y_batch = []\n\n for i in range(batch_size):\n if done[i]:\n y_batch.append(rewards[i])\n else:\n y_batch.append(rewards[i] + self.gamma * np.max(model.get_action(next_states[i])))\n\n return states, minimap_states, army_selected, actions, y_batch\n\n"
},
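DQN.py combines a bounded replay buffer with Q-learning targets that bootstrap only on non-terminal transitions. A minimal sketch of both pieces, assuming only numpy:

import random
from collections import deque

import numpy as np

class SimpleReplay:
    # Fixed-capacity transition store with uniform random sampling.
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, done, next_state):
        self.buffer.append((state, action, reward, done, next_state))

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        return [np.array(column) for column in zip(*batch)]

def q_targets(rewards, dones, next_q_max, gamma=0.99):
    # r + gamma * max_a' Q(s', a'), zeroing the bootstrap at terminal states.
    return rewards + gamma * next_q_max * (1.0 - dones.astype(np.float32))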
{
"alpha_fraction": 0.6317365169525146,
"alphanum_fraction": 0.667664647102356,
"avg_line_length": 27.4255313873291,
"blob_id": "77b8a2797a95b025e5302fecda68b57ad9c2e023",
"content_id": "2750525582ef1b6322afab74030fc7928a29e8d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1336,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 47,
"path": "/visualise.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "import pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.path as path\nimport matplotlib.animation as animation\nimport numpy as np\n#\n# actions = pickle.load(open('actions.pkl', 'rb'))\n# rewards1 = pickle.load(open('policy_rewards1.pkl', 'rb'))\n# rewards2 = pickle.load(open('policy_rewards2.pkl', 'rb'))\n# rewards3 = pickle.load(open('policy_rewards3.pkl', 'rb'))\n# rewards4 = pickle.load(open('policy_rewards4.pkl', 'rb'))\n# rewards5 = pickle.load(open('policy_rewards5.pkl', 'rb'))\n_rewards = list(pickle.load(open('rewards4.pkl', 'rb')))\n# _rewards1 = list(pickle.load(open('rewards.pkl1', 'rb')))\n# _rewards2 = list(pickle.load(open('rewards2.pkl', 'rb')))\n# _rewards3 = list(pickle.load(open('rewards.pkl', 'rb')))\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1)\ncount = 0\n\n\ndef animate(i):\n ax1.clear()\n ax1.plot(_rewards[:i])\n # ax1.plot(rewards2[:i])\n # ax1.plot(rewards3[:i])\n # ax1.plot(rewards4[:i])\n # ax1.plot(rewards5[:i])\n\n\nani = animation.FuncAnimation(fig, animate, interval=50, repeat=False)\n# plt.plot(rewards)\nplt.show()\n# #\n# ax2 = fig.add_subplot(1, 1, 1)\n# ax2.set_xlim(0, 16)\n#\n#\n# def animate_hist(i):\n# ax2.clear()\n# ax2.hist(actions[:i*240])\n#\n#\n# ani = animation.FuncAnimation(fig, animate_hist, interval=50, repeat=False)\n# plt.show()\n"
},
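visualise.py animates the raw reward curves, which are noisy for this task. A rolling mean makes the learning trend easier to read; a sketch with synthetic stand-in data (the pickled reward lists are assumed to be plain sequences of per-episode scores):

import numpy as np
import matplotlib.pyplot as plt

def moving_average(xs, window=100):
    # Simple rolling mean via convolution with a uniform kernel.
    xs = np.asarray(xs, dtype=np.float64)
    kernel = np.ones(window) / window
    return np.convolve(xs, kernel, mode='valid')

rewards = np.random.randint(0, 30, size=2000)  # stand-in for pickled rewards
plt.plot(rewards, alpha=0.3, label='raw')
plt.plot(np.arange(99, 2000), moving_average(rewards), label='100-episode mean')
plt.legend()
plt.show()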
{
"alpha_fraction": 0.6133720874786377,
"alphanum_fraction": 0.6214008927345276,
"avg_line_length": 36.226802825927734,
"blob_id": "440bdee588e63f307d03e8d5c754856adb9abbaf",
"content_id": "c59473bda4fcd8a9107775c130a78eb587a65f5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7224,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 194,
"path": "/ac_agent.py",
"repo_name": "rob-deans/pysc2-bot",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\nfrom actor_critic import ReplayMemory\nfrom actor_critic import ActorCriticModel\n\nimport numpy as np\nfrom collections import deque\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_MM_PLAYER_RELATIVE = features.MINIMAP_FEATURES.player_relative.index\n_SELECT = features.SCREEN_FEATURES.selected.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_CAMERA = actions.FUNCTIONS.move_camera.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_SELECT_RECT = actions.FUNCTIONS.select_rect.id\n_SELECT_CONTROL_GROUP = actions.FUNCTIONS.select_control_group.id\n_STOP_QUICK = actions.FUNCTIONS.Stop_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_MOVE_MINIMAP = actions.FUNCTIONS.Move_minimap.id\n_PATROL_SCREEN = actions.FUNCTIONS.Patrol_screen.id\n_PATROL_MINIMAP = actions.FUNCTIONS.Patrol_minimap.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HOLD_POSITION_QUICK = actions.FUNCTIONS.HoldPosition_quick.id\n_SMART_SCREEN = actions.FUNCTIONS.Smart_screen.id\n_SMART_MINIMAP = actions.FUNCTIONS.Smart_minimap.id\n\n# All the available actions\navailable_actions = [\n _NO_OP,\n _MOVE_CAMERA,\n _SELECT_POINT,\n _SELECT_RECT,\n _SELECT_CONTROL_GROUP,\n _STOP_QUICK,\n _SELECT_ARMY,\n _ATTACK_SCREEN,\n _MOVE_SCREEN,\n _MOVE_MINIMAP,\n _PATROL_SCREEN,\n _PATROL_MINIMAP,\n _ATTACK_MINIMAP,\n _HOLD_POSITION_QUICK,\n _SMART_SCREEN,\n _SMART_MINIMAP\n]\n\n\nclass MoveToBeacon(base_agent.BaseAgent):\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n def __init__(self):\n super(MoveToBeacon, self).__init__()\n self.num_actions = len(available_actions)\n # Screen sizes\n self.input_flat = 84*84 # Size of the screen\n self.wh = 84\n # Minimap sizes\n self.mm_input_flat = 64*64\n self.mm_wh = 64\n\n self.batch_size = 32\n self.max_memory_size = 2000\n\n self.gamma = .99\n self.actor_lr = 1e-3\n self.critic_lr = 5e-3\n\n self.actions = []\n self.states = []\n self.minimap_states = []\n self.army_state = []\n self.done = []\n\n # Stat count\n self.total_rewards = deque(maxlen=100)\n self.episode_reward = 0\n self.actions_taken = np.zeros(self.num_actions)\n\n self.memory = ReplayMemory(self.batch_size, self.max_memory_size)\n self.model = ActorCriticModel(self.wh, self.input_flat, self.mm_wh, self.mm_input_flat,\n self.num_actions, self.actor_lr, self.critic_lr,\n self.memory, self.gamma)\n\n def step(self, obs):\n # Current observable state\n screen_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n current_state = screen_relative.flatten()\n mini_map_relative = obs.observation['minimap'][_MM_PLAYER_RELATIVE]\n minimap_state = mini_map_relative.flatten()\n army_state = obs.observation['screen'][_SELECT].flatten()\n\n if len(self.memory.memory) > 0:\n self.memory.update(army_state)\n self.model.train()\n\n super(MoveToBeacon, self).step(obs)\n\n legal_actions = obs.observation['available_actions']\n\n feed_dict = {self.model.army_selected: [army_state]}\n\n output = self.model.session.run(self.model.output, feed_dict)[0]\n out = redistribute(output, legal_actions)\n try:\n action = 
int(np.argmax(np.random.multinomial(1, out)))\n except ValueError:\n action = int(np.argmax(np.random.multinomial(1, out / (1 + 1e-6))))\n\n self.actions_taken[int(action)] += 1\n\n self.states.append(current_state)\n self.minimap_states.append(minimap_state)\n self.army_state.append(army_state)\n\n actions_oh = np.zeros(self.num_actions)\n actions_oh[action] = 1\n self.actions.append(actions_oh)\n\n reward = obs.reward\n self.episode_reward += reward\n\n if obs.last():\n # if self.episode_reward == 0:\n # reward = -100\n # Printing out the stats\n self.total_rewards.append(self.episode_reward)\n self.episode_reward = 0\n if self.episodes % 100 == 0 and self.episodes > 0:\n self.model.save()\n print('Highest: {} | Lowest: {} | Average: {} | Time steps: {}'.format(\n max(self.total_rewards),\n min(self.total_rewards),\n np.mean(self.total_rewards),\n self.steps\n )\n )\n print(self.actions_taken)\n\n self.memory.add(army_state, actions_oh, reward, obs.last())\n\n # The group of actions to take\n if available_actions[action] == _NO_OP:\n return actions.FunctionCall(_NO_OP, [])\n elif available_actions[action] == _SELECT_ARMY:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])\n elif available_actions[action] == _ATTACK_SCREEN \\\n or available_actions[action] == _MOVE_SCREEN \\\n or available_actions[action] == _PATROL_SCREEN \\\n or available_actions[action] == _SMART_SCREEN:\n # This is the scripted one\n neutral_y, neutral_x = (screen_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n elif available_actions[action] == _STOP_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _HOLD_POSITION_QUICK:\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED])\n elif available_actions[action] == _ATTACK_MINIMAP \\\n or available_actions[action] == _MOVE_MINIMAP \\\n or available_actions[action] == _PATROL_MINIMAP \\\n or available_actions[action] == _SMART_MINIMAP:\n neutral_y, neutral_x = (mini_map_relative == _PLAYER_NEUTRAL).nonzero()\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(available_actions[action], [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_NO_OP, [])\n\n\n# Defines if we have the potential of picking an illegal action and how we redistribute the probabilities\ndef redistribute(output, legal_actions):\n for i, action in enumerate(available_actions):\n if action not in legal_actions:\n output[i] = 0\n if sum(output) == 0:\n for i, a in enumerate(available_actions):\n if a in legal_actions:\n output[i] = float(1/len(legal_actions))\n else:\n output /= sum(output)\n return output\n\n\n"
}
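The try/except around np.random.multinomial in the agents above works around float32 probabilities that can sum to slightly more than 1 after the softmax. Renormalizing in float64 before sampling avoids the retry; a sketch assuming only numpy:

import numpy as np

def sample_action(probs):
    # multinomial raises ValueError when the probabilities overshoot 1, which
    # float32 softmax outputs occasionally trigger, hence the renormalization.
    probs = np.asarray(probs, dtype=np.float64)
    probs /= probs.sum()
    return int(np.argmax(np.random.multinomial(1, probs)))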
] | 14 |
yaph/geo-files
|
https://github.com/yaph/geo-files
|
23b10644ca6d962f85c733cb1605e8640846841a
|
1a514182432ab3a7a689e36a4bc1a9b302b99e32
|
cb714f78c66caf2fbec3c3c2a0ac1e1cceb3043b
|
refs/heads/master
| 2016-09-07T04:43:21.589581 | 2014-10-20T09:08:49 | 2014-10-20T09:08:49 | 13,632,800 | 4 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5627118349075317,
"alphanum_fraction": 0.5864406824111938,
"avg_line_length": 20.14285659790039,
"blob_id": "5c3ad303cae53e8922042ee6e6c05cd3f4fb7589",
"content_id": "71aa5ada3e28ddfbb14e2cc05a9fda3de8ac3670",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 14,
"path": "/worldmap.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom kartograph import Kartograph\n\nK = Kartograph()\n\ncfg = {\n 'layers': [{\n 'id': 'country',\n 'src': 'shp/ne_50m_admin_0_countries.shp'\n }]\n}\nsvg = 'svg/world.svg'\nK.generate(cfg, outfile=svg, stylesheet='#country {fill: #000;}')"
},
{
"alpha_fraction": 0.6256377696990967,
"alphanum_fraction": 0.6409438848495483,
"avg_line_length": 26.05172348022461,
"blob_id": "823714684368dd5d5b12e3981ecffda3b0c8fd0e",
"content_id": "7cf97ca41c41f42078d7f7dd81534bee955af8fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1568,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 58,
"path": "/lico.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# data from http://linuxcounter.net/xml/where_users_live.xml\nimport numpy\nimport colorbrewer\n\nfrom bs4 import BeautifulSoup\n\n# mapping of ISO2 country codes to data\nlico_data = {}\n\nwith open('where_users_live.xml', 'r') as f:\n lico_xml = f.read()\n\nlico_soup = BeautifulSoup(lico_xml, 'xml')\nfor line in lico_soup.find_all('line'):\n iso2 = line.find('code').text.lower()\n user_count = float(line.find('users').text)\n pop_count = float(line.find('population').text)\n lico_data[iso2] = user_count\n\nuser_counts = list(lico_data.values())\nmin_users = min(user_counts)\nmax_users = max(user_counts)\n\ncolor_scheme = colorbrewer.Blues[9]\nbins = numpy.linspace(min_users, max_users, len(color_scheme))\n\nwith open('svg/world-choropleth.svg', 'r') as f:\n map_svg = f.read()\n\nmap_soup = BeautifulSoup(map_svg, 'xml')\n\n#FIXME append country classes to <style id=\"style_css_sheet\" type=\"text/css\">\n\nfor path in map_soup.find_all(['path', 'g', 'circle']):\n classes = path.attrs.get('class', '').split()\n\n if not classes or classes[0] not in ['landxx', 'circlexx', 'unxx']:\n continue\n\n if classes[1] == 'coastxx':\n iso2 = classes[2]\n elif classes[1] == 'subxx':\n iso2 = classes[3]\n else:\n iso2 = classes[1]\n\n country_val = lico_data.get(iso2, None)\n if not country_val:\n continue\n\n color = color_scheme[numpy.digitize([country_val], bins)[0] - 1]\n path['style'] = 'fill:#%02x%02x%02x;' % color\n\n\nwith open('svg/lico.svg', 'w') as f:\n f.write(str(map_soup))"
},
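lico.py classifies user counts into a 9-class ColorBrewer scheme with numpy.linspace and numpy.digitize. The binning step on its own, with made-up counts (the values here are illustrative):

import numpy as np

values = {'de': 120000.0, 'fr': 80000.0, 'is': 900.0}
counts = list(values.values())
bins = np.linspace(min(counts), max(counts), 9)  # edges for a 9-class scheme

for iso2, count in values.items():
    # np.digitize returns a 1-based bin index, hence the -1 used above.
    cls = np.digitize([count], bins)[0] - 1
    print(iso2, cls)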
{
"alpha_fraction": 0.604519784450531,
"alphanum_fraction": 0.6610169410705566,
"avg_line_length": 28.66666603088379,
"blob_id": "aea770933cc4122d0208b12c21bf27602510a4bb",
"content_id": "319960b6874451eb7911cd98b23f9605f2d88c46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 6,
"path": "/mkimg.sh",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nFORMAT=$1\n[ $# -eq 0 ] && { echo \"Usage: $0 format\"; exit 1; }\n\nmogrify -resize 600x600 -format $FORMAT svg/countries/*.svg\nmv svg/countries/*.$FORMAT img/countries/"
},
{
"alpha_fraction": 0.7401315569877625,
"alphanum_fraction": 0.7730262875556946,
"avg_line_length": 59.79999923706055,
"blob_id": "94142e0f92317eef24233ef06b3d126264e2a862",
"content_id": "ba2922ae6c505cbbff97e2d7dadd3f8cae180c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 5,
"path": "/dl.sh",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ncd source_data\nwget http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/cultural/ne_10m_admin_0_countries.zip\nwget http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/50m/cultural/ne_50m_admin_0_countries.zip\nfind . -name '*.zip' -exec unzip {} \\;\n"
},
{
"alpha_fraction": 0.4749999940395355,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 19.5,
"blob_id": "7f086f20622168761c899f315fc058107a9fda0a",
"content_id": "5d3e8ebb243a70c9da2cf07a86f2f7843d2f8063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 2,
"path": "/mappic.sh",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nconvert -resize 1200x616 $1 $2"
},
{
"alpha_fraction": 0.7195512652397156,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 25.04166603088379,
"blob_id": "6169a89181e4185ea2e9ac844c857f9cef907b0c",
"content_id": "6038a901531ef80ed1f2e0cdc45015f97cd44784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 24,
"path": "/redacted_world.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport simplemapplot\nimport pandas as pd\nfrom geonamescache import GeonamesCache\n\n\nparser = argparse.ArgumentParser(\n description='Create a Choropleth world map without a legend.')\nparser.add_argument('file', help='CSV data file')\nargs = parser.parse_args()\n\ncolors = [\"#ffffff\", '#ff0000']\ncountries = GeonamesCache().get_countries_by_names()\n\ndf = pd.read_csv(args.file)\ncountry_data = df['Country']\n\ncolorize = {}\nfor name in country_data:\n colorize[countries[name]['iso'].lower()] = 1\n\nsimplemapplot.make_world_country_map(data=colorize, colors=colors)"
},
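redacted_world.py relies on geonamescache to translate country names from the CSV into ISO2 codes. The lookup in isolation, using only the get_countries_by_names() call already shown above (the example names are illustrative):

from geonamescache import GeonamesCache

countries = GeonamesCache().get_countries_by_names()

def to_iso2(name):
    # Returns the lowercase ISO-3166 alpha-2 code, or None for unknown names.
    record = countries.get(name)
    return record['iso'].lower() if record else None

print(to_iso2('Germany'))   # 'de'
print(to_iso2('Atlantis'))  # None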
{
"alpha_fraction": 0.7684630751609802,
"alphanum_fraction": 0.7684630751609802,
"avg_line_length": 28.47058868408203,
"blob_id": "9d7d746bcb524f6eb0c694d909c74c0e195a19dc",
"content_id": "2b3d3869239308894e2e6945e570c400f0f7dab6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 17,
"path": "/README.md",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "# Geo files\n\nA repository of geography data including ESRI shapefiles, SVG, and JPG images\nfor the world's countries and the scripts to generate these files.\n\n## Data Sources\n\n### Shapefiles\n[Natural Earth](http://www.naturalearthdata.com/)\n\n### Maps\n\n[BlankMap World Microstates](https://commons.wikimedia.org/wiki/File:BlankMap-World-Microstates.svg)\n\n### CSV\n\n[List of countries that border only one other country](http://en.wikipedia.org/wiki/List_of_countries_that_border_only_one_other_country)\n"
},
{
"alpha_fraction": 0.6023579835891724,
"alphanum_fraction": 0.6227223873138428,
"avg_line_length": 27.303030014038086,
"blob_id": "c4b92e9046952c6442f1fb8dc3cb1fc14007366a",
"content_id": "deb4706cec12eb9cd5b8849f35e05ef8bece0d79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 33,
"path": "/mkshp.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport fiona\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(description='Create shapefiles for countries.')\nparser.add_argument('shp', help='Source shapefile')\nparser.add_argument('--iso3', help='ISO 3166-1 alpha-3 country code.')\nargs = parser.parse_args()\n\n\nwith fiona.open(args.shp) as source:\n driver = source.driver\n schema = source.schema\n\n for country in source:\n try:\n iso3 = country['properties']['adm0_a3']\n except KeyError:\n iso3 = country['properties']['ADM0_A3']\n\n if args.iso3 and iso3 != args.iso3:\n continue\n\n target_dir = 'shp/countries/%s' % iso3\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n with fiona.open(os.path.join(target_dir, '%s.shp' % iso3), 'w',\n driver=driver, schema=schema) as target:\n\n target.write(country)"
},
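mkshp.py iterates a Natural Earth shapefile with fiona and reads the ISO3 attribute under either of two casings. A sketch that just lists the codes present, assuming the ne_50m shapefile path used elsewhere in this repo:

import fiona

with fiona.open('shp/ne_50m_admin_0_countries.shp') as source:
    codes = set()
    for feature in source:
        props = feature['properties']
        code = props.get('ADM0_A3') or props.get('adm0_a3')
        if code:
            codes.add(code)

print(sorted(codes))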
{
"alpha_fraction": 0.4982078969478607,
"alphanum_fraction": 0.6953405141830444,
"avg_line_length": 14.5,
"blob_id": "3e6ca01f1e52e17e4783206b25174f7f5da37ee9",
"content_id": "8b7c6a45a963fe7504212e90ffd42d7c62b3e03a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 18,
"path": "/requirements.txt",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "Fiona==1.0.2\nPySAL==1.6.0\nPyYAML==3.10\nShapely==1.2.18\naggdraw==1.1-64bits\ncolorbrewer==0.1.1\ndescartes==1.0\ngcmap==0.0.3\ngeonamescache==0.10\nkartograph.py==0.6.7\nlxml==3.2.3\nordereddict==1.1\npykml==0.1.0\npyproj==1.9.3\npyshp==1.2.0\npytz==2013.7\nsimplemapplot==1.0.3\ntinycss==0.3\n"
},
{
"alpha_fraction": 0.5487364530563354,
"alphanum_fraction": 0.5649819374084473,
"avg_line_length": 24.227272033691406,
"blob_id": "012be5e3cb80e88c56fa1f67c8cbb2a213239ae0",
"content_id": "bb1bfd327d18bb8145b7a1338e5cb783a949e9d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 22,
"path": "/mksvg.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom kartograph import Kartograph\nimport os\n\nK = Kartograph()\ncss = '#country {fill: #000;}'\n\nfor iso3 in os.listdir('shp/countries'):\n source_file = 'shp/countries/%s/%s.shp' % (iso3, iso3)\n target_file = 'svg/countries/%s.svg' % iso3\n cfg = {\n 'layers': [{\n 'id': 'country',\n 'src': source_file,\n }]\n }\n\n try:\n K.generate(cfg, outfile=target_file, stylesheet=css)\n except Exception as err:\n print(('Exception for country %s:\\n%r' % iso3))"
},
{
"alpha_fraction": 0.5317919254302979,
"alphanum_fraction": 0.5433526039123535,
"avg_line_length": 18.22222137451172,
"blob_id": "b70e7f6e7d4a245594fdb04c92482473098b415c",
"content_id": "68e406ce7c143f9d418fdbdbd964aa4462efcea9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 9,
"path": "/geonames_palindromes.py",
"repo_name": "yaph/geo-files",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\npals = []\ndf = pd.read_csv('csv/DE.txt', sep='\\t', header=None)\n\nfor n in df[1]:\n name = n.lower()\n if name == name[::-1]:\n pals.append(n)\n"
}
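geonames_palindromes.py compares each name against its reverse after lowercasing. A slightly more forgiving check that also ignores spaces and hyphens (a sketch; how strict to be depends on the data):

def is_palindrome(name):
    # Case-insensitive palindrome test over alphanumeric characters only.
    s = ''.join(c for c in name.lower() if c.isalnum())
    return s == s[::-1]

print(is_palindrome('Burggrub'))  # True
print(is_palindrome('Berlin'))    # False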
] | 11 |
gmarcotte/face-django
|
https://github.com/gmarcotte/face-django
|
81b805c8c79c6a4058d4b27493ebcf76ee945640
|
fc226f0fc8208ec0948f89ff081eb2f2e32b1234
|
05853bfa6fb2f1212fea263f4c2cba1a269813b5
|
refs/heads/master
| 2016-09-10T20:06:00.853271 | 2011-07-10T11:00:38 | 2011-07-10T11:00:38 | 32,219,954 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7108433842658997,
"alphanum_fraction": 0.7108433842658997,
"avg_line_length": 25.22222137451172,
"blob_id": "a0ae60ff508f9c84ac6d871522dd83d345bc08a8",
"content_id": "17038049b98be3060a56ac670c76a1640ac2153b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/tradesocial/config/__init__.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\r\nfrom config.django import *\r\nfrom config.facebook import *\r\ntry:\r\n from config.prod import *\r\nexcept ImportError:\r\n try:\r\n from config.sandbox import *\r\n except ImportError:\r\n raise importError(\"You must define an environment config\")\r\n "
},
{
"alpha_fraction": 0.656956672668457,
"alphanum_fraction": 0.6607895493507385,
"avg_line_length": 30.148147583007812,
"blob_id": "2bb10ce6664678c48410d5f2051f5e7cf5780ee1",
"content_id": "3125f4cb45cc71d382a5987e070272f8e5c22a29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 81,
"path": "/tradesocial/auth/models.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\r\nfrom django.db import models\r\n\r\nimport facebook\r\n\r\nimport core.models\r\n\r\nimport datetime\r\n\r\nclass User(core.models.BaseModel):\r\n \r\n SESSION_KEY = '_fb_connect_user'\r\n \r\n # User's Facebook ID, this should never change\r\n fbid = models.CharField(primary_key=True)\r\n \r\n # The only data required to access the Facebook API\r\n access_token = models.CharField(max_length=255)\r\n \r\n # Access permissions the user has granted us\r\n permissions = models.CommaSeparatedIntegerField(max_length=255)\r\n \r\n # Data cached from the user's Facebook profile\r\n first_name = models.CharField(blank=True, max_length=50)\r\n last_name = models.CharField(blank=True, max_length=50)\r\n \r\n # Additional data\r\n # Nothing here yet...\r\n \r\n @staticmethod\r\n def logout(request):\r\n \"\"\"Deletes the user's current session\"\"\"\r\n request.session.flush()\r\n if hasattr(request, 'user'):\r\n request.user = None\r\n \r\n def login(self, request):\r\n \"\"\"Login a user by storing his information in the current session\"\"\"\r\n if User.SESSION_KEY in request.session:\r\n if request.session[User.SESSION_KEY] != self.fbid:\r\n # To avoid reusing another user's session, create a new, empty\r\n # session if the existing session corresponds to a different\r\n # authenticated and user.\r\n request.session.flush()\r\n else:\r\n request.session.cycle_key()\r\n request.session[User.SESSION_KEY] = self.fbid\r\n \r\n def getPermissions(self):\r\n \"\"\"Get the Facebook API permissions granted by this user.\"\"\"\r\n if not self.cached_permissions:\r\n perm_indices = map(int, self.permissions.split(','))\r\n perms = set()\r\n for i in perm_indices:\r\n perms.add(auth.permissions.ALL_PERMISSIONS[i])\r\n self.cached_permissions = perms\r\n return self.cached_permissions\r\n \r\n def hasPermission(self, perm):\r\n perms = self.getPermissions()\r\n return perm in perms\r\n \r\n def checkPermission(self, perms):\r\n return [perm for perm in perms if not self.hasPermission(perm)]\r\n \r\n def refresh(self, access_token):\r\n \"\"\"Connect to the Facebook API to refresh this user's data.\r\n Params:\r\n access_token: String, token to access the Facebook API.\r\n Returns:\r\n The updated User object.\r\n \"\"\"\r\n self.access_token = access_token\r\n graph = facebook.GraphAPI(self.access_token)\r\n profile = graph.get_object(\"me\")\r\n if (self.fbid != profile['id']):\r\n raise Exception('ID Mismatch')\r\n self.first_name = profile['first_name']\r\n self.last_name = profile['last_name']\r\n self.save()\r\n return self \r\n\t"
},
{
"alpha_fraction": 0.6610169410705566,
"alphanum_fraction": 0.6689929962158203,
"avg_line_length": 23.125,
"blob_id": "27a17c7b8065cb278238363a3944cd7753e421e3",
"content_id": "8d3df57b4cd6d9aa57520113aaed97aa2d7ec41d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1003,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 40,
"path": "/tradesocial/portfolios/models.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.db import models\r\n\r\nimport auth.models\r\nimport core.models\r\nimport securities.models\r\n\r\nclass Portfolio(core.models.BaseModel):\r\n user = models.ForeignKey(auth.models.User, reverse_name='portfolios')\r\n name = models.CharField(max_length=50)\r\n\r\nclass Transaction(core.models.BaseModel):\r\n portfolio = models.ForeignKey(Portfolio)\r\n date = models.DateField()\r\n notes = models.TextField()\r\n type = models.SmallIntegerField()\r\n \r\n def verboseType(self):\r\n return self.ORDER_TYPES[self.type]\r\n \r\n class Meta:\r\n abstract = True\r\n \r\nclass CashTransaction(Transaction):\r\n ORDER_TYPES = (\r\n (0, 'Deposit'),\r\n (1, 'Withdrawal'),\r\n )\r\n value = models.FloatField()\r\n\r\nclass SecurityTransaction(Transaction):\r\n ORDER_TYPES = (\r\n (0, 'Buy'),\r\n (1, 'Sell'),\r\n (2, 'Buy to Cover'),\r\n (3, 'Sell Short'),\r\n )\r\n security = models.ForeignKey(securities.models.Security)\r\n shares = models.FloatField()\r\n price = models.FloatField()\r\n commission = models.FloatField()"
},
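The Transaction models above keep (value, label) tuples and map them by hand. Inside a configured Django app, the built-in choices machinery does the same lookup; a sketch (the Order name is illustrative, not one of the models above):

from django.db import models

class Order(models.Model):
    ORDER_TYPES = (
        (0, 'Buy'),
        (1, 'Sell'),
    )
    type = models.SmallIntegerField(choices=ORDER_TYPES)

# order.get_type_display() then returns 'Buy' or 'Sell' without a
# hand-rolled verboseType() lookup.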
{
"alpha_fraction": 0.5720824003219604,
"alphanum_fraction": 0.5720824003219604,
"avg_line_length": 22.38888931274414,
"blob_id": "ef67bb22671879460b363ac13fa65afd19060108",
"content_id": "8c674825e32a674080f12008efc362ba78867b4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 18,
"path": "/tradesocial/templates/portfolio/index.html",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\r\n{% block title %}Portfolios{% endblock %}\r\n\r\n{% block body %}\r\n<form method=\"POST\" action=\"\" />\r\n<label for=\"new_name\">New Portfolio:</label>\r\n<input type=\"text\" name=\"new_name\" />\r\n<input type=\"submit\" label=\"Create\" />\r\n</form>\r\n\r\n<ul>\r\n{% for p in portfolios %}\r\n<li><a href=\"{{ p.detailUrl }}\">{{ p.name }}</a></li>\r\n{% empty %}\r\n<li>You don't have any portfolios.</li>\r\n{% endfor %}\r\n</ul>\r\n{% endblock %}"
},
{
"alpha_fraction": 0.5734127163887024,
"alphanum_fraction": 0.5742063522338867,
"avg_line_length": 20.70270347595215,
"blob_id": "a6d34f66e03a8944de5aa705ea027b3a551f900c",
"content_id": "3684bc772bf107104a906ef2770fd485b42f91a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2520,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 111,
"path": "/tradesocial/core/url.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\r\nclass URL:\r\n \r\n def __init__(self, path):\r\n # Setup defaults\r\n self.query_data = {}\r\n self.anchor = ''\r\n self.protocol = 'http://'\r\n self.path = '/'\r\n self.domain = ''\r\n self.subdomain = ''\r\n \r\n # Parse the given path\r\n self.setPath(path)\r\n \r\n def __str__(self):\r\n return self.toString()\r\n \r\n def toAbsoluteString(self):\r\n bits = [self.protocol, self.subdomain, '.', self.domain]\r\n bits.append(self.toRelativeString())\r\n \r\n def toRelativeString(self):\r\n bits = []\r\n bits.append(self.path)\r\n if self.query_data:\r\n bits.append('?')\r\n for key in self.query_data:\r\n bits.append('%s=%s' % (key, self.query_data[key]))\r\n if self.anchor:\r\n bits.append('#%s' % self.anchor)\r\n return ''.join(bits)\r\n \r\n def toString(self):\r\n if self.domain:\r\n return self.toAbsoluteString()\r\n else:\r\n return self.toRelativeString()\r\n \r\n def clearQueryData(self):\r\n self.setQueryData({})\r\n \r\n def addQueryData(self, key, value):\r\n self.query_data[key] = value\r\n \r\n def removeQueryData(self, keys):\r\n try:\r\n for key in keys:\r\n try:\r\n self.query_data.pop(key)\r\n except KeyError:\r\n pass\r\n except TypeError:\r\n try:\r\n self.query_data.pop(key)\r\n except KeyError:\r\n pass\r\n \r\n def appendQueryData(self, query_dict):\r\n self.query_data.extend(query_data)\r\n \r\n def setQueryData(self, query_data):\r\n self.query_data = query_data\r\n \r\n def setAnchor(self, anchor):\r\n self.anchor = anchor\r\n \r\n def clearAnchor(self):\r\n self.setAnchor('')\r\n \r\n def getAnchor(self):\r\n return self.anchor\r\n \r\n def setProtocol(self, protocol):\r\n if protocol.find('://') < 0:\r\n raise ValueError('Protocol must contain ://')\r\n self.protocol = protocol\r\n \r\n def getProtocol(self):\r\n return self.protocol\r\n \r\n def clearProtocol(self):\r\n self.setProtocol('http://')\r\n \r\n def setDomain(self, domain):\r\n self.domain = domain\r\n \r\n def getDomain(self):\r\n return self.domain\r\n \r\n def clearDomain(self):\r\n self.setDomain('')\r\n \r\n def setSubdomain(self, subdomain):\r\n self.subdomain = subdomain\r\n \r\n def getSubdomain(self):\r\n return self.subdomain\r\n \r\n def clearSubdomain(self):\r\n self.setSubdomain('')\r\n \r\n def setPath(self, path):\r\n if not path or path[0] != '/':\r\n path = '/%s' % path\r\n self.path = path\r\n \r\n def getPath(self):\r\n return self.path\r\n \r\n def clearPath(self):\r\n self.setPath('/')"
},
{
"alpha_fraction": 0.6630243062973022,
"alphanum_fraction": 0.6697215437889099,
"avg_line_length": 32.132530212402344,
"blob_id": "c5963752ab17c8d65dbe0f8d54a5bc4bd7a4c2f7",
"content_id": "f9749bee627d094789125b2c7ecd2e75c3b34035",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2837,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 83,
"path": "/tradesocial/securities/models.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.db import models\r\n\r\nimport core.models\r\nimport core.url\r\n\r\nimport datetime\r\nimport re\r\nimport urllib2\r\nfrom xml.dom import minidom\r\n\r\nclass Security(core.models.BaseModel):\r\n \r\n class SymbolInvalidError(Exception):\r\n pass\r\n \r\n # By default, how often we refresh the data\r\n DEFAULT_REFRESH_TIME = datetime.timedelta(minutes=15)\r\n \r\n symbol = models.CharField(primary_key=True, max_length=10)\r\n \r\n # Security details, changes rarely\r\n company = models.CharField(blank=True, max_length=255)\r\n exchange = models.CharField(blank=True, max_length=255)\r\n currency = models.CharField(blank=True, max_length=10)\r\n \r\n # Cached data, changes frequently\r\n refreshed_at = models.DateTimeField(null=True)\r\n last = models.FloatField(null=True)\r\n high = models.FloatField(null=True)\r\n low = models.FloatField(null=True)\r\n volume = models.FloatField(null=True)\r\n market_cap = models.FloatField(null=True)\r\n open = models.FloatField(null=True)\r\n y_close = models.FloatField(null=True)\r\n change = models.FloatField(null=True)\r\n perc_change = models.FloatField(null=True)\r\n \r\n @staticmethod\r\n def getFromSymbol(symbol):\r\n symbol = symbol.upper()\r\n if not re.match('^[A-Z0-9.]+$', symbol):\r\n raise Security.SymbolInvalidError(\r\n 'Symbol %s contains invalid characters' % symbol)\r\n try:\r\n security = Security.objects.get(pk=symbol)\r\n except Security.DoesNotExist:\r\n security = Security(symbol=symbol)\r\n if security.needsRefresh():\r\n security = security.refresh()\r\n return security\r\n \r\n def getSummaryUrl(self):\r\n return core.url.URL('/s/%s/' % self.symbol)\r\n \r\n def needsRefresh(self, refresh_time=None):\r\n if self.refreshed_at is None:\r\n return True\r\n if not isinstance(refresh_time, datetime.timedelta):\r\n refresh_time = Security.DEFAULT_REFRESH_TIME\r\n since_refresh = datetime.datetime.now() - self.refreshed_at\r\n return since_refresh >= refresh_time\r\n \r\n def refresh(self):\r\n url = 'http://www.google.com/ig/api?stock=%s' % self.symbol\r\n xml = urllib2.urlopen(url)\r\n dom = minidom.parse(xml)\r\n # Check if the symbol was valid\r\n # NB: Assumes all valid symbols will have a company name. Is this true?\r\n elems = dom.getElementsByTagName('company')\r\n if not elems or not elems[0].getAttribute('data'):\r\n raise Security.SymbolInvalidError('Symbol %s is not listed' % self.symbol)\r\n self.refreshed_at = datetime.datetime.now()\r\n attrs = [\r\n 'company', 'exchange', 'currency',\r\n 'last', 'high', 'low', 'volume', 'market_cap', 'open',\r\n 'y_close', 'change', 'perc_change',\r\n ]\r\n for var in attrs:\r\n elems = dom.getElementsByTagName(var)\r\n if elems and elems[0].getAttribute('data'):\r\n setattr(self, var, elems[0].getAttribute('data'))\r\n self.save()\r\n return self\r\n "
},
{
"alpha_fraction": 0.5390625,
"alphanum_fraction": 0.5390625,
"avg_line_length": 19.66666603088379,
"blob_id": "9f928d0356bcc1863f24a4bb1ecbd37a5c6652c4",
"content_id": "49302eed4311ea44d2058e50b3639d1e22df7736",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/tradesocial/auth/urls.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls.defaults import *\r\n\r\nurlpatterns = patterns('auth.views',\r\n r('^callback/')\r\n \r\n)"
},
{
"alpha_fraction": 0.7255814075469971,
"alphanum_fraction": 0.7255814075469971,
"avg_line_length": 38.4375,
"blob_id": "f66d4b31da67a3696384db0854d4a88f7d319e76",
"content_id": "8f30a5c7454a472c4a886943fed41ecbddaa2f9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 645,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 16,
"path": "/tradesocial/core/utils.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django import shortcuts\r\nfrom django import template\r\n\r\ndef render_to_response(template_name, request, data_dict={}):\r\n \"\"\"A wrapper for Django's render_to_response that automatically builds\r\n a RequestContext to include the default context processors.\r\n \"\"\"\r\n if request is None:\r\n raise Exception(\r\n \"Request must be provided to use core.utils.render_to_response. \"\r\n \"If you want to render a template without a RequestContext, use \"\r\n \"django.shortcuts.render_to_response directly.\"\r\n )\r\n return shortcuts.render_to_response(\r\n template_name, data_dict, context_instance=template.RequestContext(request)\r\n )"
},
{
"alpha_fraction": 0.7799999713897705,
"alphanum_fraction": 0.7799999713897705,
"avg_line_length": 33.71428680419922,
"blob_id": "8579fd4fc5a91aa473882b81bd55e0272ce1b3b8",
"content_id": "fb85b733f01c3f3316588e7f1a4d7f8526db07b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 7,
"path": "/tradesocial/config/facebook_template.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\"\"\"Facebook API-specific settings that apply across all environments\"\"\"\r\n\r\nfrom update_config import SETTING_NEEDS_CONFIG\r\n\r\nFACEBOOK_APP_ID = SETTING_NEEDS_CONFIG\r\nFACEBOOK_API_KEY = SETTING_NEEDS_CONFIG\r\nFACEBOOK_APP_SECRET = SETTING_NEEDS_CONFIG\r\n"
},
{
"alpha_fraction": 0.7634069323539734,
"alphanum_fraction": 0.7634069323539734,
"avg_line_length": 20.785715103149414,
"blob_id": "bca3ac8bdff40ff3d1ef762ebf98bca83ade8cdf",
"content_id": "f246e33b8a280b0f3ac30eee0c0d91fb94deff47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/tradesocial/config/prod_template.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "# Settings specific to sandbox development\r\n\r\nfrom update_config import SETTING_NEEDS_CONFIG\r\n\r\nPREPEND_WWW = True\r\n\r\n# Error reporting\r\nSEND_BROKEN_LINK_EMAILS = True\r\nMANAGERS = SETTING_NEEDS_CONFIG\r\nADMINS = SETTING_NEEDS_CONFIG\r\n\r\n# Admin access control\r\nRESTRICT_ADMINS = True\r\nADMIN_FBIDS = SETTING_NEEDS_CONFIG"
},
{
"alpha_fraction": 0.7866666913032532,
"alphanum_fraction": 0.7866666913032532,
"avg_line_length": 23,
"blob_id": "980bd0568f809c9aa5df19d01959a473c7d2a6bb",
"content_id": "6a14e96b1cb97b6928ee4f8f3e292d4d158880ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 3,
"path": "/tradesocial/config/sandbox_template.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\r\nfrom update_config import SETTING_NEEDS_CONFIG\r\n\r\nRESTRICT_ADMINS = False"
},
{
"alpha_fraction": 0.6664139628410339,
"alphanum_fraction": 0.6664139628410339,
"avg_line_length": 34.63888931274414,
"blob_id": "bb41a9fa4fdfad5bee4544ac743d64871bab2917",
"content_id": "ec19d6e5222e552df148506c10a2cddb32b894b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1319,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 36,
"path": "/tradesocial/auth/middleware.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\r\n\r\nimport facebook\r\n\r\nfrom auth import models\r\n\r\nclass FacebookConnectMiddleware:\r\n \"\"\"Middleware for automatically handling login via Facebook Connect.\"\"\"\r\n def process_request(self, request):\r\n \"\"\"Check for a Facebook Connect cookie. If it doesn't exist, we set\r\n request.user to None. If it does exist, we look up the associated\r\n uid. If there is no user in our db with that uid, we create a new \r\n user object.\r\n \"\"\"\r\n assert(\r\n hasattr(request, 'session'), \r\n \"The Facebook Connect middleware requires session middleware to be \"\r\n \"installed. Edit your MIDDLEWARE_CLASSES setting to insert \"\r\n \"'django.contrib.sessions.middleware.SessionMiddleware'.\"\r\n )\r\n request.__class__.user = None\r\n \r\n # No user in the session, look for a Facebook cookie\r\n cookie = facebook.get_user_from_cookie(\r\n request.COOKIES,\r\n settings.FACEBOOK_APP_ID,\r\n settings.FACEBOOK_APP_SECRET)\r\n if cookie:\r\n try:\r\n request.user = models.User.objects.get(pk=cookie['uid'])\r\n except models.User.DoesNotExist:\r\n request.user = models.User(fbid=cookie['uid'])\r\n request.user = request.user.refresh(cookie['access_token'])\r\n request.user.login(request)\r\n else:\r\n models.User.logout(request)\r\n"
},
{
"alpha_fraction": 0.6789423823356628,
"alphanum_fraction": 0.6855524182319641,
"avg_line_length": 28.257143020629883,
"blob_id": "1b7eaf4197d19bb9e1237df3f17997ad374c131e",
"content_id": "88316171ea26bf98636ed1e458f3bcd01841689d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2118,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 70,
"path": "/tradesocial/config/django_template.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\"\"\"Settings defined by Django that apply across environments\"\"\"\r\n\r\nimport os\r\n\r\nfrom update_config import SETTING_NEEDS_CONFIG\r\n\r\nAPPEND_SLASH = True\r\n\r\n# To initialize this, run:\r\n# from random import choice\r\n# secret = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])\r\nSECRET_KEY = SETTING_NEEDS_CONFIG\r\n\r\nINSTALLED_APPS = (\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.messages',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'djangotoolbox',\r\n \r\n 'auth',\r\n 'core',\r\n 'portfolios',\r\n 'securities',\r\n\r\n # djangoappengine should come last, so it can override a few manage.py commands\r\n 'djangoappengine',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n 'django.middleware.gzip.GZipMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'auth.middleware.FacebookConnectMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n)\r\n\r\nTEMPLATE_CONTEXT_PROCESSORS = (\r\n 'django.core.context_processors.request',\r\n 'django.core.context_processors.media',\r\n 'django.contrib.messages.context_processors.messages',\r\n 'django.core.context_processors.debug',\r\n 'django.core.context_processors.request',\r\n \r\n 'core.context_processors.config',\r\n)\r\n\r\n# This test runner captures stdout and associates tracebacks with their\r\n# corresponding output. Helps a lot with print-debugging.\r\nTEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'\r\n\r\nMEDIA_URL = '/media'\r\nADMIN_MEDIA_PREFIX = '/media/admin/'\r\nTEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)\r\n\r\nROOT_URLCONF = 'urls'\r\n\r\nSITE_ID = 29\r\n\r\n# Activate django-dbindexer if available\r\ntry:\r\n import dbindexer\r\n DATABASES['native'] = DATABASES['default']\r\n DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}\r\n INSTALLED_APPS += ('dbindexer',)\r\n DBINDEXER_SITECONF = 'dbindexes'\r\n MIDDLEWARE_CLASSES = ('dbindexer.middleware.DBIndexerMiddleware',) + \\\r\n MIDDLEWARE_CLASSES\r\nexcept ImportError:\r\n pass\r\n"
},
{
"alpha_fraction": 0.7475728392601013,
"alphanum_fraction": 0.7475728392601013,
"avg_line_length": 24.25,
"blob_id": "9db231e85df659e1f4264969ebf017c27f9f2ba1",
"content_id": "e9beb435ec32cad548eac7c789fc441e9deb5889",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 4,
"path": "/tradesocial/views.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from core import utils\r\n\r\ndef index(request):\r\n return utils.render_to_response('index.html', request)"
},
{
"alpha_fraction": 0.5857740640640259,
"alphanum_fraction": 0.5857740640640259,
"avg_line_length": 25.55555534362793,
"blob_id": "636019d5efadb816d75460ddab55b46d0192d0d4",
"content_id": "236bfaacf0e27fbdb23e77b350c7a36f75d706fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 9,
"path": "/tradesocial/urls.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls.defaults import *\n\nurlpatterns = patterns('',\n (r'^_ah/warmup$', 'djangoappengine.views.warmup'),\n (r'^$', 'views.index'),\n \n (r'^auth/', include('auth.urls')),\n (r'^s/', include('securities.urls')),\n)\n"
},
{
"alpha_fraction": 0.7685459852218628,
"alphanum_fraction": 0.7685459852218628,
"avg_line_length": 41.125,
"blob_id": "dbe3aca43cc98848568fd1ea2ca3ddfdfa87750a",
"content_id": "b0edd8e3319a6491445d7d28be730c5a07d2356f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 337,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 8,
"path": "/tradesocial/settings.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "# Initialize App Engine and import the default settings (DB backend, etc.).\n# If you want to use a different backend you have to remove all occurences\n# of \"djangoappengine\" from this file.\nfrom djangoappengine.settings_base import *\n\n# For instructions on how to configure the environment,\n# see config/__init__.py\nfrom config import *\n"
},
{
"alpha_fraction": 0.663286030292511,
"alphanum_fraction": 0.663286030292511,
"avg_line_length": 23.842105865478516,
"blob_id": "52c1969563f798922720fabe48ee51d6ff83af6a",
"content_id": "91b2598785e18f2adb9cd7f7847fff3657f86b4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 19,
"path": "/tradesocial/core/templatetags/renderers.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django import template\r\nfrom django.conf import settings\r\nfrom django.utils import safestring\r\n\r\nregister = template.Library()\r\n\r\[email protected]_tag\r\ndef render_css(path):\r\n return safestring.mark_safe(\r\n '<link rel=\"stylesheet\" type=\"text/css\" href=\"%s/css/%s\" />' \r\n % (settings.MEDIA_URL, path)\r\n )\r\n\r\[email protected]_tag\r\ndef render_js(path):\r\n return safestring.mark_safe(\r\n '<script type=\"text/javascript\" src=\"%s/js/%s\" />'\r\n % (settings.MEDIA_URL, path)\r\n )\r\n "
},
{
"alpha_fraction": 0.6189397573471069,
"alphanum_fraction": 0.6189397573471069,
"avg_line_length": 24.566265106201172,
"blob_id": "d8aa91e7f0f5103ecb3c853005529bf1b3ceb254",
"content_id": "ad75690ac6e5b0189a71a9affe372810574e6eb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2207,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 83,
"path": "/tradesocial/auth/permissions.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "\"\"\"Define all available Facebook API permissions.\"\"\"\r\n\r\nAVAILABLE_PERMISSIONS = [\r\n 'user_about_me',\r\n 'friends_about_me',\r\n 'user_activities',\r\n 'friends_activities',\r\n 'user_birthday',\r\n 'friends_birthday',\r\n 'user_education_history',\r\n 'friends_education_history',\r\n 'user_events',\r\n 'friends_events',\r\n 'user_groups',\r\n 'friends_groups',\r\n 'user_hometown',\r\n 'friends_hometown',\r\n 'user_interests',\r\n 'friends_interests',\r\n 'user_likes',\r\n 'friends_likes',\r\n 'user_location',\r\n 'friends_location',\r\n 'user_notes',\r\n 'friends_notes',\r\n 'user_online_presence',\r\n 'friends_online_presence',\r\n 'user_photo_video_tags',\r\n 'friends_photo_video_tags',\r\n 'user_photos',\r\n 'friends_photos',\r\n 'user_relationships',\r\n 'friends_relationships',\r\n 'user_relationship_details',\r\n 'friends_relationship_details',\r\n 'user_religion_politics',\r\n 'friends_religion_politics',\r\n 'user_status',\r\n 'friends_status',\r\n 'user_videos',\r\n 'friends_videos',\r\n 'user_website',\r\n 'friends_website',\r\n 'user_work_history',\r\n 'friends_work_history',\r\n 'email',\r\n 'read_friendlists',\r\n 'manage_friendlists',\r\n 'read_insights',\r\n 'read_mailbox',\r\n 'read_requests',\r\n 'read_stream',\r\n 'xmpp_login',\r\n 'ads_management',\r\n 'user_checkins',\r\n 'friends_checkins',\r\n 'publish_stream',\r\n 'create_event',\r\n 'rsvp_event',\r\n 'sms',\r\n 'offline_access',\r\n 'publish_checkins',\r\n 'manage_pages',\r\n]\r\n\r\nPERMISSION_MAP = dict(\r\n [(ALL_PERMISSIONS[i], i) for i in range(len(ALL_PERMISSIONS))]\r\n)\r\n\r\ndef enforcePermissions(perms, user):\r\n #Find out which permissions are missing\r\n missing = user.checkPermissions(perms)\r\n \r\n # Push the user into a dialog requesting those permissions\r\n redirect_url = core.url.Url()\r\n redirect_url.setDomain(settings.)\r\n dialog_url = core.url.Url()\r\n dialog_url.setDomain('www.facebook.com')\r\n dialog_url.setPath('/dialog/oauth/')\r\n dialog_url.addQueryData('client_id', settings.APP_ID)\r\n dialog_url.addQueryData('response_type', 'token')\r\n dialog_url.addQueryData('state', '')\r\n dialog_url.addQueryData('redirect_uri', )\r\n "
},
{
"alpha_fraction": 0.6217391490936279,
"alphanum_fraction": 0.6217391490936279,
"avg_line_length": 17.81818199157715,
"blob_id": "4bf4a934b1f9f6ea34b5494975074b5469e5a41f",
"content_id": "aa0f8eff5077544e06bd1adccf99822cbae0dbff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 230,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 11,
"path": "/tradesocial/portfolios/views.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from core import utils\r\nfrom portfolios import models\r\n\r\ndef index(request):\r\n \r\n return utils.render_to_response(\r\n 'portfolio/index.html',\r\n request,\r\n {'portfolios': request.user.portfolios,\r\n }\r\n )\r\n \r\n \r\n \r\n"
},
{
"alpha_fraction": 0.6207584738731384,
"alphanum_fraction": 0.6307384967803955,
"avg_line_length": 27.47058868408203,
"blob_id": "81e4d08d5d6d6e0c7d9f22dc0e97d53c95e81c17",
"content_id": "c8d24d57ba5b781454f76285629ef0ec21c1902d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 17,
"path": "/tradesocial/core/initialization/security.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from securities import models\r\n\r\ndef initialize_securities(symbols=None):\r\n if symbols is None:\r\n from core.initialization.data.symbols import DEFAULT_SYMBOLS\r\n symbols = DEFAULT_SYMBOLS\r\n \r\n print 'Ready to fetch %d symbols' % len(symbols)\r\n i = 0\r\n for symbol in symbols:\r\n try:\r\n models.Security.getFromSymbol(symbol)\r\n except Exception, e:\r\n print 'Failed to fetch %s: %s' % (symbol, e)\r\n i += 1\r\n if i % 25 == 0:\r\n print 'Fetched %d/%d' % (i, len(symbols))\r\n"
},
{
"alpha_fraction": 0.5786802172660828,
"alphanum_fraction": 0.5888324975967407,
"avg_line_length": 26.428571701049805,
"blob_id": "15b3e90daefc277759e1f08a71dd1f76afe27127",
"content_id": "03afc0d068d3942c950ce2111dd28152b8c24a4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 7,
"path": "/tradesocial/securities/urls.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls.defaults import *\r\n\r\nurlpatterns = patterns('securities.views',\r\n (r'^$', 'index'),\r\n (r'^(?P<symbol>[A-Z0-9.]+)/$', 'summary_page'),\r\n (r'^initialize/$', 'initialize'),\r\n)"
},
{
"alpha_fraction": 0.6886178851127625,
"alphanum_fraction": 0.6918699145317078,
"avg_line_length": 26,
"blob_id": "fd088c40720c6f532baf21575512cbb8d283d117",
"content_id": "334943c8f21a79d5881e9e0cd15993ad7b0c95b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1230,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 44,
"path": "/tradesocial/securities/views.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django import http\r\nfrom django.conf import settings\r\nfrom django.core import paginator\r\n\r\nimport core.url\r\nfrom core import utils\r\nfrom securities import models\r\n\r\ndef index(request):\r\n p = paginator.Paginator(models.Security.objects.all(), 20) \r\n try:\r\n page = int(request.GET.get('page', '1'))\r\n except ValueError:\r\n page = 1\r\n \r\n return utils.render_to_response(\r\n 'security/index.html',\r\n request,\r\n {'current_page': page,\r\n 'paginator': p,\r\n 'page_url': core.url.URL('/s/'),\r\n 'securities': p.page(page).object_list}\r\n )\r\n\r\ndef summary_page(request, symbol):\r\n try:\r\n security = models.Security.getFromSymbol(symbol)\r\n except models.Security.SymbolInvalidError:\r\n return http.HttpResponseNotFound()\r\n \r\n return utils.render_to_response(\r\n 'security/summary.html', \r\n request,\r\n {'security': security}\r\n )\r\n \r\ndef initialize(request):\r\n # Ony allow admins to initialize\r\n if not request.user or request.user.fbid not in settings.ADMIN_FBIDS:\r\n return http.HttpResponse('Only admins are allowed to initialize')\r\n \r\n from core.initialization.security import initialize_securities\r\n initialize_securities()\r\n return http.HttpResponse('Initialization Complete')"
},
{
"alpha_fraction": 0.6181774139404297,
"alphanum_fraction": 0.6184210777282715,
"avg_line_length": 26.96453857421875,
"blob_id": "af3adb6b64a91e91bb252aeb95338e10df76a228",
"content_id": "cfe6cb5e3e38b5c39d8a6a4e82b57b7fb1504fd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4104,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 141,
"path": "/tradesocial/config/update_config.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "import inspect\r\n\r\nSETTING_NEEDS_CONFIG = 'mUsT_cOnFiGuRe!'\r\n\r\nCORE_MODULES = [\r\n 'django',\r\n 'facebook',\r\n]\r\nPROD_MODULES = [\r\n 'prod',\r\n]\r\nSANDBOX_MODULES = [\r\n 'sandbox',\r\n]\r\n\r\ndef docblock(module_name):\r\n return \"\"\"\\\"\\\"\\\"\r\nThis is the auto-generated configuration of module %s.py. It was\r\ncreated using the template in %s_template.py. All of the settings by\r\nthe template have been transferred to this module. Some of these settings\r\nmay require configuration (e.g. database passwords, private encryption keys,\r\netc.). If you have deployed this module previously, all local configurations\r\nshould have been transferred over.\r\n\r\nPlease check all settings for correctness and configure any that are set\r\nto \"%s\".\r\n\\\"\\\"\\\"\r\n \"\"\" % (module_name, module_name, SETTING_NEEDS_CONFIG)\r\n\r\ndef log_output(msg):\r\n print msg\r\n\r\ndef pretty(value):\r\n if isinstance(value, str):\r\n return '\"%s\"' % value\r\n \r\n if isinstance(value, list):\r\n bits = ['\\n %s' % pretty(x) for x in value]\r\n return '[%s\\n]' % ', '.join(bits)\r\n \r\n if isinstance(value, tuple):\r\n bits = ['\\n %s' % pretty(x) for x in value]\r\n return '(%s\\n)' % ', '.join(bits)\r\n \r\n return '%s' % value\r\n \r\n\r\ndef write_settings_file(settings, module_name):\r\n filename = '%s.py' % module_name\r\n f = open(filename, 'w')\r\n \r\n f.write(docblock(module_name))\r\n f.write('\\n')\r\n \r\n for setting in sorted(settings):\r\n f.write('%s = %s\\n\\n' % (setting, pretty(settings[setting])))\r\n f.close()\r\n\r\ndef update_from_template(module_name):\r\n template_name = '%s_template' % module_name\r\n \r\n try:\r\n template = __import__(template_name)\r\n except ImportError:\r\n log_output('FAILED TO FIND TEMPLATE FOR: %s\\n\\n' % module_name)\r\n return\r\n \r\n try:\r\n module = __import__(module_name)\r\n except ImportError:\r\n module = {}\r\n \r\n new_settings = {}\r\n changed = []\r\n for attr in dir(template):\r\n if attr[0] == '_' or attr == 'SETTING_NEEDS_CONFIG':\r\n # Ignore Python attributes and the template placeholder\r\n continue\r\n \r\n if inspect.ismodule(getattr(template, attr)):\r\n continue\r\n \r\n template_setting = getattr(template, attr)\r\n if hasattr(module, attr):\r\n module_setting = getattr(module, attr)\r\n else:\r\n module_setting = None\r\n \r\n if template_setting != SETTING_NEEDS_CONFIG:\r\n # This is not a configurable setting, so the template\r\n # value is the one we use.\r\n new_settings[attr] = template_setting\r\n log_output('Setting %s from template config' % attr)\r\n if module_setting is not None and template_setting != module_setting:\r\n # The deployed setting differs from the template setting. 
This should\r\n # generally only happen when the template setting is changed.\r\n changed.append(attr)\r\n log_output('Warning: template differs from deployed config.')\r\n log_output('Template: %s' % template_setting)\r\n log_output('Deployed: %s' % module_setting)\r\n \r\n else:\r\n # This is a configurable setting \r\n if module_setting is None:\r\n changed.append(attr)\r\n new_settings[attr] = SETTING_NEEDS_CONFIG\r\n log_output('%s needs to be configured' % attr)\r\n else:\r\n new_settings[attr] = module_setting\r\n log_output('Setting %s from deployed config' % attr)\r\n \r\n log_output('\\n\\n')\r\n if changed:\r\n log_output(\r\n 'The following settings were changed and should be checked for correctness:'\r\n )\r\n map(log_output, changed)\r\n log_output('Finished deploying config module %s' % module_name)\r\n log_output('**********************************\\n\\n')\r\n write_settings_file(new_settings, module_name)\r\n \r\n\r\ndef deploy_modules(modules):\r\n for module_name in modules:\r\n log_output('*****DEPLOYING %s******' % module_name)\r\n update_from_template(module_name) \r\n\r\ndef deploy_prod():\r\n modules = CORE_MODULES + PROD_MODULES\r\n deploy_modules(modules)\r\n \r\ndef deploy_sandbox():\r\n modules = CORE_MODULES + SANDBOX_MODULES\r\n deploy_modules(modules)\r\n \r\ndef main():\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n \r\n \r\n "
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 18.83333396911621,
"blob_id": "efe2dc37cf15d4342512c0f9a18fe51d40828646",
"content_id": "1cbe1021987ec27dd901a4fc9f89783f187ef1b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 6,
"path": "/tradesocial/core/context_processors.py",
"repo_name": "gmarcotte/face-django",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\r\n\r\ndef config(request):\r\n return {\r\n 'FACEBOOK_APP_ID': settings.FACEBOOK_APP_ID,\r\n }"
}
] | 24 |
Hunteena/hw_superheroes | https://github.com/Hunteena/hw_superheroes | 28553ea15a2cc2810ce57ec4f9c8e74db35ddd55 | 3175e4c10a7737752dfff164434a4b21028802ec | de9c1e9bdc188a09894407311b927b9add8bf894 | refs/heads/master | 2023-08-17T08:59:53.329786 | 2021-10-04T11:51:46 | 2021-10-04T11:51:46 | 413,402,157 | 0 | 0 | null | null | null | null | null
[
{
"alpha_fraction": 0.6564516425132751,
"alphanum_fraction": 0.6596774458885193,
"avg_line_length": 33.44444274902344,
"blob_id": "26c683de2499adb0dd4edda436ab519d475f6d39",
"content_id": "4843ca5aacc904506aafe14cef372c3861a50e16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 630,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 18,
"path": "/main.py",
"repo_name": "Hunteena/hw_superheroes",
"src_encoding": "UTF-8",
"text": "import requests\n\n\ndef most_intelligent_superhero(names):\n token = '2619421814940190'\n intelligences = []\n for name in names:\n url = f'https://superheroapi.com/api/{token}/search/{name}'\n response = requests.get(url=url)\n hero_intelligence = response.json()['results'][0]['powerstats']['intelligence']\n intelligences.append(int(hero_intelligence))\n most_intelligent = names[intelligences.index(max(intelligences))]\n return most_intelligent\n\n\nif __name__ == '__main__':\n names = ['Hulk', 'Captain America', 'Thanos']\n print(f'Самый умный - {most_intelligent_superhero(names)}')\n"
}
] | 1 |
jnasingleton/geocoding | https://github.com/jnasingleton/geocoding | 4881e49a2748361d76d7a44895a2e083f7dcef62 | aa172aac58d1627e4f4524dda6f12a3de3f8b650 | adf5b28cf98c396d76c3e9e24e729eaf78fbf73a | refs/heads/master | 2020-03-22T04:41:30.005043 | 2018-07-03T01:48:31 | 2018-07-03T01:48:31 | 139,514,278 | 0 | 0 | null | null | null | null | null
[
{
"alpha_fraction": 0.6444750428199768,
"alphanum_fraction": 0.6464646458625793,
"avg_line_length": 36.130680084228516,
"blob_id": "04dcc13250ea2efca27d6d0034ba27cc8dbc1438",
"content_id": "2555fb957c74677ffd10bb5b0e680f95a8587215",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6534,
"license_type": "permissive",
"max_line_length": 181,
"num_lines": 176,
"path": "/process_geocoded_batch.py",
"repo_name": "jnasingleton/geocoding",
"src_encoding": "UTF-8",
"text": "#This script processes geocoded addresses and attempts to pull the most correct match out for addresses that are geocoded multiple time (ie. same address_id but different addresses)\n#Additional work is needed to preprocess to split out PO Boxes and Unit #s, etc, as these often confuse the Google Geocoding API.\n#N.B. There is a Google Places type override for pharmacies that may not be needed for your purposes.\n\nimport numpy as np\nimport os.path\nimport pandas as pd\nimport requests\nimport logging\nimport time\n\n# Create logger\nlogger = logging.getLogger(\"root\")\nlogger.setLevel(logging.DEBUG)\n\n# Create console handler\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\n# Add console handler\nlogger.addHandler(ch)\n\n#------------------ CONFIGURATION -------------------------------\n\n# Set your input file here\nFILENAME_INPUT = 'AddressesForGeocoding_output.csv'\n\n# Set your output file name here.\nFILENAME_OUTPUT = 'AddressesForGeocoding_output_final.csv'\n\n# Specify the segments/splitcounts for printing\nSPLITCOUNT_PRINT = 50\n\n# Fields from import file/dataframe to be used\nADDRESS_IDENTIFIER_FIELD = 'address_id'\nACCURACY_FIELD = 'accuracy'\nADDRESS_FIELD = 'formatted_address'\nLATITUDE_FIELD = 'latitude'\nLONGITUDE_FIELD = 'longitude'\nSTATUS_FIELD = 'status'\nTYPE_FIELD = 'type'\n\n#------------------ DATA LOADING --------------------------------\n\n# Read the data to a Pandas Dataframe\ndf = pd.read_csv(FILENAME_INPUT, encoding='utf8')\n\n#------------------ FUNCTION DEFINITIONS ------------------------\n\ndef determine_replace_master(df_row_master, df_row):\n \"\"\"\n Get first geocode result from Google Maps Geocoding API.\n \n @param address: String address as accurate as possible. \n If including the components parameter, exclude those details from the address.\n Address should not include landmarks, company names, etc.\n @param api_key: String API key from Google. \n @param return_full_response: Boolean to indicate if you'd like to return the full response from Google. 
\n This useful if you'd like additional location details for storage or parsing later.\n\n Future Work: Return a dataframe instead of a dict.\n \"\"\"\n\n # Compare and return the better entry\n\n row_master_type = df_row_master['type'].iloc[0]\n row_master_accuracy = df_row_master['accuracy'].iloc[0]\n row_master_address = df_row_master['formatted_address'].iloc[0]\n\n row_type = df_row['type'].iloc[0]\n row_accuracy = df_row['accuracy'].iloc[0]\n row_address = df_row['formatted_address'].iloc[0]\n\n skip_compare = False\n if str(row_master_type) == 'nan' or str(row_master_accuracy) == 'nan' or str(row_master_address) == 'nan':\n replace_master = True\n skip_compare = True\n if str(row_type) == 'nan' or str(row_accuracy) == 'nan' or str(row_address) == 'nan' :\n replace_master = False\n skip_compare = True\n\n if not skip_compare:\n\n # Adjust type to allow for string match on 'pharmacy'\n # We have a type override because we are interested in pharmacy locations in this template.\n type_override = 'pharmacy'\n if type_override in row_master_type:\n row_master_type = type_override\n if type_override in row_type:\n row_type = type_override\n\n type_priority_list = ['pharmacy', 'subpremise', 'premise', 'street']\n accuracy_priority_list = ['ROOFTOP', 'RANGE_INTERPOLATED', 'GEOMETRIC_CENTER', 'APPROXIMATE']\n\n try:\n row_master_type_index = type_priority_list.index(row_master_type)\n except ValueError:\n row_master_type_index = len(type_priority_list)\n try:\n row_master_accuracy_index = accuracy_priority_list.index(row_master_accuracy)\n except ValueError:\n row_master_accuracy_index = len(accuracy_priority_list) \n\n try:\n row_type_index = type_priority_list.index(row_type)\n except ValueError:\n row_type_index = len(type_priority_list) \n try:\n row_accuracy_index = accuracy_priority_list.index(row_accuracy)\n except ValueError:\n row_accuracy_index = len(accuracy_priority_list) \n\n if row_type_index == row_master_type_index: \n # Matched Type\n if row_accuracy_index == row_master_accuracy_index:\n # Matched Accuracy\n if row_address != row_master_address:\n print('Different addresses but same type and accuracy')\n replace_master = True\n elif row_accuracy_index < row_master_accuracy_index:\n replace_master = True\n pass\n elif row_accuracy_index > row_master_accuracy_index:\n replace_master = False\n elif row_type_index < row_master_type_index:\n replace_master = True\n pass\n elif row_type_index > row_master_type_index:\n replace_master = False\n\n return replace_master\n\n#------------------ PROCESSING LOOP -----------------------------\n\ndf_duplicated = df[df.duplicated(ADDRESS_IDENTIFIER_FIELD, keep=False)]\nlist_duplicated = df_duplicated[ADDRESS_IDENTIFIER_FIELD].tolist()\nlist_duplicated = list(set(list_duplicated))\n\n# Not duplicated, are allowed to have non 'OK' statuses (will be checked manually)\ndf_notduplicated = pd.concat([df, df_duplicated]).drop_duplicates(keep=False)\n\n# Create a dataframe to hold results\ndf_master = None\n\nfor idx_address, address_identifier in enumerate(list_duplicated):\n\n print(address_identifier)\n\n # df_temp stores the duplicated geocode rows for address_identifier\n df_temp = df_duplicated.loc[df_duplicated[ADDRESS_IDENTIFIER_FIELD] == address_identifier]\n df_temp.reset_index(drop=True,inplace=True)\n\n df_temp_recordcount = df_temp.shape[0]\n\n for idx_row in range(0, df_temp_recordcount):\n\n df_row = df_temp.iloc[[idx_row]]\n\n if idx_row == 0:\n df_row_master = df_row\n else:\n replace_master = determine_replace_master(df_row_master, 
df_row)\n if replace_master == True:\n df_row_master = df_row\n\n #save df_row_master to a final X\n if df_master is None:\n df_master = df_row_master\n else:\n df_master = df_master.append(df_row_master, ignore_index=True)\n\n# Save results to file \ndf_master = pd.concat([df_master,df_notduplicated], ignore_index=True)\ndf_master.to_csv(FILENAME_OUTPUT, encoding='utf8', index=False)\n\nprint('Finished!')"
},
{
"alpha_fraction": 0.6352753639221191,
"alphanum_fraction": 0.6406338810920715,
"avg_line_length": 37.47368240356445,
"blob_id": "84760584898fd32f08c50e4fef1628e0cc757da6",
"content_id": "b3d21a5b33873d3844e9101275d35a7ea7282708",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8771,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 228,
"path": "/batch_geocoding.py",
"repo_name": "jnasingleton/geocoding",
"src_encoding": "UTF-8",
"text": "#Python script for batch geocoding of addresses using the Google Geocoding API.\n#This script geocodes addresses (including address components) from a specified csv file. \n\n#Credit (template/framework): https://gist.github.com/shanealynn/033c8a3cacdba8ce03cbe116225ced31\n\n#https://developers.google.com/maps/documentation/geocoding/intro\n#https://developers.google.com/maps/documentation/geocoding/start#ComponentFiltering\n\nimport numpy as np\nimport os.path\nimport pandas as pd\nimport requests\nimport logging\nimport time\n\n# Create logger\nlogger = logging.getLogger(\"root\")\nlogger.setLevel(logging.DEBUG)\n\n# Create console handler\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\n# Add console handler\nlogger.addHandler(ch)\n\n#------------------ CONFIGURATION -------------------------------\n\n# Set your Google API key here. \n# https://console.developers.google.com/apis\nAPI_KEY = 'GOOGLE_GEOCODING_API_KEY'\n\n# Backoff time sets how many seconds to wait between google pings when your API limit is hit\nTIME_BACKOFF = 5\n\n# Set your input file here\nFILENAME_INPUT = 'AddressesForGeocoding.csv'\n\n# Set your output file name here.\nFILENAME_OUTPUT = FILENAME_INPUT.replace('.csv','_output.csv')\n\n# Return Full Google Results? \n# If True, full JSON results from Google are included in output\nRETURN_FULL_RESULTS = False\n\n# Specify the segments/splitcounts for printing and saving\nSPLITCOUNT_PRINT = 50\nSPLITCOUNT_SAVE = 50\n\n# Fields from import file/dataframe to be used\nADDRESS_IDENTIFIER_FIELD = 'address_id'\nADDRESS_FIELD = 'address'\nCOMPONENTS_FIELD = 'components'\nCOMPONENTS2_FIELD = 'components2'\nPOSTAL_CODE_FIELD = 'postal_code'\nLOCALITY_FIELD = 'city'\nCOUNTRY_FIELD = 'country'\n\n#------------------ DATA LOADING --------------------------------\n\n# Read the data to a Pandas Dataframe\ndf = pd.read_csv(FILENAME_INPUT, encoding='utf8')\n\ngeocoded_results_count_total = df.shape[0]\n\n# Create components field\ndf[COMPONENTS_FIELD] = df.apply( \\\n lambda row: 'postal_code:' + str(row[POSTAL_CODE_FIELD]) + '|' + 'locality:' + str(row[LOCALITY_FIELD]) + '|' + 'country:' + str(row[COUNTRY_FIELD]), axis=1)\n\n# Create components field\ndf[COMPONENTS2_FIELD] = df.apply( \\\n lambda row: 'locality:' + str(row[LOCALITY_FIELD]) + '|' + 'country:' + str(row[COUNTRY_FIELD]), axis=1)\n\n# Check no blanks in required columns\n\n# Create a dataframe to hold results\ndf_results = None\n\n# Load existing results, if they exist, and adjust df_results\n# N.B. This assume a direct row index between these two files\nif os.path.isfile(FILENAME_OUTPUT):\n df_results = pd.read_csv(FILENAME_OUTPUT)\n geocoded_results_count = df_results.shape[0]\n df = df.iloc[geocoded_results_count:]\nelse:\n geocoded_results_count = 0\n\n#------------------\tFUNCTION DEFINITIONS ------------------------\n\ndef get_google_results(address_identifier, address, components=None, components2=None, api_key=None, return_full_response=False):\n \"\"\"\n Get first geocode result from Google Maps Geocoding API.\n \n @param address: String address as accurate as possible. \n If including the components parameter, exclude those details from the address.\n Address should not include landmarks, company names, etc.\n @param api_key: String API key from Google. \n @param return_full_response: Boolean to indicate if you'd like to return the full response from Google. 
\n This useful if you'd like additional location details for storage or parsing later.\n\n Future Work: Return a dataframe instead of a dict.\n \"\"\"\n\n # Set up simple geocode_url\n geocode_url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n geocode_url += 'address=' + str(address)\n\n # Set up detailed geocode_url\n if components is not None:\n geocode_url += \"&components={}\".format(components)\n if api_key is not None:\n geocode_url += \"&key={}\".format(api_key)\n \n # Retrive the results from Google\n #print(geocode_url)\n results = requests.get(geocode_url)\n # Results will be in JSON format; json() function will decode the JSON format\n results = results.json()\n\n # If there are no results or an error, \n # Try again by dropping the postal_code component\n if len(results['results']) == 0:\n geocode_url = geocode_url.replace(\"&components={}\".format(components),\"&components={}\".format(components2))\n\n # Retrive the results from Google\n results = requests.get(geocode_url)\n # Results will be in JSON format; json() function will decode the JSON format\n results = results.json()\n\n # If address is blank or there are no results or an error, return empty results.\n if str(address) == 'nan' or len(results['results']) == 0:\n output = {\n \"formatted_address\" : None,\n \"latitude\": None,\n \"longitude\": None,\n \"accuracy\": None,\n \"google_place_id\": None,\n \"type\": None,\n \"postcode\": None\n }\n # Append additional details\n output['number_of_results'] = 0\n output['status'] = ''\n output['response'] = ''\n else: \n # Retrieve only the first result\n answer = results['results'][0]\n output = {\n \"formatted_address\" : answer.get('formatted_address'),\n \"latitude\": answer.get('geometry').get('location').get('lat'),\n \"longitude\": answer.get('geometry').get('location').get('lng'),\n \"accuracy\": answer.get('geometry').get('location_type'),\n \"google_place_id\": answer.get(\"place_id\"),\n \"type\": \",\".join(answer.get('types')),\n \"postcode\": \",\".join([x['long_name'] for x in answer.get('address_components') \n if 'postal_code' in x.get('types')])\n } \n # Append additional details\n output['number_of_results'] = len(results['results'])\n output['status'] = results.get('status')\n if return_full_response:\n output['response'] = results\n \n # Append the identifier so we can tie back to the import file\n output['identifier'] = address_identifier\n\n return output\n\n#------------------ PROCESSING LOOP -----------------------------\n\n# Go through each address\nfor idx_row, row in df.iterrows():\n\n #idx_row_actual = geocoded_results_count + idx_row\n idx_row_actual = idx_row\n\n address_identifier = row[ADDRESS_IDENTIFIER_FIELD]\n address = row[ADDRESS_FIELD]\n components = row[COMPONENTS_FIELD]\n components2 = row[COMPONENTS2_FIELD]\n\n # Continue while the row has not been geocoded: \n geocoded = False\n while geocoded is not True:\n # Geocode the address with google\n try:\n # Generate compoennts string\n geocode_result = get_google_results(address_identifier, address, components, components2, API_KEY, return_full_response=RETURN_FULL_RESULTS)\n except Exception as e:\n logger.exception(e)\n logger.error(\"Major error with {}\".format(address))\n logger.error(\"Skipping!\")\n geocoded = True\n \n if geocode_result['status'] == 'OVER_QUERY_LIMIT':\n # If API result status is 'OVER_QUERY_LIMIT',\n # backoff for [] seconds and then retry\n # N.B. 
This can occur due to the daily limit, limit on queries per second, ...\n logger.info('OVER_QUERY_LIMIT - Backing off for ' + str(TIME_BACKOFF) + ' seconds')\n time.sleep(TIME_BACKOFF)\n geocoded = False\n else:\n # N.B. Results might be empty / not 'OK', these statuses will be logged\n if geocode_result['status'] != 'OK':\n logger.warning(\"Error geocoding {}: {}\".format(address, geocode_result['status']))\n #logger.debug(\"Geocoded: {}: {}\".format(address, geocode_result['status']))\n if df_results is None:\n df_results = pd.DataFrame(geocode_result,index=[0])\n else:\n df_results = df_results.append(geocode_result, ignore_index=True) \n geocoded = True\n\n # Print status every [SPLITCOUNT_PRINT] addresses\n if (idx_row_actual+1) % SPLITCOUNT_PRINT == 0:\n \tlogger.info(\"Completed {} of {} addresses\".format((idx_row_actual+1), geocoded_results_count_total))\n \n # Save df_results to file every [SPLITCOUNT_SAVE] addresses\n if (idx_row_actual+1) % SPLITCOUNT_SAVE == 0:\n df_results.to_csv(FILENAME_OUTPUT, encoding='utf8', index=False)\n\n# Remove duplicate rows, if they exist\ndf_results.drop_duplicates(inplace=True)\n\n# Save results to file \ndf_results.to_csv(FILENAME_OUTPUT, encoding='utf8', index=False)\n\n# Completion message\nlogger.info(\"Finished geocoding all addresses\")"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 42.5,
"blob_id": "21be24d7991c7c56550af6a8ff3772d7b2cd4321",
"content_id": "f9cd33c1e67da59591cc7cf1b5081ebd404f2660",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jnasingleton/geocoding",
"src_encoding": "UTF-8",
"text": "# geocoding\nVarious projects featuring geocoding and GIS are shown in this repository. \n"
}
] | 3 |
HARISH6000/just-some-random-projects | https://github.com/HARISH6000/just-some-random-projects | f6ad44b3fbfac24c39a1525cd79ac2ab10dddbf6 | a8b983cd4c7da73df1576d474316ca57e85a6b91 | da1152b7604a2deaa75a4776ad53a64177e04ead | refs/heads/master | 2023-08-02T13:20:06.708681 | 2021-09-12T15:52:29 | 2021-09-12T15:52:29 | 403,326,177 | 0 | 0 | null | null | null | null | null
[
{
"alpha_fraction": 0.6204819083213806,
"alphanum_fraction": 0.6445783376693726,
"avg_line_length": 19.75,
"blob_id": "b356ac6a07af4437725d5cd55225d1189a756b0c",
"content_id": "ab2a345917e6f8b0b88aad693efcc6168166fd50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/miles-to-km.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "# km = 1.6 * miles\n\ndef function(miles):\n return 1.6*miles\nm = float(input(\"Enter the distance in miles:\"))\nkm = function(m)\n\nprint(m,\"miles is equal to\",km,\"km\")\n"
},
{
"alpha_fraction": 0.5322580933570862,
"alphanum_fraction": 0.5591397881507874,
"avg_line_length": 22,
"blob_id": "74121315335dd07fe7c82ee1486d2b3bc98a10ad",
"content_id": "754655b4aa88c5f35159efa214e70a258666a00a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 8,
"path": "/Volume-of-cone.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "def volume(r ,h):\n v = (1/3)*((3.14)*(r*r)*(h))\n print(\"the volume of the cone is \", v)\n\na= float(input(\"Enter the radius:\"))\nb = float(input(\"Enter the height:\"))\n\nvolume(a, b)\n\n\n"
},
{
"alpha_fraction": 0.5530303120613098,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 20.83333396911621,
"blob_id": "8a9c0f83b5727fc363e9c3fec274d29ca9b8968d",
"content_id": "7ffb8e70f74c5f589ca159b907a3f27be79e03c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/area-of-the-circle.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "# r = int(input('enter the radius:'))\ndef area(r):\n area = 3.14*(r**2)\n print(area)\n\narea(float(input('enter the radius:')))\n\n"
},
{
"alpha_fraction": 0.650602400302887,
"alphanum_fraction": 0.650602400302887,
"avg_line_length": 26.66666603088379,
"blob_id": "8a3da4efad88586aa9d9780bd5e7cc0e1909ff07",
"content_id": "8fcac75ca42be08e826ceef40a4a635fa87daa4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 3,
"path": "/date-time.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "import datetime\nn = datetime.datetime.now()\nprint(n.strftime(\"%Y-%m-%d %H:%M:%S\"))\n"
},
{
"alpha_fraction": 0.5133333206176758,
"alphanum_fraction": 0.5533333420753479,
"avg_line_length": 20.14285659790039,
"blob_id": "638ce4cfac19a52d9aa300e3331312bbb8633d46",
"content_id": "87a32073ce4dcc679f56658120c3c84129b1e49f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/Volume-of-sphere.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "def vol_sphere(r):\n v_s = (4/3)*((3.14)*(r**3))\n print(\"Volume of the sphere is \", v_s)\n\nR = float(input(\"Enter the radius:\"))\n\nvol_sphere(R)\n\n\n"
},
{
"alpha_fraction": 0.5660377144813538,
"alphanum_fraction": 0.6037735939025879,
"avg_line_length": 25.5,
"blob_id": "2431fe43393465a29fddccd87935ce8a726579a2",
"content_id": "1530a7c4e379423ff726f8b572b40a1a29425610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 10,
"path": "/bmi-calculator.py",
"repo_name": "HARISH6000/just-some-random-projects",
"src_encoding": "UTF-8",
"text": "def bmi(h, w):\n bmi = w/((h/100)**2)\n print(\"your BMI is\", bmi)\n\n#h1 = int(input(\"Enter your height in meter:\"))\n#w1 = int(input(\"Enter your weight in kg:\"))\nh1 = int(input(\"Enter your height in cm:\"))\nw1 = int(input(\"Enter your weight in kg:\"))\n\nbmi(h1, w1)\n"
}
] | 6 |
dunovank/myhddm | https://github.com/dunovank/myhddm | 45aeb0b8a4539cc4260e0b0d58f09c9aab82dcb5 | f0071e7a4b8eb5b3eab8417e3208929d2a1cbc4f | 6a1ee89de8ee887d3877507fa16e09044df14b59 | refs/heads/master | 2015-08-11T13:17:14.539159 | 2015-06-21T20:12:06 | 2015-06-21T20:12:06 | 19,521,181 | 1 | 0 | null | null | null | null | null
[
{
"alpha_fraction": 0.6365470886230469,
"alphanum_fraction": 0.6652466654777527,
"avg_line_length": 25.85454559326172,
"blob_id": "5e823c1b2ed5c5263b9a446f48b17ce7f96544c0",
"content_id": "31016e46ccc1924357cf9842ee612f9f7b8ead69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4460,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 165,
"path": "/diffusionp.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"\nTakes conds_df file and makes a pandas dataframe\nfor each parameter\n\t\trows: subs\n\t\tcols: cues\n\nTODO: easy access to sdt subj parameters \n \n\t1. \tput sdt .csv files for dp_bayes, c_bayes, dp_analytic, \n\t\tand c_analytic in one file for AllPrior models \n\t\t(\"~/HDDM/SDTmodels/allp_estimates/\") and another\n\t\tfor all HNL models (\"~/HDDM/SDTmodels/hnl_estimates/\")\n\t\n\t2.\tread in with pandas using a global path\n\t\t\n\t\t\texample: cbayes=pd.read_csv(\"~/HDDM/SDTmodels/allp_estimates/c_bayes.csv\")\n\t\n\t3. \tThen probably should just concat the two df's want to correlate (axis=0) \n\t\tand run df.corr() then write to .csv... only need to do scipy.stats.stats.pearsonr() \n\t\tif the correlations look good enough\n\"\"\"\n\nfrom __future__ import division\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\ndef get_p(condsdf, par=str(), noise=False):\n\t#z estimates same for face and\n\t#house conds so only get from face columns\n\t\n\tif 'noise' in condsdf.columns and len(condsdf['noise'].unique())>1:\n\t\tcondsdf=condsdf[condsdf['noise']!=69]\n\t\n\tsubp=condsdf.ix[(condsdf['param']==par)]\n\tfor col in subp.columns:\n\t\tif 'face' not in col:\n\t\t\tdel subp[col]\n\n\tif len(condsdf.columns)>=10:\n\t\tsubp.columns=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\telse:\n\t\tsubp.columns=['a80H','b50N', 'c80F']\n\n\tsubp.index=np.arange(len(condsdf['sub'].unique()))\n\n\treturn subp\n\ndef get_dimg(condsdf, par=str(), noise=False):\t\n\t\n\tif 'noise' in condsdf.columns and len(condsdf['noise'].unique())>1:\n\t\tcondsdf=condsdf[condsdf['noise']!=69]\n\t\n\t\n\t#create two df's for subj. estimates of \n\t#param(face) and param(house) then subtract \n\t#each column to create df of difference b/w \n\t#param values across stim type for that \n\t#parameter (i.e. vface-vhouse)\t\n\tsubpf=condsdf.ix[(condsdf['param']==par)]\n\tfor col in subpf.columns:\n\t\tif 'face' not in col:\n\t\t\tdel subpf[col]\n\t\n\tsubph=condsdf.ix[(condsdf['param']==par)]\n\tfor col in subph.columns:\n\t\tif 'house' not in col:\n\t\t\tdel subph[col]\n\n\t#give vf and vh common col id's \n\t#and put subvf-subvh in subdv\n\tif len(condsdf.columns)>=10:\n\t\tsubpf.columns=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\t\tsubph.columns=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\telse:\n\t\tsubpf.columns=['a80H','b50N', 'c80F']\n\t\tsubph.columns=['a80H','b50N', 'c80F']\n\t\n\tdiff_img=subpf-subph\n\tdiff_img.index=np.arange(len(condsdf['sub'].unique()))\n\t\n\treturn diff_img\n\n\ndef get_dnoise(condsdf, par=str(), noise=False):\t\n\n\t#create two df's for subject estimates of \n\t#param(68) and param(69) then subtract \n\t#each column to create df of difference b/w \n\t#param values across stim type for that \n\t#parameter (i.e. 
v68-v69)\n\t\n\tsubp68=condsdf.ix[(condsdf['param']==par)&(condsdf['noise']==68)]\n\tsubp69=condsdf.ix[(condsdf['param']==par)&(condsdf['noise']==69)]\n\t\n\tnlist=[subp68, subp69]\n\t\n\tfor ndf in nlist:\n\t\tfor col in ndf.columns:\n\t\t\tif 'face' not in col:\n\t\t\t\tdel ndf[col]\n\t\tndf.index=np.arange(len(ndf))\n\n\tif len(condsdf.columns)>=10:\n\t\tsubp68.columns=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\t\tsubp69.columns=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\telse:\n\t\tsubp68.columns=['a80H','b50N', 'c80F']\n\t\tsubp69.columns=['a80H','b50N', 'c80F']\n\t\n\t\n\tdiff_noise=subp69-subp68\n\n\treturn diff_noise\n\n\n\ndef get_diffusion(condsdf, depends_on=dict(), save=False):\n\n\t\"\"\"\n\tArgs:\n\n\t\tcondsdf (pandas dataframe):\t\tcolumn id's are cue names \n\t\t\t\t\t\t\t\t\t\t(also sub, param columns but these \n\t\t\t\t\t\t\t\t\t\tare removed by a function in this script)\n\t\t\t\t\t\t\t\t\t\trows are subj parameter estimates\n\n\t\tdepends_on (dict):\t\t\t\tdictionary of all diffusion parameters used\n\t\t\t\t\t\t\t\t\t\tin model which was used to produce condsdf.\n\n\t\t\t\t\t\t\t\t\t\texample:\n\t\t\t\t\t\t\t\t\t\t\tdepends_on={'a':'constant', 'v':'stim', 't':'cue', 'z':'cue'}\n\t::Returns::\n\n\t\tdiffusion_params (dict)\t\t\tdictionary of pandas dataframes, each containing\n\t\t\t\t\t\t\t\t\t\tdiffusion model parameter values across cues (column names) \n\t\t\t\t\t\t\t\t\t\tfor each parameter included in depends_on.\n\n\t\"\"\"\n\n\n\n\tdiffusion_params=dict()\n\tfor param in depends_on.keys():\n\t\tif 'stim' in depends_on[param]:\n\t\t\tpar=get_dimg(condsdf, par=param)\n\t\t\ttag='dimg_'+param\n\t\t\tdiffusion_params[tag]=par\n\t\tif 'noise' in depends_on[param]:\n\t\t\tpar=get_dnoise(condsdf, par=param)\n\t\t\ttag='dnoise_'+param\n\t\t\tdiffusion_params[tag]=par\n\t\tif 'cue' in depends_on[param]:\n\t\t\tpar=get_p(condsdf, par=param)\n\t\t\ttag='cue_'+param\n\t\t\tdiffusion_params[tag]=par\n\n\treturn diffusion_params\n\n\t\t\t\nif __name__ == \"__main__\":\n\tmain()\t\n\n\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t"
},
{
"alpha_fraction": 0.6152053475379944,
"alphanum_fraction": 0.6503185033798218,
"avg_line_length": 30.754934310913086,
"blob_id": "3b89607a40302f8cd314bc756a185189513c6c00",
"content_id": "a93b6e4b6aa673f1b0516ec50948ef81dd0879f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19309,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 608,
"path": "/sdt.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nadd up Hit, Miss, FA, and CR counts \nfor each subject in a simulated\ndataset\n\nFirst run proc.parse_stats(model) to get subdf, condsdf, and pdict\nthen run sim_subs(pdict) to get dataframe, param_dict\n\ncalc SDT counts from dataframe\n\"\"\"\n\n\nfrom __future__ import division\nfrom scipy.stats import norm\nfrom myhddm import diffusionp, defmod\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\ndef counts(dataframe, target='H', num=None):\n\t\"\"\"\n\treturns a dictionary of dataframes, one for each cue\n\n\tcols are sdt measures: \t[CR, FA, H, M]\n\trows are subj. counts: [sub(1)...sub(n)]\n\t\n\t\"\"\"\n\tif 'subj_idx' in dataframe.columns:\n\t\ts='subj_idx'\n\telse:\n\t\ts='sub'\n\t\n\timg='stim'\n\tc='cue'\n\n\tsdt_dict={}\n\tcounts_list=[]\n\tfor cue in dataframe[c].unique():\n\t\tcounts={}\n\t\tif target=='H':\n\t\t\tcounts['H']=[len(cols.ix[(cols[img]=='house')&(cols[c]==cue)&(cols['acc']==1)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['FA']=[len(cols.ix[(cols[img]=='face')&(cols[c]==cue)&(cols['acc']==0)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['M']=[len(cols.ix[(cols[img]=='house')&(cols[c]==cue)&(cols['acc']==0)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['CR']=[len(cols.ix[(cols[img]=='face')&(cols[c]==cue)&(cols['acc']==1)]) for sub, cols in dataframe.groupby(s)]\t\t\t\n\t\telif target=='F':\n\t\t\tcounts['H']=[len(cols.ix[(cols[img]=='face')&(cols[c]==cue)&(cols['acc']==1)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['FA']=[len(cols.ix[(cols[img]=='house')&(cols[c]==cue)&(cols['acc']==0)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['M']=[len(cols.ix[(cols[img]=='face')&(cols[c]==cue)&(cols['acc']==0)]) for sub, cols in dataframe.groupby(s)]\n\t\t\tcounts['CR']=[len(cols.ix[(cols[img]=='house')&(cols[c]==cue)&(cols['acc']==1)]) for sub, cols in dataframe.groupby(s)]\n\t\t\t\n\t\tcountsdf=pd.DataFrame(counts)\n\t\t#countsdf.to_csv(cue+\"_counts.csv\", index=False)\n\t\tsdt_dict[cue]=countsdf\t\n\t\n\treturn sdt_dict\n\ndef calc_sdt(sdtdf):\n\n\tsdtdf['HR']=sdtdf['H']/(sdtdf['H']+sdtdf['M'])\n\tsdtdf['HR'].replace(1.0, .999, inplace=True)\n\tsdtdf['HR'].replace(0.0, .001, inplace=True)\t\n\t\n\tsdtdf['zH']=norm.ppf(sdtdf['HR'])\n\n\tsdtdf['FAR']=sdtdf['FA']/(sdtdf['FA']+sdtdf['CR'])\n\tsdtdf['FAR'].replace(1.0, .999, inplace=True)\n\tsdtdf['FAR'].replace(0.0, .001, inplace=True)\n\t\n\tsdtdf['zFA']=norm.ppf(sdtdf['FAR'])\n\n\tsdtdf['dp']=sdtdf['zH']-sdtdf['zFA']\n\tsdtdf['c']=-0.5*(sdtdf['zH']+sdtdf['zFA'])\n\n\treturn sdtdf\n\ndef get_hit_fa(sdt_dict):\n\t\n\tfor df in sdt_dict:\n\t\tdf=calc_sdt(sdt_dict[df])\n\n\tnsubs=len(sdt_dict[sdt_dict.keys()[0]])\n\n\tif len(sdt_dict)==5:\n\t\tcols=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\telse:\n\t\tcols=['a80H', 'b50N', 'c80F']\n\n\thr_df=pd.DataFrame(np.zeros(nsubs*len(sdt_dict)).reshape(nsubs, len(sdt_dict)), columns=cols)\n\tfa_df=pd.DataFrame(np.zeros(nsubs*len(sdt_dict)).reshape(nsubs, len(sdt_dict)), columns=cols)\n\n\tfor k in sdt_dict.keys():\n\t\thr_df[k]=sdt_dict[k]['HR']\n\t\tfa_df[k]=sdt_dict[k]['FAR']\n\t\t\n\treturn hr_df, fa_df\n\n\ndef get_hr_fa_info(dataframe, target='H', save=False):\n\t\n\tsdt_dict=counts(dataframe, target=target)\n\thr_df, fa_df=get_hit_fa(sdt_dict)\n\t\n\tif target=='H':\n\t\ttag=\"house\"\n\telif target=='F':\n\t\ttag=\"face\"\n\t\t\n\thr_mean=hr_df.mean() \n\tfa_mean=fa_df.mean()\n\thr_std=hr_df.std() 
\n\tfa_std=fa_df.std()\n\thr_stderr=hr_std/np.sqrt(len(hr_df))\n\tfa_stderr=fa_std/np.sqrt(len(fa_df))\n\t\n\thr_info=pd.DataFrame({'HR_Mean':hr_mean, 'HR_SE':hr_stderr})\n\tfa_info=pd.DataFrame({'FA_Mean':fa_mean, 'FA_SE':fa_stderr})\n\t\n\tif save:\n\t\thr_df.to_csv(tag+\"_target_hr_all.csv\", index=False)\n\t\tfa_df.to_csv(tag+\"_target_fa_all.csv\", index=False)\n\t\thr_info.to_csv(tag+\"_target_hr_agg.csv\", index=False)\n\t\tfa_info.to_csv(tag+\"_target_fa_agg.csv\", index=False)\n\t\n\treturn hr_info, fa_info\n\t\n\t\ndef plot_rates(dataframe, target='H'):\n\t\n\tsdt_dict=counts(dataframe, target=target)\n\thr_df, fa_df=get_hit_fa(sdt_dict)\n\t\n\thr_mean=hr_df.mean(); fa_mean=fa_df.mean()\n\thr_std=hr_df.std(); fa_std=fa_df.std()\n\t\n\tif target=='H':\n\t\txlist=np.array(['50/50', '70H', '90H'])\n\t\thr_ylist=np.array([hr_mean[2], hr_mean[1], hr_mean[0]])\n\t\tfa_ylist=np.array([fa_mean[2], fa_mean[1], fa_mean[0]])\n\t\thr_err_ylist=np.array([hr_std[2], hr_std[1], hr_std[0]])\n\t\tfa_err_ylist=np.array([fa_std[2], fa_std[1], fa_std[0]])\n\t\ttitle_target='House'\n\telif target=='F':\n\t\txlist=np.array(['50/50', '70F', '90F'])\n\t\thr_ylist=np.array([hr_mean[2], hr_mean[3], hr_mean[4]])\n\t\tfa_ylist=np.array([fa_mean[2], fa_mean[3], fa_mean[4]])\n\t\thr_err_ylist=np.array([hr_std[2], hr_std[3], hr_std[4]])\n\t\tfa_err_ylist=np.array([fa_std[2], fa_std[3], fa_std[4]])\n\t\ttitle_target='Face'\n\t\n\thr_err_ylist=hr_err_ylist/np.sqrt(len(hr_df))\t\n\tfa_err_ylist=fa_err_ylist/np.sqrt(len(fa_df))\t\n\t\n\tfig, axes=plt.subplots(2, figsize=(7, 10))\n\tfig.subplots_adjust(top=0.92, hspace=0.30, left=0.15, right=0.96, bottom=0.12)\n\thr_ax=axes[0]\n\tfa_ax=axes[1]\n\txnum=np.array([1, 2, 3])\n\t\n\thr_ax.errorbar(xnum, hr_ylist, yerr=hr_err_ylist, lw=7.0, elinewidth=4.5, capsize=0, color='LimeGreen', ecolor='k')\n\tfa_ax.errorbar(xnum, fa_ylist, yerr=fa_err_ylist, lw=7.0, elinewidth=4.5, capsize=0, color='Red', ecolor='k')\n\tax_list=[hr_ax, fa_ax]\n\t#hr_ax.set_title(title_target, fontsize=40) \n\thr_ax.title.set_y(1.03)\n\thr_ax.set_ylim(0.7, 1.0)\n\tfa_ax.set_ylim(0.0, 0.4)\n\thr_ax.set_yticks(np.array([0.7, 0.8, 0.9, 1.0]))\n\tfa_ax.set_yticks(np.arange(0, 0.5, .10))\n\thr_ax.set_ylabel('Hit Rate', fontsize=30, labelpad=.5)\n\tfa_ax.set_ylabel('FA Rate', fontsize=30, labelpad=.5)\n\tfa_ax.set_xlabel('Prior Probability Cue', fontsize=30, labelpad=.5)\n\t\n\tfor ax in ax_list:\n\t\tax.set_xlim(0.5, 3.5)\n\t\tax.set_xticks(np.array([1, 2, 3]))\n\t\tax.set_xticklabels(xlist, fontsize=30)\n\t\t#ax.set_yticks(np.linspace(0.6, 1.10, .10)\n\t\tfor tick in ax.yaxis.get_major_ticks():\n\t\t tick.label.set_fontsize(22)\n\t\n\t\n\tplt.savefig(title_target+'_Hit_FA.jpeg', format='jpeg', dpi=900)\n\t\n\t\n\t#hr_ax.set_title(title_target, fontsize=40) \n\t#hr_ax.title.set_y(1.03)\n\t#hr_ax.set_ylim(0.6, 1.0)\n\t#fa_ax.set_ylim(0.0, 0.4)\n\t##hr_ax.set_ylabel('Hit Rate', fontsize=28, labelpad=1)\n\t##fa_ax.set_ylabel('FA Rate', fontsize=28, labelpad=1)\n\t#fa_ax.set_xlabel('Prior Probability Cue', fontsize=35, labelpad=.45)\n\t#\n\t#for ax in ax_list:\n\t#\tax.set_xlim(0.5, 3.5)\n\t#\tax.set_xticks(np.array([1, 2, 3]))\n\t#\tax.set_xticklabels(xlist, fontsize=30)\n\t#\tfor tick in ax.yaxis.get_major_ticks():\n\t#\t tick.label.set_fontsize(22)\n\t#\t\n\t#plt.savefig(title_target+'_Hit_FA.jpeg', format='jpeg', dpi=300)\n\t\ndef sim_rates_fill(rate_type='hit', ax=None, x=None, y=None, ind=0, last=np.zeros([3])):\n\t\n\tif 
rate_type=='hit':\n\t\tcline='#D6F5D6'\n\t\tcfill='#D6F5D6'\n\telse:\n\t\tcline='#FFB2B2'\n\t\tcfill='#FFB2B2'\n\ttheo=ax.plot(x, y, '-', color=cline, lw=0.6, alpha=0.2)\n\t\n\t#'-', color='RoyalBlue', lw=0.6, alpha=0.2)\n\t#'-', color='FireBrick', lw=0.6, alpha=0.2)\n\t\n\tif ind!=0:\n\t\ty2=last\n\t\tfill=ax.fill_between(x, y, y-(y-y2), facecolor=cfill, alpha=0.05, lw=0)\n\t\n\treturn y\n\t\n\t\ndef predict_rates(data, simdfs, mname='MSM', target='H'):\n\t\t\n\tsdt_dict=counts(data, target=target)\n\thr_df, fa_df=get_hit_fa(sdt_dict)\n\t\n\thr_mean=hr_df.mean(); fa_mean=fa_df.mean()\n\thr_std=hr_df.std(); fa_std=fa_df.std()\n\t\n\tif target=='H':\n\t\txlist=np.array(['50/50', '70H', '90H'])\n\t\thr_ylist=np.array([hr_mean[2], hr_mean[1], hr_mean[0]])\n\t\tfa_ylist=np.array([fa_mean[2], fa_mean[1], fa_mean[0]])\n\t\thr_err_ylist=np.array([hr_std[2], hr_std[1], hr_std[0]])\n\t\tfa_err_ylist=np.array([fa_std[2], fa_std[1], fa_std[0]])\n\t\ttitle_target='House'\n\telif target=='F':\n\t\txlist=np.array(['50/50', '70F', '90F'])\n\t\thr_ylist=np.array([hr_mean[2], hr_mean[3], hr_mean[4]])\n\t\tfa_ylist=np.array([fa_mean[2], fa_mean[3], fa_mean[4]])\n\t\thr_err_ylist=np.array([hr_std[2], hr_std[3], hr_std[4]])\n\t\tfa_err_ylist=np.array([fa_std[2], fa_std[3], fa_std[4]])\n\t\ttitle_target='Face'\n\t\n\thr_err_ylist=hr_err_ylist/np.sqrt(len(hr_df))\t\n\tfa_err_ylist=fa_err_ylist/np.sqrt(len(fa_df))\n\t\t\t\n\tfig, axes=plt.subplots(2, figsize=(7, 10))\n\tfig.set_tight_layout(True)\t\n\thr_ax=axes[0]\n\tfa_ax=axes[1]\n\t\n\tx=np.array([1, 2, 3])\n\tlast_hr=np.zeros([3])\n\tlast_fa=np.zeros([3])\n\t\n\tfor simn, rest in simdfs.groupby('sim_num'):\n\t\t\n\t\tsim_dict=counts(rest, target=target)\n\t\thr_sim_df, fa_sim_df=get_hit_fa(sim_dict)\n\t\thr_sim_mean=hr_sim_df.mean(); fa_sim_mean=fa_sim_df.mean()\n\t\t\n\t\tif target=='H':\n\t\t\thr_sim_ylist=np.array([hr_sim_mean[2], hr_sim_mean[1], hr_sim_mean[0]])\n\t\t\tfa_sim_ylist=np.array([fa_sim_mean[2], fa_sim_mean[1], fa_sim_mean[0]])\n\t\telif target=='F':\n\t\t\thr_sim_ylist=np.array([hr_sim_mean[2], hr_sim_mean[3], hr_sim_mean[4]])\n\t\t\tfa_sim_ylist=np.array([fa_sim_mean[2], fa_sim_mean[3], fa_sim_mean[4]])\n\t\t\t\n\t\tlast_hr = sim_rates_fill(rate_type='hit', ax=hr_ax, x=x, y=hr_sim_ylist, ind=simn, last=last_hr)\n\t\tlast_fa = sim_rates_fill(rate_type='fa', ax=fa_ax, x=x, y=fa_sim_ylist, ind=simn, last=last_fa)\n\t\t\n\thr_ax.errorbar(x, hr_ylist, yerr=hr_err_ylist, lw=7.0, elinewidth=4.5, capsize=0, color='LimeGreen', ecolor='k')\n\tfa_ax.errorbar(x, fa_ylist, yerr=fa_err_ylist, lw=7.0, elinewidth=4.5, capsize=0, color='Red', ecolor='k')\n\tax_list=[hr_ax, fa_ax]\n\t\n\thr_ax.set_title(title_target, fontsize=38) \n\thr_ax.set_ylabel('Hit Rate', fontsize=35)\n\tfa_ax.set_ylabel('FA Rate', fontsize=35)\n\thr_ax.set_ylim(0.7, 1.0)\n\tfa_ax.set_ylim(0.0, 0.4)\n\thr_ax.set_yticks(np.array([0.7, 0.8, 0.9, 1.0]))\n\tfa_ax.set_yticks(np.arange(0, 0.5, .10))\n\tfa_ax.set_xlabel('Prior Probability Cue', fontsize=38)\n\t\n\tfor ax in ax_list:\n\t\tax.set_xlim(0.5, 3.5)\n\t\tax.set_xticks(np.array([1, 2, 3]))\n\t\tax.set_xticklabels(xlist, fontsize=35)\n\t\tfor tick in ax.yaxis.get_major_ticks():\n\t\t tick.label.set_fontsize(30)\n\t\t\n\t#plt.savefig(target+\"_\"+mname+'.jpeg', format='jpeg', dpi=900)\n\tplt.savefig(target+\"_\"+mname+'.png', format='png', dpi=500)\n\t\ndef get_params(sdt_dict):\n\t\n\tfor df in sdt_dict:\n\t\tdf=calc_sdt(sdt_dict[df])\n\t\n\tnsubs=len(sdt_dict[sdt_dict.keys()[0]])\n\t\n\tif len(sdt_dict)==5:\n\t\tcols=['a90H', 
'b70H', 'c50N', 'd70F', 'e90F']\n\telse:\n\t\tcols=['a80H', 'b50N', 'c80F']\n\t\n\tcdf=pd.DataFrame(np.zeros(nsubs*len(sdt_dict)).reshape(nsubs, len(sdt_dict)), columns=cols)\n\tdpdf=pd.DataFrame(np.zeros(nsubs*len(sdt_dict)).reshape(nsubs, len(sdt_dict)), columns=cols)\n\n\tfor k in sdt_dict.keys():\n\t\tcdf[k]=sdt_dict[k]['c']\n\t\tdpdf[k]=sdt_dict[k]['dp']\n\n\treturn cdf, dpdf\n\ndef plot_params(data):\n\t\n\tfrom scipy.stats import stats\n\t\n\tcountsdf=counts(data)\n\tc, d=get_params(countsdf)\n\t\n\tsem_crit=[]\n\tsem_dp=[]\t\n\tfor cond in c.columns:\n\t\tse_criterion=stats.sem(c[cond])\n\t\tse_dprime=stats.sem(d[cond])\n\t\n\t\tsem_crit.append(se_criterion)\n\t\tsem_dp.append(se_dprime)\n\t\t\n\t\tcmeans=c.describe().ix['mean', :].values\n\t\tdmeans=d.describe().ix['mean', :].values\n\t\n\tx=np.array([1,2,3])\n\tfig_c, ax_c=plt.subplots(1)\n\tfig_d, ax_d=plt.subplots(1)\n\t\n\tplotc=ax_c.errorbar(x, cmeans, yerr=sem_crit, elinewidth=2.5, ecolor='r', color='k', lw=4.0)\n\tplotd=ax_d.errorbar(x, dmeans, yerr=sem_dp, elinewidth=2.5, ecolor='r', color='k', lw=4.0)\n\t\n\tax_list=[ax_c, ax_d]\n\tfor a in ax_list:\n\t\ta.set_xlim(0.5, 3.5)\n\t\ta.set_xticks([1,2,3])\n\t\ta.set_xticklabels(['80H', '50N', '80F'], fontsize=16)\n\t\ta.set_xlabel(\"Prior Probability Cue\", fontsize=20)\n\t\n\tax_c.set_ylabel(\"Criterion (c)\", fontsize=20)\n\tax_d.set_ylabel(\"Discriminability (d')\", fontsize=20)\t\n\t\n\t\n\t#fig_c.savefig(\"criterion.jpeg\", format='jpeg', dpi=400)\n\t#fig_d.savefig(\"dprime.jpeg\", format='jpeg', dpi=400)\n\tfig_c.savefig(\"criterion.png\", format='png', dpi=500)\n\tfig_d.savefig(\"dprime.png\", format='png', dpi=500)\t\n\t\ndef rho_models(dataframe, depends_on, condsdf):\n\t\"\"\"\n\treturns correlation matrix (pd.DataFrame) for sdt params and\n\tall diffusion parameters included in depends_on dict\n\t\n\t\"\"\"\n\tcountsdt=counts(dataframe)\n\tcrit, dprime=get_params(countsdt)\n\t\n\tdiffusion_params=diffusionp.get_diffusion(condsdf, depends_on=depends_on)\n\t\n\tdiff_sdt_rho=dict()\n\n\tfor p in diffusion_params.keys():\n\t\tc=diffusion_params[p].corrwith(crit)\n\t\tdp=diffusion_params[p].corrwith(dprime)\n\t\t\n\t\tctag=p+'_c'\n\t\tdptag=p+'_dp'\n\t\t\n\t\tdiff_sdt_rho[ctag]=c\n\t\tdiff_sdt_rho[dptag]=dp\n\t\t\n\trho_mat=pd.DataFrame(diff_sdt_rho)\n\t\t\n\treturn rho_mat\n\ndef rho_sdt(dataframe, simdf):\n\t\"\"\"\n\treturns correlation matrix (pd.DataFrame) for empirical c, d' against diffusion simulated c, d'\n\t\n\t\"\"\"\n\temp_v_sim={}\n\n\temp_sdt=counts(dataframe)\n\tsim_sdt=counts(simdf)\n\n\t#SAVE SIM SDT COUNTS\n\tfor df in sim_sdt.keys():\n\t\tif df=='e90F':\n\t\t\tsim_sdt[df].to_csv(\"highF.csv\", index=False)\n\t\telif df=='c80F':\n\t\t\tsim_sdt[df].to_csv(\"highF.csv\", index=False)\n\t\telif df=='d70F':\n\t\t\tsim_sdt[df].to_csv(\"medF.csv\", index=False)\n\t\telif df=='c50N':\n\t\t\tsim_sdt[df].to_csv(\"neut.csv\", index=False)\n\t\telif df=='b50N':\n\t\t\tsim_sdt[df].to_csv(\"neut.csv\", index=False)\n\t\telif df=='b70H':\n\t\t\tsim_sdt[df].to_csv(\"medH.csv\", index=False)\n\t\telif df=='a90H':\n\t\t\tsim_sdt[df].to_csv(\"highH.csv\", index=False)\n\t\telif df=='a80H':\n\t\t\tsim_sdt[df].to_csv(\"highH.csv\", index=False)\n\n\n\tsimc, simdp=get_params(sim_sdt)\n\tempc, empdp=get_params(emp_sdt)\n\t\n\tccorr=empc.corrwith(simc)\n\tdpcorr=empdp.corrwith(simdp)\n\n\temp_v_sim['c']=ccorr\n\temp_v_sim['dprime']=dpcorr\n\t\n\tempvsim=pd.DataFrame(emp_v_sim)\n\t#empvsim.to_csv(\"sdt_rho_matrix.csv\")\n\t\n\treturn empvsim\n\ndef 
plot_rho_heatmap():\n\t\n\t#data=pd.read_csv(\"/Users/DunovanK/Desktop/beh_hddm/AllP_dEWMA5.csv\")\n\t\n\temp_c=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Empirical/emp_c.csv')\n\temp_d=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Empirical/emp_d.csv')\n\tmsm_c=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/msm_sims/msm_c.csv')\n\tmsm_d=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/msm_sims/msm_d.csv')\n\tpbm_c=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/pbm_sims/pbm_c.csv')\n\tpbm_d=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/pbm_sims/pbm_d.csv')\n\tdbm_c=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/dbm_sims/dbm_c.csv')\n\tdbm_d=pd.read_csv('/Users/DunovanK/Desktop/beh_hddm/SDTModels/Theoretical/dbm_sims/dbm_d.csv')\n\n\t#print emp_c\n\t#print emp_d\n\t\n\t\n\tvz_c=emp_c.corrwith(msm_c)\n\tvz_d=emp_d.corrwith(msm_d)\n\tz_c=emp_c.corrwith(pbm_c)\n\tz_d=emp_d.corrwith(pbm_d)\n\tv_c=emp_c.corrwith(dbm_c)\n\tv_d=emp_d.corrwith(dbm_d)\t\n\n\t#print \"z_d\", z_d\t\n\t#print \"v_d\", v_d\n\t#print \"vz_d\", vz_d\n\t#\n\t#print \"z_c\", z_c \n\t#print \"v_c\", v_c\n\t#print \"vz_c\", vz_c\n\t\t\n\tcriterion_corr=np.array([vz_c, v_c, z_c])\n\tdprime_corr=np.array([vz_d, v_d, z_d])\n\t\n\t#print criterion_corr\n\t#print dprime_corr\n\t\n\tfig=plt.figure(figsize=(10,14))\n\tfig.set_tight_layout(True)\t\n\t#fig.suptitle(\"Correlation of Empirical and Theoretical SDT Parameters\", fontsize=25)\n\taxc=fig.add_subplot(211)\n\taxd=fig.add_subplot(212)\n\tfig.subplots_adjust(top=.95, hspace=.1, left=0.10, right=.9, bottom=0.1)\n\n\taxc.set_ylim(-0.5, 2.5)\n\taxc.set_yticks([0, 1, 2])\n\taxc.set_yticklabels(['MSM', 'DBM', 'PBM'], fontsize=34)\n\tplt.setp(axc.get_yticklabels(), rotation=90)\n\taxc.set_xlim(-0.5, 4.5)\n\taxc.set_xticks([0, 1, 2, 3, 4])\n\taxc.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=34)\n\taxc.set_title(\"Criterion\", fontsize=36)\n\taxc.set_xlabel(\"Prior Probability Cue\", fontsize=36)\n\n\taxd.set_ylim(-0.5, 2.5)\n\taxd.set_yticks([0, 1, 2])\n\taxd.set_yticklabels(['MSM', 'DBM', 'PBM'], fontsize=34)\n\tplt.setp(axd.get_yticklabels(), rotation=90)\n\taxd.set_xlim(-0.5, 4.5)\n\taxd.set_xticks([0, 1, 2, 3, 4])\n\taxd.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=34)\n\taxd.set_title(\"Discriminability\", fontsize=36)\n\taxd.set_xlabel(\"Prior Probability Cue\", fontsize=36)\n\n\n\taxc_map=axc.imshow(criterion_corr, interpolation='nearest', cmap='Reds', origin='lower', vmin=0.5, vmax=1)\n\tplt.colorbar(axc_map, ax=axc, shrink=0.66)\n\n\taxd_map=axd.imshow(dprime_corr, interpolation='nearest', cmap='Reds', origin='lower', vmin=0.5, vmax=1)\n\tplt.colorbar(axd_map, ax=axd, shrink=0.66)\n\t\n\t#plt.savefig('SDT_Corr.eps', format='eps', dpi=400)\n\t#plt.savefig('SDT_Corr.jpeg', dpi=900)\n\tplt.savefig('SDT_Corr.png', format='png', dpi=500)\n\ndef plot_evs_corr(df, simdf):\n\t\n\te=counts(df)\n\ts=counts(simdf)\n\tec, edp=get_params(e)\n\tsc, sdp=get_params(s)\n\tnconds=len(ec.columns)\n\n\tfig=plt.figure(figsize=(10, 6))\n\tfig.suptitle(\"Correlation of Empirical and Theoretical SDT Parameters\", fontsize=25)\n\tax=fig.add_subplot(111)\n\tfig.subplots_adjust(top=.85)\n\n\tax.set_ylim(-0.5, 1.5)\n\tax.set_yticks([0,1])\n\tax.set_yticklabels(['c', \"d'\"], fontsize=20)\n\tax.set_xlim([-0.5, nconds-0.5])\n\tax.set_xticks(np.arange(nconds))\n\tax.set_xticklabels(ec.columns, 
fontsize=16)\n\n\tcorr_c=ec.corrwith(sc)\n\tcorr_dp=edp.corrwith(sdp)\n\n\t#for i in np.arange(nconds):\n\tax_map=ax.imshow([corr_c, corr_dp], interpolation='nearest', cmap='Reds', origin='lower', vmin=0.1, vmax=1)\n\tplt.colorbar(ax_map, ax=ax, shrink=0.94)\n\tfor i, cond in enumerate(corr_c):\n\t\tax.text(i, 0, \"r=\"+str(corr_c[i])[:4], ha='center', va='center', fontsize=16)\t\n\tfor i, cond in enumerate(corr_dp):\n\t\tax.text(i, 1, \"r=\"+str(corr_dp[i])[:4], ha='center', va='center', fontsize=16)\t\n\ndef plot_rho_sdt(dataframe, simdf):\n\t\n\temp_sdt=counts(dataframe)\n\tsim_sdt=counts(simdf)\n\n\tsimc, simdp=get_params(sim_sdt)\n\tempc, empdp=get_params(emp_sdt)\n\n\t\n\tfigc=plt.figure(figsize=(10, 5))\n\tfigc.suptitle('Correlation of Empirical and HDDM Simulated SDT Criterion (c)' , fontsize=12)\n\tfigc.subplots_adjust(top=0.85, wspace=0.1)\n\tcounter=1\n\tfor sim, emp in zip(simc.columns, empc.columns):\n\t\tax=figc.add_subplot(1,len(simdf.cue.unique()),counter)\n\t\tx=empc[emp]\n\t\ty=simc[sim]\n\t\tm, b=np.polyfit(x, y, 1)\n\t\tax.plot(x, y, 'ko', x, m*x+b, 'r-', lw=3.0, ms=3.5)\n\t\tax.set_title(str(sim))\n\t\t#ax.set_xlabel('Empirical Criterion (c)')\n\t\t#ax.set_ylabel('HDDM Simulated SDT Criterion (c)')\n\t\t#ax.xaxis.set_major_locator(MaxNLocator(nbins = 4))\n\t\t#ax.yaxis.set_major_locator(MaxNLocator(nbins = 6))\n\t\tcounter+=1\n\t\tplt.locator_params(axis='x', nbins=4)\n\t\t\n\t\tfor tick in ax.xaxis.get_major_ticks():\n\t\t tick.label.set_fontsize(10)\n\t\t\n\t\tif ax.is_first_col():\n\t\t\tax.set_ylabel(\"Simulated Criterion (c)\", fontsize=16)\n\t\t\tfor tick in ax.yaxis.get_major_ticks():\n\t\t\t tick.label.set_fontsize(10)\n\t\telse:\n\t\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\tfigc.text(0.4, 0.009, 'Empirical Criterion (c)', fontsize=16)\n\tfigc.savefig('corr_sdt_c.jpeg', format='jpeg', dpi=300)\n\t\n\tfigdp=plt.figure(figsize=(10, 5))\n\tfigdp.suptitle(\"Correlation of Empirical and HDDM Simulated SDT Sensitivity (d')\" , fontsize=12)\n\tfigdp.subplots_adjust(top=0.85, wspace=0.1)\n\tcounter=1\n\tfor sim, emp in zip(simdp.columns, empdp.columns):\n\t\tax=figdp.add_subplot(1,len(simdf.cue.unique()),counter)\n\t\tx=empdp[emp]\n\t\ty=simdp[sim]\n\t\tm, b=np.polyfit(x, y, 1)\n\t\tax.plot(x, y, 'ko', x, m*x+b, 'r-', lw=3.0, ms=3.5)\n\t\tax.set_title(str(sim))\n\t\t#ax.set_xlabel(\"Empirical SDT Sensitivity (d')\")\n\t\t#ax.set_ylabel(\"HDDM Simulated SDT Sensitivity (d')\")\n\t\t#ax.xaxis.set_major_locator(MaxNLocator(nbins = 6))\n\t\t#ax.yaxis.set_major_locator(MaxNLocator(nbins = 6))\n\t\tcounter+=1\n\t\t\n\t\tfor tick in ax.xaxis.get_major_ticks():\n\t\t tick.label.set_fontsize(10)\n\n\t\tif ax.is_first_col():\n\t\t\tax.set_ylabel(\"Simulated Sensitivity (d')\", fontsize=16)\n\t\t\tfor tick in ax.yaxis.get_major_ticks():\n\t\t\t tick.label.set_fontsize(10)\n\t\telse:\n\t\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\tfigdp.text(0.4, 0.009, \"Empirical Sensitivity (d')\", fontsize=16)\n\tfigdp.savefig('corr_sdt_dp.jpeg', format='jpeg', dpi=300)\n\tx=rho_sdt(dataframe, simdf)\n\tx.to_csv(\"sdt_corr.csv\")\n\t\t\nif __name__ == \"__main__\":\n\tmain()\t\n\n"
},
{
"alpha_fraction": 0.5529675483703613,
"alphanum_fraction": 0.6361140608787537,
"avg_line_length": 58.710384368896484,
"blob_id": "edf3a00cd033cce007bdb485d3421f5e1838ae17",
"content_id": "114ffcfabde4c419db9d058c64697ff0dceb298e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21853,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 366,
"path": "/htest.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom scipy.stats.mstats import mquantiles\nfrom my_plots import diff_traces\n\n\n\ndef get_nodes(model, type='vz'):\n\t\n\tif type=='vz':\n\t\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']] \n\t\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']] \n\t\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\t\tvFace=[v90Hface, v70Hface, v50Nface, v70Fface, v90Fface]\n\t\tvHouse=[v90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse]\n\t\tz=[z90H, z70H, z50N, z70F, z90F]\n\t\tvz_nodes={'vF':vFace, 'vH':vHouse, 'z':z}\n\t\treturn vz_nodes\n\telif type=='v':\n\t\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']] \n\t\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']] \n\t\tvFace=[v90Hface, v70Hface, v50Nface, v70Fface, v90Fface]\n\t\tvHouse=[v90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse]\n\t\tv_nodes={'vFace':vFace, 'vHouse':vHouse}\n\t\treturn v_nodes\n\telif type=='z':\n\t\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\t\tz=[z90H, z70H, z50N, z70F, z90F]\n\t\treturn z\n\telse:\n\t\tprint 'Did not recognize type (v+z not supported, just use \"v\")'\n\t\t\n\ndef vz_credible(model):\n\t\"\"\"Prints node summary out to screen\"\"\"\n\t\n\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']] \n\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']] \n\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\t\n\t#vF_list=[v90Hface, v70Hface, v50Nface, v70Fface, v90Fface]\n\t#vH_list=[v90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse]\n\t#z_list=[z90H, z70H, z50N, z70F, z90F]\n\t\n\tall_list=[v90Hface, v70Hface, v50Nface, v70Fface, v90Fface, v90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse, z90H, z70H, z50N, z70F, z90F]\n\n\tfor node in all_list:\n\t\t#n_quant=mquantiles(node, prob=[0.025, 0.975])\n\t\tnode.summary()\n\t\n\t\t\n\t\ndef allp_vz_htest(model):\n\n\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']] \n\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']] \n\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"p(v90H.face < v90F.face) = %s\" % str((v90Hface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v70F.face < v90F.face) = %s\" % str((v70Fface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v90F.face) = %s\" % str((v50Nface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, 
\"p(v90H.face < v70F.face) = %s\" % str((v90Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v70F.face) = %s\" % str((v70Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v70F.face) = %s\" % str((v50Nface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v50N.face) = %s\" % str((v70Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v50N.face) = %s\" % str((v90Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v70H.face) = %s\" % str((v90Hface.trace() < v70Hface.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing drift-rates (v) for HOUSE trials across probability cues:/n/n\"\n\tprint>>fout, \"\\n\"\n\t#probability that left side of inequality is greater in magnitude\n\t#all values will be negative so the actual inequality test is opposite than\n\t#what is printed. Just remember, that \">\" is referring to absolute magnitude\n\t#when comparing drift rates for house trials across cue conditions\n\tprint>>fout, \"p(v90H.house > v90F.house) = %s\" % str((v90Hhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70F.house > v90F.house) = %s\" % str((v70Fhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v90F.house) = %s\" % str((v50Nhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70F.house) = %s\" % str((v90Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v70F.house) = %s\" % str((v70Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v70F.house) = %s\" % str((v50Nhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v50N.house) = %s\" % str((v70Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v50N.house) = %s\" % str((v90Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70H.house) = %s\" % str((v90Hhouse.trace() < v70Hhouse.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(z90H < z90F) = %s\" % str((z90H.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z70F < z90F) = %s\" % str((z70F.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z50N < z90F) = %s\" % str((z50N.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z90H < z70F) = %s\" % str((z90H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z70F) = %s\" % str((z70H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z50N < z70F) = %s\" % str((z50N.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z50N) = %s\" % str((z70H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z50N) = %s\" % str((z90H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z70H) = %s\" % str((z90H.trace() < z70H.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"2.5 and 97.5 '%' QUANTILES for difference between drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"v90H.face - v90F.face = %s\" % str(mquantiles((v90Hface.trace() - v90Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70F.face - v90F.face = %s\" % str(mquantiles((v70Fface.trace() - v90Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v50N.face - v90F.face = %s\" % str(mquantiles((v50Nface.trace() - v90Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.face - v70F.face = %s\" % str(mquantiles((v90Hface.trace() - 
v70Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70H.face - v70F.face = %s\" % str(mquantiles((v70Hface.trace() - v70Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v50N.face - v70F.face = %s\" % str(mquantiles((v50Nface.trace() - v70Fface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70H.face - v50N.face = %s\" % str(mquantiles((v70Hface.trace() - v50Nface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.face - v50N.face = %s\" % str(mquantiles((v90Hface.trace() - v50Nface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.face - v70H.face = %s\" % str(mquantiles((v90Hface.trace() - v70Hface.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\t\n\tprint>>fout, \"2.5 and 97.5 '%' QUANTILES for difference between drift-rates (v) for HOUSE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"v90H.house - v90F.house = %s\" % str(mquantiles((v90Hhouse.trace() - v90Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70F.house - v90F.house = %s\" % str(mquantiles((v70Fhouse.trace() - v90Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v50N.house - v90F.house = %s\" % str(mquantiles((v50Nhouse.trace() - v90Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.house - v70F.house = %s\" % str(mquantiles((v90Hhouse.trace() - v70Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70H.house - v70F.house = %s\" % str(mquantiles((v70Hhouse.trace() - v70Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v50N.house - v70F.house = %s\" % str(mquantiles((v50Nhouse.trace() - v70Fhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v70H.house - v50N.house = %s\" % str(mquantiles((v70Hhouse.trace() - v50Nhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.house - v50N.house = %s\" % str(mquantiles((v90Hhouse.trace() - v50Nhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"v90H.house - v70H.house = %s\" % str(mquantiles((v90Hhouse.trace() - v70Hhouse.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"2.5 and 97.5 '%' QUANTILES for difference between starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"z90H - z90F = %s\" % str(mquantiles((z90H.trace() - z90F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z70F - z90F = %s\" % str(mquantiles((z70F.trace() - z90F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z50N - z90F = %s\" % str(mquantiles((z50N.trace() - z90F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z90H - z70F = %s\" % str(mquantiles((z90H.trace() - z70F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z70H - z70F = %s\" % str(mquantiles((z70H.trace() - z70F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z50N - z70F = %s\" % str(mquantiles((z50N.trace() - z70F.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z70H - z50N = %s\" % str(mquantiles((z70H.trace() - z50N.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z90H - z50N = %s\" % str(mquantiles((z90H.trace() - z50N.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"z90H - z70H = %s\" % str(mquantiles((z90H.trace() - z70H.trace()), prob=[0.025, 0.975]))\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tvF, vH, z=diff_traces(model)\n\tprint>>fout, \"Face Drift Rates\"\n\tprint>>fout, \"\\n\"\n\tfor dtrace in vF:\n\t\tprint>>fout,\"p(node_difference > 0)=%s\" % str((dtrace>0).mean())\n\t\tprint>>fout,\"p(node_difference < 0)=%s\" % str((dtrace<0).mean())\n\t\tprint>>fout, \"\\n\\n\"\n\t\n\tprint>>fout, \"House Drift Rates\"\t\n\tprint>>fout, 
\"\\n\"\t\n\tfor dtrace in vH:\n\t\tprint>>fout,\"p(node_difference > 0)=%s\" % str((dtrace>0).mean())\n\t\tprint>>fout,\"p(node_difference < 0)=%s\" % str((dtrace<0).mean())\n\t\tprint>>fout, \"\\n\\n\"\n\t\n\tprint>>fout, \"Starting-Points\"\t\n\tprint>>fout, \"\\n\"\t\n\tfor dtrace in z:\n\t\tprint>>fout,\"p(node_difference > 0)=%s\" % str((dtrace>0).mean())\n\t\tprint>>fout,\"p(node_difference < 0)=%s\" % str((dtrace<0).mean())\n\t\tprint>>fout, \"\\n\\n\"\n\t\n\tfout.close()\n\n\n\ndef hnl_vz_htest(model):\n\t\n\tv80Hface, v50Nface, v80Fface=model.nodes_db.node[['v(a80H.face)', 'v(b50N.face)','v(c80F.face)']] \n\tv80Hhouse, v50Nhouse, v80Fhouse=model.nodes_db.node[['v(a80H.house)', 'v(b50N.house)', 'v(c80F.house)']]\n\tz80H, z50N, z80F = model.nodes_db.node[['z(a80H)', 'z(b50N)', 'z(c80F)']]\n\t\n\tfout=open('hypothesis_tests.txt', 'w')\n\t\n\tprint>>fout, \"Comparing drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(v80H.face < v80F.face) = %s\" % str((v80Hface.trace() < v80Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v80F.face) = %s\" % str((v50Nface.trace() < v80Fface.trace()).mean())\n\tprint>>fout, \"p(v80H.face < v50N.face) = %s\" % str((v80Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing drift-rates (v) for HOUSE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\n\t#probability that left side of inequality is greater in magnitude\n\t#all values will be negative so the actual inequality test is opposite than\n\t#what is printed. Just remember, that \">\" is referring to absolute magnitude\n\t#when comparing drift rates for house trials across cue conditions\n\tprint>>fout, \"p(v80H.house > v80F.house) = %s\" % str((v80Hhouse.trace() < v80Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v80F.house) = %s\" % str((v50Nhouse.trace() < v80Fhouse.trace()).mean())\n\tprint>>fout, \"p(v80H.house > v50N.house) = %s\" % str((v80Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(z80H < z80F) = %s\" % str((z80H.trace() < z80F.trace()).mean())\n\tprint>>fout, \"p(z50N < z80F) = %s\" % str((z50N.trace() < z80F.trace()).mean())\n\tprint>>fout, \"p(z80H < z50N) = %s\" % str((z80H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\n\tfout.close()\n\ndef allp_v_htest(model):\n\n\n\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']] \n\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']] \n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"p(v90H.face < v90F.face) = %s\" % str((v90Hface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v70F.face < v90F.face) = %s\" % str((v70Fface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v90F.face) = %s\" % str((v50Nface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v70F.face) = %s\" % str((v90Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v70F.face) = %s\" % str((v70Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v70F.face) = %s\" % 
str((v50Nface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v50N.face) = %s\" % str((v70Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v50N.face) = %s\" % str((v90Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v70H.face) = %s\" % str((v90Hface.trace() < v70Hface.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing drift-rates (v) for HOUSE trials across probability cues:/n/n\"\n\tprint>>fout, \"\\n\"\n\t#probability that left side of inequality is greater in magnitude\n\t#all values will be negative so the actual inequality test is opposite than\n\t#what is printed. Just remember, that \">\" is referring to absolute magnitude\n\t#when comparing drift rates for house trials across cue conditions\n\tprint>>fout, \"p(v90H.house > v90F.house) = %s\" % str((v90Hhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70F.house > v90F.house) = %s\" % str((v70Fhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v90F.house) = %s\" % str((v50Nhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70F.house) = %s\" % str((v90Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v70F.house) = %s\" % str((v70Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v70F.house) = %s\" % str((v50Nhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v50N.house) = %s\" % str((v70Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v50N.house) = %s\" % str((v90Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70H.house) = %s\" % str((v90Hhouse.trace() < v70Hhouse.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\n\tfout.close()\n\ndef hnl_v_htest(model):\n\n\tv80Hface, v50Nface, v80Fface=model.nodes_db.node[['v(a80H.face)', 'v(b50N.face)','v(c80F.face)']] \n\tv80Hhouse, v50Nhouse, v80Fhouse=model.nodes_db.node[['v(a80H.house)', 'v(b50N.house)', 'v(c80F.house)']]\n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"p(v80H.face < v80F.face) = %s\" % str((v80Hface.trace() < v80Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v80F.face) = %s\" % str((v50Nface.trace() < v80Fface.trace()).mean())\n\tprint>>fout, \"p(v80H.face < v50N.face) = %s\" % str((v80Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\t\n\t\n\t#probability that left side of inequality is greater in magnitude\n\t#all values will be negative so the actual inequality test is opposite than\n\t#what is printed. 
Just remember, that \">\" is referring to absolute magnitude\n\t#when comparing drift rates for house trials across cue conditions\n\tprint>>fout, \"Comparing drift-rates (v) for HOUSE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(v80H.house > v80F.house) = %s\" % str((v80Hhouse.trace() < v80Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v80F.house) = %s\" % str((v50Nhouse.trace() < v80Fhouse.trace()).mean())\n\tprint>>fout, \"p(v80H.house > v50N.house) = %s\" % str((v80Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tfout.close()\n\n\ndef allp_z_htest(model):\n\n\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(z90H < z90F) = %s\" % str((z90H.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z70F < z90F) = %s\" % str((z70F.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z50N < z90F) = %s\" % str((z50N.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z90H < z70F) = %s\" % str((z90H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z70F) = %s\" % str((z70H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z50N < z70F) = %s\" % str((z50N.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z50N) = %s\" % str((z70H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z50N) = %s\" % str((z90H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z70H) = %s\" % str((z90H.trace() < z70H.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\n\tfout.close()\n\ndef hnl_z_htest(model):\n\n\tz80H, z50N, z80F = model.nodes_db.node[['z(a80H)', 'z(b50N)', 'z(c80F)']]\n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"p(z80H < z80F) = %s\" % str((z80H.trace() < z80F.trace()).mean())\n\tprint>>fout, \"p(z50N < z80F) = %s\" % str((z50N.trace() < z80F.trace()).mean())\n\tprint>>fout, \"p(z80H < z50N) = %s\" % str((z80H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\n\tfout.close()\n\ndef TEST_FUNCTION(model):\n\n\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(90H.face)', 'v(70H.face)', 'v(50N.face)', 'v(70F.face)', 'v(90F.face)']] \n\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(90H.house)', 'v(70H.house)', 'v(50N.house)', 'v(70F.house)', 'v(90F.house)']] \n\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(90H)', 'z(70H)', 'z(50N)', 'z(70F)', 'z(90F)']]\n\n\tfout=open('hypothesis_tests.txt', 'w')\n\n\tprint>>fout, \"Comparing drift-rates (v) for FACE trials across probability cues:\"\n\tprint>>fout, \"\\n\"\t\n\tprint>>fout, \"p(v90H.face < v90F.face) = %s\" % str((v90Hface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v70F.face < v90F.face) = %s\" % str((v70Fface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v90F.face) = %s\" % str((v50Nface.trace() < v90Fface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v70F.face) = %s\" % str((v90Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v70F.face) = %s\" % str((v70Hface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v50N.face < v70F.face) = %s\" % str((v50Nface.trace() < v70Fface.trace()).mean())\n\tprint>>fout, \"p(v70H.face < v50N.face) = %s\" % 
str((v70Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v50N.face) = %s\" % str((v90Hface.trace() < v50Nface.trace()).mean())\n\tprint>>fout, \"p(v90H.face < v70H.face) = %s\" % str((v90Hface.trace() < v70Hface.trace()).mean())\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing drift-rates (v) for HOUSE trials across probability cues:/n/n\"\n\tprint>>fout, \"\\n\"\n\t#probability that left side of inequality is greater in magnitude\n\t#all values will be negative so the actual inequality test is opposite than\n\t#what is printed. Just remember, that \">\" is referring to absolute magnitude\n\t#when comparing drift rates for house trials across cue conditions\n\tprint>>fout, \"p(v90H.house > v90F.house) = %s\" % str((v90Hhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70F.house > v90F.house) = %s\" % str((v70Fhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v90F.house) = %s\" % str((v50Nhouse.trace() < v90Fhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70F.house) = %s\" % str((v90Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v70F.house) = %s\" % str((v70Hhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v50N.house > v70F.house) = %s\" % str((v50Nhouse.trace() < v70Fhouse.trace()).mean())\n\tprint>>fout, \"p(v70H.house > v50N.house) = %s\" % str((v70Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v50N.house) = %s\" % str((v90Hhouse.trace() < v50Nhouse.trace()).mean())\n\tprint>>fout, \"p(v90H.house > v70H.house) = %s\" % str((v90Hhouse.trace() < v70Hhouse.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\tprint>>fout, \"Comparing starting-points (z0) across probability cues:\"\n\tprint>>fout, \"\\n\"\n\tprint>>fout, \"p(z90H < z90F) = %s\" % str((z90H.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z70F < z90F) = %s\" % str((z70F.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z50N < z90F) = %s\" % str((z50N.trace() < z90F.trace()).mean())\n\tprint>>fout, \"p(z90H < z70F) = %s\" % str((z90H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z70F) = %s\" % str((z70H.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z50N < z70F) = %s\" % str((z50N.trace() < z70F.trace()).mean())\n\tprint>>fout, \"p(z70H < z50N) = %s\" % str((z70H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z50N) = %s\" % str((z90H.trace() < z50N.trace()).mean())\n\tprint>>fout, \"p(z90H < z70H) = %s\" % str((z90H.trace() < z70H.trace()).mean())\t\n\tprint>>fout, \"\\n\\n\\n\"\n\t\n\t\n\tfout.close()\n\n\n\nif __name__==\"__main__\":\n\tmain()"
},
{
"alpha_fraction": 0.6368963122367859,
"alphanum_fraction": 0.6521310210227966,
"avg_line_length": 26.340641021728516,
"blob_id": "fd75a8d2a21348b59e407d8d6189b1f0401104d4",
"content_id": "f09b69bbcb5823c520ad5cc2489f6dd5295189ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16213,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 593,
"path": "/parse.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nIncludes Functions For:\n\n\t::Creating dataframe (pandas) stim for hddm.model.gen_stats()\n\n\t::Doing heavy parsing and reformatting\n\t\t**Creates extentions of stats dataframe including columns for:\n\t\t\t\t*parameter name\n\t\t\t\t*subj id\n\t\t\t\t*cue\n\t\t\t\t*cue\n\t\t\t\t*stim\n\t\t\t\t*etc...\n\t\t**Writes reformatted dataframe out to .csv in working dir\n\n\t::Transforming stats output for more convenient access to subj parameters by cue:\n\t\t**Flexible to take AllPriors or HNL coded data\n\t\t**Also able to accomodate different model configurations (i.e. bias_hyp=vz)\n\t\t\t\t*set up for z, v, v+z, and vz\n\t\t**Columns for each cue (e.g. a90H_face, b70H_face, ...e90F_house)\n\t\t**Column indexing parameter name (e.g. 'v', 'a', 'st', etc..)\n\t\t**Column for subj_id\n\n\t::Creating hierarchical dictionary from condsdf:\n\t\t**{subj_x{cue_y{param:param_value}}}\n\t\t**can be sampled from to generate data\n\t\t for each subject/cue using\n\t\t sim_subs() function:\n\t\t \t\tuses hddm.generate.gen_rand_data()\n\t\t\t\tto create a full dataset containing\n\t\t\t\tntrials per sub, per cue.\n\n\t::Doing basic data aggregation for empirical and simulated data\n\t \t**including average over subs RT or accuracy for each cue\n\t\t**calculate SE for RT or acc. for each cue\n\n\nMain Functions:\n\n\t1. parse_stats(model)\n\t\t\t*does all necessary formatting\n\t\t\t in order to plot emp v. sim data\n\n\"\"\"\n\nfrom __future__ import division\nimport hddm\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef stats_df(model, save=False):\n\t\"\"\"\n\tRETURNS: 1\n\t\t*model_stats (pandas DataFrame):\tsame as hddm.HDDM.gen_stats() \twith\n\t\t\t\t\t\t\t\t\t\t\tcolumn added for parameter names\n\t\t\t\t\t\t\t\t\t\t\t(usually call this \"fulldf\")\n\t\"\"\"\n\tif not hasattr(model, 'columns'):\n\t\tmodel_stats=model.gen_stats()\n\t\tmodel_stats['param']=model_stats.index\n\telse:\n\t\tmodel_stats=model\n\n\tslist=list()\n\tfor i in model_stats['param']:\n\t\tx=i.split('.')\n\t\tif x[-1].isdigit():\n\t\t\tsint=int(x[-1])\n\t\t\tslist.append(sint)\n\t\telse: slist.append(\"GRP\")\n\tmodel_stats['sub']=slist\n\n\tif save:\n\t\tmodel_stats.to_csv('fulldf.csv', index=False)\n\n\treturn model_stats\n\ndef parse_stats(minput, varlvl='grp', input_isdf=False, sc=None):\n\t\"\"\"\n\tArguments:\n\n\t\tminput (HDDM model \t\t\t\t\t\t(1)\thddm model complete with MCMC\n\t\tOR pd.DataFrame):\t\t\t\t\t\t\ttraces, stats, etc...\n\t\t\t\t\t\t\t\t\t\t(2) pandas dataframe of hddm.gen_stats()\n\n\tRETURNS 1: parsed_list=[subdf, condsdf, pdict]\n\n\t\t*subdf (pandas DataFrame): \t \t\t\tdataframe containing separate columns\n\t\t\t\t\t\t\t\t\t\t\t\tfor cue, stim, cue+stim, params, etc...\n\t\t\t\t\t\t\t\t\t\t\t\t(is written to a csv file in wd)\n\n\t\t*condsdf (pandas DataFrame):\t\t\tone column for each experimental cue\n\t\t\t\t\t\t\t\t\t\t\t\tcolumns for sub_id and params as well\n\t\t\t\t\t\t\t\t\t\t\t\t(is used to make pdict (which is used for simulating)\n\n\t\t*pdict (dict):\t\t\t\t\t\t\tdictionary created from condsdf, used\n\t\t\t\t\t\t\t\t\t\t\t\tfor simulating data for each sub/cue\n\t\t\t\t\t\t\t\t\t\t\t\tusing hddm.generate.gen_rand_data()\n\t\"\"\"\n\tgrp_dict=None\n\n\tif input_isdf:\n\t\tfulldf=minput\n\n\t\tslist=list()\n\t\tfor i in fulldf['param']:\n\t\t\tx=i.split('.')\n\t\t\tif x[-1].isdigit():\n\t\t\t\tsint=int(x[-1])\n\t\t\t\tslist.append(sint)\n\t\t\telse: 
slist.append(\"GRP\")\n\t\tfulldf['sub']=slist\n\n\t\tfulldf.to_csv(\"fulldf.csv\")\n\n\telse:\n\t\tfulldf=stats_df(model=minput)\n\n\tsubdf=get_subdf(fulldf=fulldf)\n\n\tif hasattr(minput, 'split_param'):\n\t\tsc=minput.split_param\n\telse:\n\t\tsc=sc\n\n\tgrpdf=get_grpdf(fulldf=fulldf)\n\n\tif varlvl=='grp':\n\t\tintervar=['sv', 'sz', 'st']\n\t\tgrp_dict=dict()\n\t\tfor i in fulldf['param']:\n\t\t\tif i in intervar:\n\t\t\t\tgrp_dict[i]=fulldf.ix[(fulldf['param']==i), 'mean'].values[0]\n\n\tif len(subdf.speed.unique())>1:\n\t\tpdict=[]; splist=['fast', 'slow']; cdf_list=[]\n\t\tfor spd in splist:\n\n\t\t\tsubdf_spd=subdf[subdf['speed'].isin([spd, 'constant'])]\n\t\t\tsubdf_spd.index=range(len(subdf_spd))\n\t\t\tcondsdf_spd=simform(subdf=subdf_spd, sc=sc)\n\t\t\tcondsdf_spd.index=condsdf_spd['param']\n\n\t\t\tpdict_spd=create_pdict(condsdf_spd, grp_dict)\n\n\t\t\tcondsdf_spd['speed']=[spd]*len(condsdf_spd)\n\t\t\tsubdf_spd.to_csv(\"subdf_\"+spd+\".csv\", index=False)\n\t\t\tcondsdf_spd.to_csv(\"condsdf_\"+spd+\".csv\", index=False)\n\n\t\t\tcdf_list.append(condsdf_spd)\n\n\t\t\tpdict.append(pdict_spd)\n\n\t\tcondsdf=cdf_list[0].append(cdf_list[1])\n\t\tcondsdf.to_csv(\"condsdf.csv\", index=False)\n\n\telse:\n\t\tcondsdf=simform(subdf=subdf, sc=sc)\n\t\tcondsdf.index=condsdf['param']\n\t\tpdict=create_pdict(condsdf=condsdf, grp_dict=grp_dict)\n\t\tcondsdf.to_csv(\"condsdf.csv\")\n\n\tparsed_list=[subdf, condsdf, pdict]\n\n\treturn parsed_list\n\n\ndef get_subdf(fulldf):\n\t\"\"\"\n\tArguments:\n\n\t\tfulldf (pd.DataFrame):\t\t\tpandas dataframe of hddm.gen_stats() output\n\t\t\t\t\t\t\t\t\t\t(fulldf as in full set of individual and group stats)\n\n\tRETURNS: 1\n\n\t\t*subdf (pandas DataFrame): \t \tdataframe containing separate columns\n\t\t\t\t\t\t\t\t\t\tfor cue, stim, cue+stim, params, etc...\n\t\t\t\t\t\t\t\t\t\t(is written to a csv file in wd)\n\t\"\"\"\n\n\tsubdf=fulldf.ix[(fulldf['sub']!='GRP'), ['sub', 'param', 'mean']]\n\tsubdf.index=range(len(subdf))\n\n\t#Make column for parameter\n\tplist=list()\n\tfor i in subdf.param:\n\t\tp=i.split('_')[0]\n\t\tplist.append(p)\n\tsubdf['parameter']=plist\n\n\tsubdf=txtparse(subdf, 'sub')\n\tsubdf.index=range(len(subdf))\n\n\treturn subdf\n\ndef get_grpdf(fulldf):\n\t\"\"\"\n\tArguments:\n\n\t\tfulldf (pd.DataFrame):\t\t\tpandas dataframe of hddm.gen_stats() output\n\t\t\t\t\t\t\t\t\t\t(fulldf as in full set of individual and group stats)\n\n\tRETURNS: 1\n\n\t\tgrpdf (pandas DataFrame): \t \tdataframe containing separate columns\n\t\t\t\t\t\t\t\t\t\tfor cue, stim, cue+stim, params, etc...\n\t\t\t\t\t\t\t\t\t\t(is written to a csv file in wd)\n\t\"\"\"\n\n\tgrpdf=fulldf.ix[(fulldf['sub']=='GRP'), ['param', 'mean']]\n\t#Make column for parameter\n\tplist=list()\n\tfor i in grpdf.param:\n\t\tif '.' 
in i:\n\t\t\tp=i.split('(')[0]\n\t\telse: p=i\n\t\tplist.append(p)\n\tgrpdf['parameter']=plist\n\n\tgrpdf=txtparse(grpdf, 'group')\n\tgrpdf.index=range(len(grpdf))\n\n\treturn grpdf\n\ndef simform(subdf, sc=None):\n\t\"\"\"\n\tRETURNS: 1\n\n\t\t*condsdf (pandas DataFrame):\tone column for each experimental cue\n\t\t\t\t\t\t\t\t\t\tcolumns for sub_id and params as well\n\t\t\t\t\t\t\t\t\t\t(is used to make pdict (which is used for simulating)\n\n\t\"\"\"\n\n\tgroupdf=False\n\n\tnparams=len(subdf.parameter.unique())\n\n\tnsubs=len(subdf['sub'].unique())\n\tnrows=nsubs*nparams\n\n\tif nrows==nparams:\n\t\tgroupdf=True\n\n\tif len(subdf.cue.unique())<5:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*6).reshape((nrows, 6)), columns=['a80H_face', 'b50N_face', 'c80F_face',\n\t\t\t'a80H_house', 'b50N_house', 'c80F_house'])\n\telse:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*10).reshape((nrows, 10)), columns=['a90H_face', 'b70H_face', 'c50N_face',\n\t\t\t'd70F_face', 'e90F_face', 'a90H_house', 'b70H_house', 'c50N_house', 'd70F_house', 'e90F_house'])\n\n\tcounter=1\n\tfor cond in condsdf.columns:\n\t\tcue_n=cond.split('_')[0]\n\t\timg_n=cond.split('_')[1]\n\n\t\tif counter==1:\n\t\t\tcdf=subdf.ix[subdf['stim'].isin([img_n, 'constant']) & subdf['cue'].isin([cue_n, 'constant']), ['sub', 'parameter', 'mean']]\n\t\t\tcdf.index=range(len(cdf))\n\t\t\tif not groupdf:\n\t\t\t\tcondsdf['sub']=cdf['sub'].values\n\t\t\tcondsdf['param']=cdf['parameter'].values\n\t\telse:\n\t\t\tcdf=subdf.ix[subdf['stim'].isin([img_n, 'constant']) & subdf['cue'].isin([cue_n, 'constant']), ['mean']]\n\t\t\tcdf.index=range(len(cdf))\n\t\tcondsdf[cond]=cdf['mean'].values\n\n\t\tcounter+=1\n\n\tif sc is not None:\n\t\tfor i in condsdf.columns:\n\t\t\tif '_' in i:\n\t\t\t\tisplit=i.split('_')\n\t\t\t\tif 'face' in isplit and sc=='v':\n\t\t\t\t\tcondsdf.ix[(condsdf['param']==sc), i]=abs(condsdf.ix[(condsdf['param']==sc), i])\n\t\t\t\telif 'face' in isplit and sc=='z':\n\t\t\t\t\tcondsdf.ix[(condsdf['param']==sc), i]=1-condsdf.ix[(condsdf['param']==sc), i]\n\n\treturn condsdf\n\n\ndef create_pdict(condsdf, grp_dict=None):\n\t\"\"\"\n\tArguments: condsdf (pandas dataframe)\n\n\tReturns:\n\t\t*pdict (dict):\t\tdict for all subs with parameter names and values\n\t\t\t\t\t\t\testimated for each exp. 
cue included in the\n\t\t\t\t\t\toriginal model.\n\n\t\t \t\t\t\tis used to loop through when simulating\n\t\t\t\t\t\tdata with hddm.generate.gen_rand_data()\n\n\t\t\t\t\t\tstructure:\n\n\t\t\t\t\t\t\t\t{subID{cond{param : param_value}}}\n\t\"\"\"\n\tadd_z=False\n\n\tif 'z' not in condsdf.param.unique():\n\t\tadd_z=True\n\n\tcondsdf.index=condsdf.param\n\n\tpdict=dict()\n\tfor subj, group in condsdf.groupby('sub'):\n\t    sdict=dict()\n\t    for cond in group:\n\t\t\tif cond == 'sub':\n\t\t\t\tcontinue\n\t\t\telif cond == 'param':\n\t\t\t\tcontinue\n\t\t\tsdict[cond]=dict(group[cond])\n\n\t    pdict[subj]=sdict\n\n\tif hasattr(grp_dict, \"keys\"):\n\t\tfor sub in pdict:\n\t\t\tfor cond in pdict[sub]:\n\t\t\t\tpdict[sub][cond]['sv']=grp_dict['sv']\n\t\t\t\tpdict[sub][cond]['st']=grp_dict['st']\n\t\t\t\tpdict[sub][cond]['sz']=grp_dict['sz']\n\t\t\t\tif add_z:\n\t\t\t\t\tpdict[sub][cond]['z']=0.5\n\treturn pdict\n\ndef txtparse(dataframe, lvl):\n\t\"\"\"\n\tParses stats into a dataframe for all subjects or at the group level, depending on \"lvl\"\n\n\t\"\"\"\n\n\t#make column for cue\n\tcondlist=list()\n\tfor i in dataframe.param:\n\t\tif '(' in i:\n\t\t\tcond_name=i.split('(')[1].split(')')[0]\n\t\telse:\n\t\t\tcond_name='constant'\n\n\t\tcondlist.append(cond_name)\n\n\tdataframe['cue']=condlist\n\n\tallnoise=['68', '69']\n\tallcues=['90H', '70H', 'neutral', '70F', '90F',\n\t\t\t\t'50N','a90H', 'b70H', 'c50N', 'd70F',\n\t\t\t\t'e90F', 'a80H', 'b50N', 'c80F']\n\tallimgs=['face', 'house', 'Face', 'House']\n\tallspeed=['fast', 'slow']\n\n\tcuelist=[]; noiselist=[]; stimlist=[]; speedlist=[]\n\n\tlistd={'cue':[allcues, cuelist], 'noise':[allnoise, noiselist], 'stim':[allimgs, stimlist], 'speed':[allspeed, speedlist]}\n\n\tfor i in condlist:\n\t\ti=str(i)\n\t\tfor k in listd.keys():\n\t\t\tkval=[kval for kval in listd[k][0] if kval in i.split('.') or kval==i]\n\n\t\t\tif kval:\n\t\t\t\tlistd[k][1].append(kval[0])\n\t\t\telse:\n\t\t\t\tlistd[k][1].append('constant')\n\n\tdataframe['stim']=listd['stim'][1]\n\tdataframe['cue']=listd['cue'][1]\n\tdataframe['noise']=listd['noise'][1]\n\tdataframe['speed']=listd['speed'][1]\n\n\tdataframe=change_cue(data=dataframe)\n\n\tif lvl=='sub':\n\t\tdataframe.to_csv(\"subdf.csv\", index=False)\n\telse:\n\t\tdataframe.to_csv(\"grpdf.csv\", index=False)\n\n\treturn dataframe\n\n\ndef change_cue(data):\n\n\tif len(data['cue'].unique())>=5:\n\n\t\tif '50N' in data['cue'].unique() or '50N.face' in data['cue'].unique():\n\t\t\tdata.cue.replace('50N', 'c50N', inplace=True)\n\t\t\tdata.cue.replace('90H', 'a90H', inplace=True)\n\t\t\tdata.cue.replace('70H', 'b70H', inplace=True)\n\t\t\tdata.cue.replace('90F', 'e90F', inplace=True)\n\t\t\tdata.cue.replace('70F', 'd70F', inplace=True)\n\n\t\tif 'neutral' in data['cue'].unique() or 'neutral.face' in data['cue'].unique():\n\t\t\tdata.cue.replace('neutral', 'c50N', inplace=True)\n\t\t\tdata.cue.replace('90H', 'a90H', inplace=True)\n\t\t\tdata.cue.replace('70H', 'b70H', inplace=True)\n\t\t\tdata.cue.replace('90F', 'e90F', inplace=True)\n\t\t\tdata.cue.replace('70F', 'd70F', inplace=True)\n\n\n\t\telif '50/50' in data['cue'].unique() or '50/50.face' in data['cue'].unique():\n\t\t\tdata.cue.replace('50/50', 'c50N', inplace=True)\n\t\t\tdata.cue.replace('90H', 'a90H', inplace=True)\n\t\t\tdata.cue.replace('70H', 'b70H', inplace=True)\n\t\t\tdata.cue.replace('90F', 'e90F', inplace=True)\n\t\t\tdata.cue.replace('70F', 'd70F', inplace=True)\n\n\n\n\treturn data\n\ndef get_empirical_means(data, code_type):\n\t\"\"\"\n\tGets empirical 
accuracy and rt means from dataframe\n\n\tRETURNS: 4\n\t\t*face_emp_acc (np.array):\tempirical accuracy means for\n\t\t\t\t\t\t\t\t\tface responses across all cues\n\t\t*house_emp_acc (np.array):\tempirical accuracy means for\n\t\t\t\t\t\t\t\t\thouse responses across all cues\n\t\t*face_emp_rts (np.array):\tempirical response time means for\n\t\t\t\t\t\t\t\t\tcorrect face responses across all cues\n\t\t*house_emp_rts (np.array):\tempirical response time means for\n\t\t\t\t\t\t\t\t\tcorrect house responses across cues\n\t\"\"\"\n\n\tdata['rt']=abs(data['rt'])\n\n\tdata=change_cue(data)\n\n\tif 'acc' not in data.columns:\n\t\t#add accuracy column to simdf\n\t\tdata['acc']=data['response'].values\n\t\tdata.ix[(data['stim']=='house') & (data['acc']==0), 'acc']=2\n\t\tdata.ix[(data['stim']=='house') & (data['acc']==1), 'acc']=0\n\t\tdata.ix[(data['stim']=='house') & (data['acc']==2), 'acc']=1\n\n\n\taccdf=data[['subj_idx', 'cue', 'stim', 'acc']]\n\tacc_pivot=pd.pivot_table(accdf, rows='subj_idx', cols=['stim', 'cue'], values='acc', aggfunc=np.average)\n\n\tallcor=data[data['acc'].isin([1])]\n\tcor_pivot=pd.pivot_table(allcor, values='rt', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\n\n\tfor i in acc_pivot.mean(0):\n\t\tif code_type=='HNL':\n\t\t\tface_emp_acc=np.array(acc_pivot.mean(0)[:3].values)\n\t\t\thouse_emp_acc=np.array(acc_pivot.mean(0)[3:].values)\n\t\t\tface_emp_rts=np.array(cor_pivot.mean(0)[:3].values)\n\t\t\thouse_emp_rts=np.array(cor_pivot.mean(0)[3:].values)\n\t\telse:\n\t\t\tface_emp_acc=np.array(acc_pivot.mean(0)[:5].values)\n\t\t\thouse_emp_acc=np.array(acc_pivot.mean(0)[5:].values)\n\t\t\tface_emp_rts=np.array(cor_pivot.mean(0)[:5].values)\n\t\t\thouse_emp_rts=np.array(cor_pivot.mean(0)[5:].values)\n\n\treturn face_emp_acc, house_emp_acc, face_emp_rts, house_emp_rts\n\ndef get_emp_error_rt(data):\n\n\tdata['rt']=abs(data['rt'])\n\tdata=change_cue(data)\n\n\tallerr=data[data['acc'].isin([0])]\n\terr_pivot=pd.pivot_table(allerr, values='rt', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\n\tfor i in err_pivot.mean(0):\n\n\t\tface_err=np.array(err_pivot.mean(0)[:len(data.cue.unique())].values)\n\t\thouse_err=np.array(err_pivot.mean(0)[len(data.cue.unique()):].values)\n\n\treturn face_err, house_err\n\ndef get_theo_error_rt(simdf):\n\n\tallerr=simdf[simdf['acc'].isin([0])]\n\terr_pivot=pd.pivot_table(allerr, values='rt', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\n\tfor i in err_pivot.mean(0):\n\n\t\tfsim_err=np.array(err_pivot.mean(0)[:len(simdf.cue.unique())].values)\n\t\thsim_err=np.array(err_pivot.mean(0)[len(simdf.cue.unique()):].values)\n\n\treturn fsim_err, hsim_err\n\n\ndef get_theo_rt(simdf, code_type):\n\t\"\"\"\n\tCalculates and returns the average RT for each\n\tsimulated cue (averaged over simulated subject means)\n\n\tRETURNS: 2\n\n\t\t*face_theo_rts (numpy array):\tarray of predicted rt means for\n\t\t \t\t\t\t\t\t\t\tcorrect face responses across all\n\t\t\t\t\t\t\t\t\t\tprob. cues\n\n\t\t*house_theo_rts (numpy array):\tarray of predicted rt means for\n\t \t\t\t\t\t\t\t\t\tcorrect house responses across all\n\t\t\t\t\t\t\t\t\t\tprob. 
cues\n\n\n\t\"\"\"\n\t#GET THEORETICAL RT MEANS\n\tfrom scipy.stats import stats\n\tallcor=simdf[simdf['acc'].isin([1])]\n\tcor_pivot=pd.pivot_table(allcor, values='rt', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\n\tfor i in cor_pivot.mean(0):\n\t\tif code_type=='HNL':\n\t\t\tface_theo_rts=np.array(cor_pivot.mean(0)[:3].values)\n\t\t\thouse_theo_rts=np.array(cor_pivot.mean(0)[3:].values)\n\t\telse:\n\t\t\tface_theo_rts=np.array(cor_pivot.mean(0)[:5].values)\n\t\t\thouse_theo_rts=np.array(cor_pivot.mean(0)[5:].values)\n\n\n\treturn face_theo_rts, house_theo_rts\n\ndef get_theo_acc(simdf, code_type):\n\t\"\"\"\n\tCalculates and returns the average accuracy for each\n\tsimulated condition (averaged over simulated subject means)\n\n\tRETURNS: 2\n\n\t\t*face_theo_acc (numpy array):\tarray of predicted accuracy\n\t\t\t\t\t\t\t\t\t\tmeans for face responses across\n\t\t\t\t\t\t\t\t\t\tall prob. cues\n\n\t\t*house_theo_acc (numpy array): \tarray of predicted accuracy\n\t\t\t\t\t\t\t\t\t\tmeans for house responses across\n\t\t\t\t\t\t\t\t\t\tall prob. cues\n\n\t\"\"\"\n\tfrom scipy.stats import stats\n\n\taccdf=simdf[['subj_idx', 'cue', 'stim', 'acc']]\n\tacc_pivot=pd.pivot_table(accdf, rows='subj_idx', cols=['stim', 'cue'], values='acc', aggfunc=np.average)\n\n\tfor i in acc_pivot.mean(0):\n\t\tif code_type=='HNL':\n\t\t\tface_theo_acc=np.array(acc_pivot.mean(0)[:3].values)\n\t\t\thouse_theo_acc=np.array(acc_pivot.mean(0)[3:].values)\n\t\telse:\n\t\t\tface_theo_acc=np.array(acc_pivot.mean(0)[:5].values)\n\t\t\thouse_theo_acc=np.array(acc_pivot.mean(0)[5:].values)\n\n\treturn face_theo_acc, house_theo_acc\n\ndef get_emp_SE(data, code_type):\n\n\tfrom scipy.stats import stats\n\n\n\tallcor=data[data['acc'].isin([1])]\n\tcor_pivot=pd.pivot_table(allcor, values='rt', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\tacc_pivot=pd.pivot_table(data, values='acc', cols=['stim', 'cue'], rows=['subj_idx'], aggfunc=np.average)\n\t#Get theoretical RT S.E.M's\n\tsem_rt=[]\n\tfor img, cue in cor_pivot.columns:\n\t\tx=stats.sem(cor_pivot[img][cue])\n\t\tsem_rt.append(x)\n\n\t#Get theoretical ACCURACY S.E.M's\n\tsem_acc=[]\n\tfor img, cue in acc_pivot.columns:\n\t\tx=stats.sem(acc_pivot[img][cue])\n\t\tsem_acc.append(x)\n\n\tif code_type=='HNL':\n\t\tface_emp_acc_SE=sem_acc[:3]\n\t\thouse_emp_acc_SE=sem_acc[3:]\n\t\tface_emp_rts_SE=sem_rt[:3]\n\t\thouse_emp_rts_SE=sem_rt[3:]\n\telse:\n\t\tface_emp_acc_SE=sem_acc[:5]\n\t\thouse_emp_acc_SE=sem_acc[5:]\n\t\tface_emp_rts_SE=sem_rt[:5]\n\t\thouse_emp_rts_SE=sem_rt[5:]\n\n\tsem_list=[face_emp_acc_SE, house_emp_acc_SE, face_emp_rts_SE, house_emp_rts_SE]\n\n\treturn sem_list\n\n\nif __name__ == \"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.6124970316886902,
"alphanum_fraction": 0.6615189909934998,
"avg_line_length": 34.94022750854492,
"blob_id": "8acec81e4ea653f7f4c69cb30b5f6c402a9fa9c9",
"content_id": "b8714ff773c5cf31665d4e0e5b505defd91b309f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37881,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 1054,
"path": "/vis.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom myhddm import sims, parse\nimport hddm\nimport kabuki\nfrom kabuki.utils import interpolate_trace\nfrom scipy.stats.mstats import mquantiles\nfrom matplotlib import rc\nimport os\nimport seaborn as sns\n\ndef get_nodes(model, nodes, project='img'):\n\n\tif project=='behav':\n\n\t\tif nodes=='z':\n\t\t\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\t\t\tzlist=[z90H, z70H, z50N, z70F, z90F]\n\t\t\treturn zlist\n\n\t\telif nodes=='vf':\n\t\t\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']]\n\t\t\tvflist=[v90Hface, v70Hface, v50Nface, v70Fface, v90Fface]\n\t\t\treturn vflist\n\n\t \telif nodes=='vh':\n\t\t\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']]\n\t\t\tvhlist=[v90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse]\n\t\t\treturn vhlist\n\n\telse:\n\t\tif nodes=='z':\n\t\t\tz80H, z50N, z80F = model.nodes_db.node[['z(a80H)','z(b50N)', 'z(c80F)']]\n\t\t\tzlist=[z80H, z50N, z80F]\n\t\t\treturn zlist\n\n\t\telif nodes=='vf':\n\t\t\tv80Hface, v50Nface, v80Fface=model.nodes_db.node[['v(a80H.face)', 'v(b50N.face)', 'v(c80F.face)']]\n\t\t\tvflist=[v80Hface, v50Nface, v80Fface]\n\t\t\treturn vflist\n\n\t \telif nodes=='vh':\n\t\t\tv80Hhouse, v50Nhouse, v80Fhouse=model.nodes_db.node[['v(a80H.house)', 'v(b50N.house)', 'v(c80F.house)']]\n\t\t\tvhlist=[v80Hhouse, v50Nhouse, v80Fhouse]\n\t\t\treturn vhlist\n\n\ndef plot_posterior_nodes(model, param_nodes, bins=100, lb=None, ub=None):\n\n\tsns.set_style('white')\n\tsns.despine()\n\n\t#title='Generic Title'\n\tif param_nodes=='z':\n\t\tnodes=get_nodes(model, 'z')\n\t\txlabel='Mean Starting-Point' + r'$\\/(\\mu_{z})$'\n\t\tlb=.55\n\t\tub=.75\n\t\txxticks=np.arange(.55, .80, .05)\n\telif param_nodes=='vf':\n\t\tnodes=get_nodes(model, 'vf')\n\t\txlabel='Mean Face Drift-Rate' + r'$\\/(\\mu_{vF})$'\n\t\tlb=.4\n\t\tub=1.2\n\t\txxticks=np.arange(.4, 1.4, .2)\n\telif param_nodes=='vh':\n\t\tnodes=get_nodes(model, 'vh')\n\t\txlabel='Mean House Drift-Rate' + r'$\\/(\\mu_{vH})$'\n\t\tlb=-1.75\n\t\tub=-.7\n\t\txxticks=np.arange(-1.8, -.6, .3)\n\telse:\n\t\tprint \"Must provide argument: 'z', 'vf', or 'vh'\"\n\n\tfig=plt.figure()\n\tfig.subplots_adjust(top=0.95, wspace=0.12, left=0.12, right=0.88, bottom=0.16)\n\tsns.despine()\n\t#fig.suptitle(title, fontsize=20)\n\t#fig.suptitle(title, fontsize=40)\n\n\tif lb is None:\n\t\tlb = min([min(node.trace()[:]) for node in nodes])\n\tif ub is None:\n\t\tub = max([max(node.trace()[:]) for node in nodes])\n\n\tx_data = np.linspace(lb, ub, 600)\n\t#colors=['Green', 'LimeGreen', 'Black', 'Cyan', 'Blue']\n\tcolors=['#e74c3c','#6C7A89', '#4168B7', 'Cyan', 'Blue']\n\tcolor_i=0\n\tfor node in nodes:\n\t\ttrace = node.trace()[:]\n\t\t#hist = interpolate_trace(x_data, trace, range=(trace.min(), trace.max()), bins=bins)\n\t\thist = interpolate_trace(x_data, trace, range=(lb, ub), bins=bins)\n\t\tplt.plot(x_data, hist, label=node.__name__, lw=2., color=colors[color_i])\n\t\tplt.fill_between(x_data, hist, 0, label=node.__name__, color=colors[color_i], alpha=0.5)\n\t\tax=plt.gca()\n\t\tax.set_xlim(lb, ub)\n\t\tplt.setp(ax.get_yticklabels(), 
visible=False)\n\t\tax.set_xticks(xxticks)\n\t\tax.set_xticklabels(xxticks)\n\t\tsns.despine()\n\t\tax.set_ylabel('Probability Mass', fontsize=22, labelpad=12)\n\t\tax.set_xlabel(xlabel, fontsize=22, labelpad=13)\n\t\tplt.setp(ax.get_xticklabels(), fontsize=16)\n\t\t#plt.locator_params(axis='x', nbins=7)\n\t\tcolor_i+=1\n\t\tsns.despine()\n\t#leg = plt.legend(loc='best', fancybox=True)\n\t#leg.get_frame().set_alpha(0.5)\n\tplt.ylim(ymin=0)\n\tsns.despine()\n\tplt.savefig(str(param_nodes)+'_posterior_nodes.png', dpi=600)\n\t#plt.savefig(str(param_nodes)+'_posterior_nodes.pdf', format='pdf')\n\n\ndef diff_traces(model, output='all', project='behav'):\n\t\"\"\"\n\tchange output to 'neut' if just want difference\n\tbw face and house drift at neutral condition\n\t\"\"\"\n\tif project=='behav':\n\t\tv90Hface, v70Hface, v50Nface, v70Fface, v90Fface=model.nodes_db.node[['v(a90H.face)', 'v(b70H.face)', 'v(c50N.face)', 'v(d70F.face)', 'v(e90F.face)']]\n\t\tv90Hhouse, v70Hhouse, v50Nhouse, v70Fhouse, v90Fhouse=model.nodes_db.node[['v(a90H.house)', 'v(b70H.house)', 'v(c50N.house)', 'v(d70F.house)', 'v(e90F.house)']]\n\t\tz90H, z70H, z50N, z70F, z90F = model.nodes_db.node[['z(a90H)', 'z(b70H)', 'z(c50N)', 'z(d70F)', 'z(e90F)']]\n\n\t\txtrace=abs(v50Nhouse.trace()) - abs(v50Nface.trace())\n\t\tvf1_trace=v90Hface.trace() - v50Nface.trace()\n\t\tvf2_trace=v70Hface.trace() - v50Nface.trace()\n\t\tvf3_trace=v70Fface.trace() - v50Nface.trace()\n\t\tvf4_trace=v90Fface.trace() - v50Nface.trace()\n\t\tvf_list=[vf1_trace, vf2_trace, vf3_trace, vf4_trace]\n\n\t\tvh1_trace=v90Hhouse.trace() - v50Nhouse.trace()\n\t\tvh2_trace=v70Hhouse.trace() - v50Nhouse.trace()\n\t\tvh3_trace=v70Fhouse.trace() - v50Nhouse.trace()\n\t\tvh4_trace=v90Fhouse.trace() - v50Nhouse.trace()\n\t\tvh_list=[vh1_trace, vh2_trace, vh3_trace, vh4_trace]\n\n\t\tz1_trace=z90H.trace() - z50N.trace()\n\t\tz2_trace=z70H.trace() - z50N.trace()\n\t\tz3_trace=z70F.trace() - z50N.trace()\n\t\tz4_trace=z90F.trace() - z50N.trace()\n\t\tz_list=[z1_trace, z2_trace, z3_trace, z4_trace]\n\n\n\telse:\n\t\tz80H, z50N, z80F = model.nodes_db.node[['z(a80H)','z(b50N)', 'z(c80F)']]\n\t\tv80Hhouse, v50Nhouse, v80Fhouse=model.nodes_db.node[['v(a80H.house)', 'v(b50N.house)', 'v(c80F.house)']]\n\t\tv80Hface, v50Nface, v80Fface=model.nodes_db.node[['v(a80H.face)', 'v(b50N.face)', 'v(c80F.face)']]\n\n\t\t#xtrace=abs(v50Nhouse.trace()) - abs(v50Nface.trace())\n\t\tvf1_trace=v50Nface.trace() - v80Hface.trace()\n\t\tvf2_trace=v80Fface.trace() - v50Nface.trace()\n\t\tvf3_trace=v80Fface.trace() - v80Hface.trace()\n\n\t\tvf_list={'NH':vf1_trace, 'FN':vf2_trace, 'FH':vf3_trace}\n\n\t\tvh1_trace=abs(v50Nhouse.trace()) - abs(v80Fhouse.trace())\n\t\tvh2_trace=abs(v80Hhouse.trace()) - abs(v50Nhouse.trace())\n\t\tvh3_trace=abs(v80Hhouse.trace()) - abs(v80Fhouse.trace())\n\t\tvh_list={'NF':vh1_trace, 'HN':vh2_trace, 'HF':vh3_trace}\n\n\t\tz1_trace=z50N.trace() - z80H.trace()\n\t\tz2_trace=z80F.trace() - z50N.trace()\n\t\tz3_trace=z80F.trace() - z80H.trace()\n\t\tz_list={'NH':z1_trace, 'FN':z2_trace, 'FH':z3_trace}\n\n\tif output=='all':\n\t\treturn vf_list, vh_list, z_list\n\telse:\n\t\treturn xtrace\n\ndef plot_neutral_traces(model):\n\n\tsns.set_style('white')\n\n\tc=diff_traces(model, output='neut')\n\n\tsave_fig=True\n\tx_axis_list=r'$\\mu_{vH}\\/-\\/\\mu_{vF}$'\n\n\n\tfig=plt.figure()\n\tfig.subplots_adjust(top=0.95, wspace=0.12, left=0.12, right=0.88, bottom=0.16)\n\tsns.despine()\n\n\tax=fig.add_subplot(111)\n\tsns.despine()\n\n\tax.hist(c, bins=20, facecolor='DarkBlue', 
alpha=0.4)\n\tsns.despine()\n\tc_quantiles=mquantiles(c, prob=[0.025, 0.975])\n\tax.axvline(c_quantiles[0], lw=2.0, ls='--', color='Black', alpha=0.4)\n\tax.axvline(c_quantiles[1], lw=2.0, ls='--', color='Black', alpha=0.4)\n\tc_mean=str(c.mean())[:4]\n\tc_lower=str(c_quantiles[0])[:5]\n\tc_upper=str(c_quantiles[1])[:4]\n\n\tax.text(0.5, .92, r'$\\mu_\\Delta=%s;\\/\\/95%sCI[%s, %s]$' % (c_mean, \"\\%\", c_lower, c_upper), fontsize=22, va='center', ha='center', transform=ax.transAxes)\n\n\tpos_float=(c>0).mean()*100\n\tneg_float=(c<0).mean()*100\n\tpos_str=str(pos_float)\n\tneg_str=str(neg_float)\n\tpos=pos_str[:5]\n\tneg=neg_str[:4]\n\n\tax.text(0.5, .82, r'$%s%s\\/<\\/0\\/<\\/%s%s$' % (neg, \"\\%\", pos, \"\\%\"), fontsize=22, va='center', ha='center', transform=ax.transAxes)\n\tplt.setp(ax.get_xticklabels(), fontsize=18)\n\tplt.setp(ax.get_yticklabels(), visible=False)\n\tplt.xlim(-0.1, 0.4)\n\tplt.ylim(0, 3000)\n\n\tplt.locator_params(axis='x', nbins=6)\n\n\tax.set_xlabel(r'$\\mu_{vH}\\/-\\/\\mu_{vF}$', fontsize=24, labelpad=13)\n\tax.set_ylabel(\"Probability Mass\", fontsize=22, labelpad=12)\n\n\tplt.savefig(\"face_house_neutral_drift.png\", format=\"png\", dpi=600)\n\t#plt.savefig(\"face_house_neutral_drift.jpeg\", format=\"jpeg\", dpi=900)\n\n\n\ndef plot_diff_traces(model):\n\n\tvf_list, vh_list, z_list=diff_traces(model)\n\n\tparam_list=[vf_list, vh_list, z_list]\n\n\tx_axis_list=[r'$\\mu_{90H}\\/-\\/\\mu_{50N}$', r'$\\mu_{70H}\\/-\\/\\mu_{50N}$', r'$\\mu_{70F}\\/-\\/\\mu_{50N}$', r'$\\mu_{90F}\\/-\\/\\mu_{50N}$']\n\n\tfig=plt.figure(figsize=(38, 5))\n\tfig.subplots_adjust(top=0.95, wspace=0.15, left=0.02, right=0.98, bottom=0.20)\n\t#fig.suptitle('Mean Face Drift-Rate' + r'$\\/(\\mu_{vF})$', fontsize=35)\n\ti=1\n\tfor c in vf_list:\n\t\tax=fig.add_subplot(1, 4, i)\n\t\t#ax.hist(c, bins=15, facecolor='SlateBlue', alpha=0.8)\n\t\tax.hist(c, bins=15, facecolor='Blue', alpha=0.4)\n\t\tc_quantiles=mquantiles(c, prob=[0.025, 0.975])\n\t\tax.axvline(c_quantiles[0], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\t\tax.axvline(c_quantiles[1], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\n\t\tc_mean=str(c.mean())[:5]\n\t\tc_lower=str(c_quantiles[0])[:5]\n\t\tc_upper=str(c_quantiles[1])[:5]\n\n\t\tax.text(0.5, .92, r'$\\mu_\\Delta=%s;\\/95%sCI[%s, %s]}$' % (c_mean, \"\\%\", c_lower, c_upper), fontsize=16, va='center', ha='center', transform=ax.transAxes)\n\n\t\tpos_float=(c>0).mean()*100\n\t\tneg_float=(c<0).mean()*100\n\t\tpos_str=str(pos_float)[:4]\n\t\tneg_str=str(neg_float)[:4]\n\t\tax.text(0.5, .82, r'$%s%s\\/<\\/0\\/<\\/%s%s$' % (neg_str, \"\\%\", pos_str, \"\\%\"), fontsize=17, va='center', ha='center', transform=ax.transAxes)\n\n\t\tax.set_xlabel(x_axis_list[i-1], fontsize=25, labelpad=12)\n\n\t\tplt.ylim(0, 3000)\n\n\t\tplt.locator_params(axis='x', nbins=6)\n\t\tplt.setp(ax.get_xticklabels(), fontsize=16)\n\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\t\t#if ax.is_first_col():\n\t\t#\tax.set_ylabel(\"Probability Mass\", fontsize=30, labelpad=10)\n\t\ti+=1\n\n\tplt.savefig(\"face_drift_comparisons.png\", format=\"png\", dpi=600)\n\t#plt.savefig(\"face_drift_comparisons.jpeg\", format=\"jpeg\", dpi=900)\n\n\n\tfig=plt.figure(figsize=(38, 5))\n\tfig.subplots_adjust(top=0.95, wspace=0.15, left=0.02, right=0.98, bottom=0.20)\n\t#fig.suptitle('Mean House Drift-Rate' + r'$\\/(\\mu_{vH})$', fontsize=35)\n\ti=1\n\tfor c in vh_list:\n\t\tax=fig.add_subplot(1, 4, i)\n\t\t#ax.hist(c, bins=15, facecolor='SlateBlue', alpha=0.8)\n\t\tax.hist(c, bins=15, facecolor='Red', 
alpha=0.4)\n\t\tc_quantiles=mquantiles(c, prob=[0.025, 0.975])\n\t\tax.axvline(c_quantiles[0], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\t\tax.axvline(c_quantiles[1], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\n\t\tc_mean=str(c.mean())[:5]\n\t\tc_lower=str(c_quantiles[0])[:5]\n\t\tc_upper=str(c_quantiles[1])[:5]\n\n\t\tax.text(0.5, .92, r'$\\mu_\\Delta = %s;\\/95%s CI[%s, %s]}$' % (c_mean, \"\\%\", c_lower, c_upper), fontsize=16, va='center', ha='center', transform=ax.transAxes)\n\n\t\tpos_float=(c>0).mean()*100\n\t\tneg_float=(c<0).mean()*100\n\t\tpos_str=str(pos_float)[:4]\n\t\tneg_str=str(neg_float)[:4]\n\t\tax.text(0.5, .82, r'$%s%s\\/<\\/0\\/<\\/%s%s$' % (neg_str, \"\\%\", pos_str, \"\\%\"), fontsize=17, va='center', ha='center', transform=ax.transAxes)\n\n\t\tax.set_xlabel(x_axis_list[i-1], fontsize=25, labelpad=10)\n\n\t\tplt.ylim(0, 3000)\n\n\t\tplt.locator_params(axis='x', nbins=6)\n\t\tplt.setp(ax.get_xticklabels(), fontsize=16)\n\n\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\t\t#if ax.is_first_col():\n\t\t#\tax.set_ylabel(\"Probability Mass\", fontsize=30, labelpad=10)\n\n\t\ti+=1\n\tplt.savefig(\"house_drift_comparisons.png\", format=\"png\", dpi=600)\n\t#plt.savefig(\"house_drift_comparisons.pdf\", format=\"pdf\")\n\t#plt.savefig(\"house_drift_comparisons.jpeg\", format=\"jpeg\", dpi=900)\n\n\n\tfig=plt.figure(figsize=(38, 5))\n\tfig.subplots_adjust(top=0.95, wspace=0.15, left=0.02, right=0.98, bottom=0.20)\n\t#fig.suptitle('Mean Starting-Point' + r'$\\/(\\mu_{z})$', fontsize=35)\n\ti=1\n\tfor c in z_list:\n\t\tax=fig.add_subplot(1, 4, i)\n\t\tax.hist(c, bins=15, facecolor='SlateBlue', alpha=0.4)\n\n\t\tc_quantiles=mquantiles(c, prob=[0.025, 0.975])\n\t\tax.axvline(c_quantiles[0], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\t\tax.axvline(c_quantiles[1], lw=3.0, ls='--', color='DarkGray', alpha=0.5)\n\n\t\tc_mean=str(c.mean())[:5]\n\t\tc_lower=str(c_quantiles[0])[:5]\n\t\tc_upper=str(c_quantiles[1])[:5]\n\n\t\tax.text(0.5, .92, r'$\\mu_\\Delta = %s;\\/95%s CI[%s, %s]}$' % (c_mean, \"\\%\", c_lower, c_upper), fontsize=16, va='center', ha='center', transform=ax.transAxes)\n\n\t\tpos_float=(c>0).mean()*100\n\t\tneg_float=(c<0).mean()*100\n\t\tpos_str=str(pos_float)[:4]\n\t\tneg_str=str(neg_float)[:4]\n\t\tax.text(0.5, .82, r'$%s%s\\/<\\/0\\/<\\/%s%s$' % (neg_str, \"\\%\", pos_str, \"\\%\"), fontsize=17, va='center', ha='center', transform=ax.transAxes)\n\n\t\tax.set_xlabel(x_axis_list[i-1], fontsize=25, labelpad=12)\n\n\t\tplt.ylim(0, 1000)\n\n\t\tplt.locator_params(axis='x', nbins=6)\n\t\tplt.setp(ax.get_xticklabels(), fontsize=16)\n\n\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\n\t\t#if ax.is_first_col():\n\t\t#\tax.set_ylabel(\"Probability Mass\", fontsize=30, labelpad=10)\n\n\t\ti+=1\n\n\tplt.savefig(\"starting_point_comparisons.png\", format=\"png\", dpi=600)\n\t#plt.savefig(\"starting_point_comparisons.pdf\", format=\"pdf\")\n\t#plt.savefig(\"starting_point_comparisons.jpeg\", format=\"jpeg\", dpi=900)\n\t#plt.savefig(\"starting_point_comparisons.tiff\", format=\"tiff\", dpi=900)\n\ndef _plot_posterior_quantiles_node(node, axis, quantiles=(.1, .3, .5, .7, .9),\n samples=100, alpha=.5, hexbin=False,\n value_range=(0, 6),\n data_plot_kwargs=None, predictive_plot_kwargs=None):\n\t\"\"\"Plot posterior quantiles for a single node.\n\n\t:Arguments:\n\n\tnode : pymc.Node\n\t Must be observable.\n\n\taxis : matplotlib.axis handle\n\t Axis to plot into.\n\n\t:Optional:\n\n\tvalue_range : numpy.ndarray\n\t Range over which to evaluate the CDF.\n\n\tsamples 
: int (default=100)\n\t    Number of posterior samples to use.\n\n\talpha : float (default=.5)\n\t    Alpha (transparency) of posterior quantiles.\n\n\thexbin : bool (default=False)\n\t    Whether to plot posterior quantile density\n\t    using hexbin.\n\n\tdata_plot_kwargs : dict (default=None)\n\t    Forwarded to data plotting function call.\n\n\tpredictive_plot_kwargs : dict (default=None)\n\t    Forwarded to predictive plotting function call.\n\n\t\"\"\"\n\tquantiles = np.asarray(quantiles)\n\taxis.set_xlim(value_range)\n\taxis.set_ylim((0, 1))\n\n\tsq_lower = np.empty((len(quantiles), samples))\n\tsq_upper = sq_lower.copy()\n\tsp_upper = np.empty(samples)\n\tfor i_sample in range(samples):\n\t    kabuki.analyze._parents_to_random_posterior_sample(node)\n\t    sample_values = node.random()\n\t    sq_lower[:, i_sample], sq_upper[:, i_sample], sp_upper[i_sample] = data_quantiles(sample_values)\n\n\ty_lower = np.dot(np.atleast_2d(quantiles).T, np.atleast_2d(1 - sp_upper))\n\ty_upper = np.dot(np.atleast_2d(quantiles).T, np.atleast_2d(sp_upper))\n\tif hexbin:\n\t    if predictive_plot_kwargs is None:\n\t        predictive_plot_kwargs = {'gridsize': 85, 'bins': 'log', 'extent': (value_range[0], value_range[1], 0, 1)}\n\t    x = np.concatenate((sq_lower, sq_upper))\n\t    y = np.concatenate((y_lower, y_upper))\n\t    axis.hexbin(x.flatten(), y.flatten(), label='post pred lb', **predictive_plot_kwargs)\n\telse:\n\t    if predictive_plot_kwargs is None:\n\t        predictive_plot_kwargs = {'alpha': .3}\n\t    axis.plot(sq_lower, y_lower, 'o', label='post pred lb', color='LimeGreen', markersize=10, markeredgecolor=None, markeredgewidth=0.0, **predictive_plot_kwargs)\n\t    axis.plot(sq_upper, y_upper, 'o', label='post pred ub', color='RoyalBlue', markersize=10, markeredgecolor=None, markeredgewidth=0.0, **predictive_plot_kwargs)\n\n\t# Plot data\n\tdata = node.value\n\tcolor = 'w' if hexbin else 'k'\n\tif data_plot_kwargs is None:\n\t    data_plot_kwargs = {'color': color, 'lw': 2., 'marker': 'o', 'markersize': 10}\n\n\tif len(data) != 0:\n\t    q_lower, q_upper, p_upper = data_quantiles(data)\n\n\t    axis.plot(q_lower, quantiles*(1-p_upper), **data_plot_kwargs)\n\t    axis.plot(q_upper, quantiles*p_upper, **data_plot_kwargs)\n\n\t#axis.set_xlabel('RT')\n\t#axis.set_ylabel('Prob respond')\n\taxis.set_ylim(bottom=0) # Likelihood and histogram can only be positive\n\n\n\ndef _plot_posterior_pdf_node(bottom_node, axis, value_range=None, samples=10, bins=100):\n\t\"\"\"Calculate posterior predictive for a certain bottom node.\n\n\t:Arguments:\n\t\tbottom_node : pymc.stochastic\n\t\t\tBottom node to compute posterior over.\n\n\t\taxis : matplotlib.axis\n\t\t\tAxis to plot into.\n\n\t\tvalue_range : numpy.ndarray\n\t\t\tRange over which to evaluate the likelihood.\n\n\t:Optional:\n\t\tsamples : int (default=10)\n\t\t\tNumber of posterior samples to use.\n\n\t\tbins : int (default=100)\n\t\t\tNumber of bins to compute histogram over.\n\n\t\"\"\"\n\n\tif value_range is None:\n\t\t# Infer from data by finding the min and max from the nodes\n\t\traise NotImplementedError, \"value_range keyword argument must be supplied.\"\n\n\tlike = np.empty((samples, len(value_range)), dtype=np.float32)\n\tfor sample in range(samples):\n\t\tkabuki.analyze._parents_to_random_posterior_sample(bottom_node)\n\t\t# Generate likelihood for parents parameters\n\t\tlike[sample,:] = bottom_node.pdf(value_range)\n\n\ty = like.mean(axis=0)\n\ttry:\n\t\ty_std = like.std(axis=0)\n\texcept FloatingPointError:\n\t\tprint \"WARNING! %s threw FloatingPointError over std computation. 
Setting to 0 and continuing.\" % bottom_node.__name__\n\t\ty_std = np.zeros_like(y)\n\n\t# Plot pp\n\t#axis.plot(value_range, y, label='post pred', color='b')\n\t#axis.fill_between(value_range, y-y_std, y+y_std, color='b', alpha=.6)\n\n\t# Plot data\n\tif len(bottom_node.value) != 0:\n\t\taxis.hist(bottom_node.value.values, normed=True, color='Green',\n\t\t\t\t range=(value_range[0], value_range[-1]), label='data',\n\t\t\t\t bins=bins, histtype='stepfilled', lw=2., alpha=0.4)\n\n\t# Plot pp\n\taxis.plot(value_range, y, label='post pred', color='Blue')\n\taxis.fill_between(value_range, y-y_std, y+y_std, color='Blue', alpha=.6)\n\taxis.set_ylim(bottom=0) # Likelihood and histogram can only be positive\n\taxis.grid(True)\n\n\n\n\ndef plot_avgm_predictive(model, plot_func=None, required_method='pdf', columns=None, save=True, path=None, figsize=(12,11), format='jpeg', **kwargs):\n\n\tfrom hddm import utils\n\n\tif 'value_range' not in kwargs:\n\t\t# default the evaluation range to span the observed RTs\n\t\trt = np.abs(model.data['rt']); kwargs['value_range'] = (min(rt.min()-.2, 0), rt.max())\n\n\tif plot_func is None:\n\t\tpltmethod='_quant'\n\t\tplot_func = utils._plot_posterior_quantiles_node\n\telse:\n\t\tpltmethod='_quant'\n\tobserveds = 
model.get_observeds()\n\n\tif columns is None:\n\t\t# If there are less than 3 items to plot per figure,\n\t\t# only use as many columns as there are items.\n\t\tmax_items = max([len(i[1]) for i in\n\t\t\t\t\t\t observeds.groupby('tag').groups.iteritems()])\n\t\tcolumns = min(3, max_items)\n\n\t# Plot different conditions (new figure for each)\n\tfor tag, nodes in observeds.groupby('tag'):\n\t\tfig = plt.figure(figsize=figsize)\n\t\tfig.suptitle(tag, fontsize=18)\n\t\tfig.subplots_adjust(top=0.95, hspace=0.10, wspace=0.10)\n\n\t\t# Plot individual subjects (if present)\n\t\tfor subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):\n\t\t\tif not hasattr(bottom_node['node'], required_method):\n\t\t\t\tcontinue # skip nodes that do not define the required_method\n\n\t\t\tax = fig.add_subplot(np.ceil(len(nodes)/columns), columns, subj_i+1)\n\n\t\t\tif 'subj_idx' in bottom_node:\n\t\t\t\t#ax.set_title(str(bottom_node['subj_idx']))\n\t\t\t\tax.text(0.05, 0.95, (str(bottom_node['subj_idx'])),\n\t\t\t\t\tva='top', transform=ax.transAxes,\n\t\t\t\t\tfontsize=16, fontweight='bold')\n\n\t\t\tplot_func(bottom_node['node'], ax, **kwargs)\n\n\t\tfor ax in fig.axes:\n\t\t\tif ax.is_last_row():\n\t\t\t\tax.set_xlabel(\"Response Time\", fontsize=16)\n\t\t\t\tfor tick in ax.xaxis.get_major_ticks():\n\t\t\t\t tick.label.set_fontsize(14)\n\t\t\telse:\n\t\t\t\tplt.setp(ax.get_xticklabels(), visible=False)\n\t\t\tif ax.is_first_col():\n\t\t\t\tax.set_ylabel(\"Probability\", fontsize=16)\n\t\t\t\tfor tick in ax.yaxis.get_major_ticks():\n\t\t\t\t tick.label.set_fontsize(14)\n\t\t\telse:\n\t\t\t\tplt.setp(ax.get_yticklabels(), visible=False)\n\n\t\t# Save figure if necessary\n\n\t\tif save:\n\t\t\tfname = str(tag) + pltmethod\n\t\t\tif path is None:\n\t\t\t\tpath = '.'\n\t\t\tif isinstance(format, str):\n\t\t\t\tformat = [format]\n\t\t\t[fig.savefig('%s.%s' % (os.path.join(path, fname), x), format=x) for x in format]\n\n\n\ndef sub_dists(data, nbins=40, save=True):\n\n\tfor i, rest in data.groupby('subj_idx'):\n\t\tfdata=rest[rest['stim']=='face']\n\t\thdata=rest[rest['stim']=='house']\n\n\t\tface=hddm.utils.flip_errors(fdata)\n\t\thouse=hddm.utils.flip_errors(hdata)\n\n\t\tface_rts=face.rt\n\t\thouse_rts=house.rt\n\n\t\tsubj_fig=plt.figure(figsize=(14, 8), dpi=150)\n\t\taxF = subj_fig.add_subplot(211, xlabel='RT', ylabel='count', title='FACE RT distributions')\n\t\taxH = subj_fig.add_subplot(212, xlabel='RT', ylabel='count', title='HOUSE RT distributions')\n\n\t\taxF.hist(face_rts, color='DodgerBlue', lw=1.5, bins=nbins, histtype='stepfilled', alpha=0.6)\n\t\taxH.hist(house_rts, color='LimeGreen', lw=1.5, bins=nbins, histtype='stepfilled', alpha=0.6)\n\t\taxF.grid()\n\t\taxH.grid()\n\t\taxF.set_xlim(-6, 6)\n\t\taxH.set_xlim(-6, 6)\n\t\tif save:\n\t\t\tsubj_fig.savefig('Subj'+str(i)+'_RTDist'+'.jpeg', dpi=300)\n\t\telse:\n\t\t\tsubj_fig.show()\n\n\ndef all_dists(data, nbins=40, save=False):\n\n\n\t\tfdata=hddm.utils.flip_errors(data[(data['stim']=='face')])\n\t\thdata=hddm.utils.flip_errors(data[(data['stim']=='house')])\n\n\t\tfig=plt.figure(figsize=(14, 8))\n\t\taxF = fig.add_subplot(211, xlabel='RT', ylabel='count', title='FACE RT distributions')\n\t\taxH = fig.add_subplot(212, xlabel='RT', ylabel='count', title='HOUSE RT distributions')\n\n\t\tfor i, facerts in fdata.groupby('subj_idx'):\n\t\t axF.hist(facerts.rt, bins=nbins, label=str(i), histtype='stepfilled', alpha=0.3)\n\t\tfor i, houserts in hdata.groupby('subj_idx'):\n\t\t axH.hist(houserts.rt, bins=nbins, label=str(i), histtype='stepfilled', 
alpha=0.3)\n\n\t\taxF.grid()\n\t\taxH.grid()\n\t\taxF.set_xlim(-6, 6)\n\t\taxH.set_xlim(-6, 6)\n\n\t\thandles, labels = axF.get_legend_handles_labels()\n\t\tfig.legend(handles[:], labels[:], loc=7)\n\n\t\tif save:\n\t\t\tplt.savefig('AllSubj_RTDists.png')\n\t\telse:\n\t\t\tplt.show()\n\n\ndef pred_rtPLOT(code_type, rt_ax=None, xrt=None, yrtFace=None, yrtHouse=None, mname='EvT', ind=0, flast_rt=np.zeros([5]), hlast_rt=np.zeros([5])):\n\t\"\"\"\n\tPlotting function:\n\t\t*plots average empirical and theoretical accuracy\n\t\t for each condition (averaged over subjects)\n\t\t*the matplotlib fill function covers the area between\n\t\t all simulated datasets -- Better when running multiple\n\t\t simulations. If only running 1 sim, use the between_PLOTs\n\t\t*called by plot_predictive_behav()\n\t\"\"\"\n\n\tf_theo=rt_ax.plot(xrt, yrtFace, '-', color='RoyalBlue', lw=0.3, alpha=0.2)\n\th_theo=rt_ax.plot(xrt, yrtHouse,'-', color=\"#EF4836\", lw=0.3, alpha=0.2)\n\n\tif ind!=0:\n\t\tyrtF2=flast_rt\n\t\tyrtH2=hlast_rt\n\t\tface_fill=rt_ax.fill_between(xrt, yrtFace, yrtFace-(yrtFace-yrtF2), facecolor='RoyalBlue', alpha=0.05)\n\t\thouse_fill=rt_ax.fill_between(xrt, yrtHouse, yrtHouse-(yrtHouse-yrtH2), facecolor=\"#EF4836\", alpha=0.05)\n\n\tif code_type=='HNL':\n\t\trt_ax.set_ylim(1.5, 3.5)\n\t\trt_ax.set_xticks([1, 2, 3])\n\t\trt_ax.set_xlim(0.5, 3.5)\n\t\trt_ax.set_xticklabels(['House', 'Neutral', 'Face'], fontsize=32)\n\t\trt_ax.set_yticks(np.arange(1.5, 4.0, 0.5))\n\t\trt_ax.set_yticklabels(np.arange(1.5, 4.0, 0.5), fontsize=25)\n\t\trt_ax.set_ylabel('Response Time (s)', fontsize=30, labelpad=14)\n\t\trt_ax.set_xlabel('Prior Cue', fontsize=30, labelpad=10)\n\t\tsns.despine()\n\n\telse:\n\t\trt_ax.set_ylim(1.6, 3.6)\n\t\trt_ax.set_xticks([1, 2, 3, 4, 5])\n\t\trt_ax.set_xlim(0.6, 5.5)\n\t\trt_ax.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=18)\n\t\trt_ax.set_yticks(np.arange(1.5, 4.0, 0.5))\n\t\trt_ax.set_yticklabels(np.arange(1.5, 4.0, 0.5), fontsize=18)\n\n\t\tif mname=='pbm':\n\t\t\trt_ax.set_ylabel('Response Time (s)', fontsize=22, labelpad=14)\n\n\t\trt_ax.set_xlabel('Prior Probability Cue', fontsize=22, labelpad=10)\n\n\n\treturn yrtFace, yrtHouse\n\ndef pred_accPLOT(code_type, acc_ax=None, xacc=None, yaccFace=None, yaccHouse=None, mname='EvT', ind=0, flast_acc=np.zeros([5]), hlast_acc=np.zeros([5])):\n\t\"\"\"\n\n\tPlotting function:\n\t\t*plots average empirical and theoretical accuracy\n\t\t for each condition (averaged over subjects)\n\t\t*the matplotlib fill function covers the area between\n\t\t all simulated datasets -- Better when running multiple\n\t\t simulations. 
If only running 1 sim, use the between_PLOTs\n\t\t*called by plot_predictive_behav()\n\n\t\"\"\"\n\n\tf_theo=acc_ax.plot(xacc, yaccFace, '-', color='RoyalBlue', lw=0.3, alpha=0.2)\n\th_theo=acc_ax.plot(xacc, yaccHouse, '-', color=\"#EF4836\", lw=0.3, alpha=0.2)\n\n\tif ind!=0:\n\t\tyaccF2=flast_acc\n\t\tyaccH2=hlast_acc\n\t\tface_fill=acc_ax.fill_between(xacc, yaccFace, yaccFace-(yaccFace-yaccF2), facecolor='RoyalBlue', alpha=0.05)\n\t\thouse_fill=acc_ax.fill_between(xacc, yaccHouse, yaccHouse-(yaccHouse-yaccH2), facecolor=\"#EF4836\", alpha=0.05)\n\n\tif code_type=='HNL':\n\t\tacc_ax.set_ylim(0.7, 1.0)\n\t\tacc_ax.set_xticks([1, 2, 3])\n\t\tacc_ax.set_xlim(0.5, 3.5)\n\t\tacc_ax.set_xticklabels(['House', 'Neutral', 'Face'], fontsize=32)\n\t\tacc_ax.set_yticks(np.arange(0.7, 1.0, .1))\n\t\tacc_ax.set_yticklabels(np.arange(0.7, 1.0, .1), fontsize=25)\n\t\tacc_ax.set_ylabel('Percent Correct', fontsize=30, labelpad=14)\n\t\tacc_ax.set_xlabel('Prior Cue', fontsize=30, labelpad=10)\n\t\tsns.despine()\n\n\telse:\n\n\t\tacc_ax.set_ylim(0.6, 1.0)\n\t\tacc_ax.set_xticks([1, 2, 3, 4, 5])\n\t\tacc_ax.set_xlim(0.5, 5.5)\n\t\tacc_ax.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=18)\n\t\tacc_ax.set_yticks(np.arange(0.6, 1.05, .05))\n\t\tacc_ax.set_yticklabels(np.arange(0.6, 1.05, .05), fontsize=18)\n\n\t\tif mname=='pbm':\n\t\t\tacc_ax.set_ylabel('Proportion Correct', fontsize=22, labelpad=14)\n\t\t\t#acc_ax.set_xlabel('Prior Probability Cue', fontsize=22, labelpad=10)\n\n\treturn yaccFace, yaccHouse\n\n\ndef predict_from_simdfs(data, simdfs, save=True, mname='EvT', plot_legend=False):\n\t\"\"\"\n\tArguments:\n\n\t\tdata (pandas df):\t\tpandas dataframe with the empirical data used\n\t\t\t\t\t\t\t\tto fit the model to generate the simulation parameters\n\n\t\tsimdfs (pandas df): pandas dataframe with multiple simulated datasets\n\n\n\t*plot behavioral data against model predictions from multiple simulations\n\n\t*If save=True, will save RT and ACC plots to working dir\n\t\"\"\"\n\n\tsns.set_style(\"white\")\n\n\tif len(data.cue.unique())==3:\n\t\tx=np.array([1,2,3])\n\t\tcode_type='HNL'\n\telse:\n\t\tx=np.array([1, 2, 3, 4, 5])\n\t\tcode_type='AllP'\n\n\tface_acc, house_acc, face_rt, house_rt=parse.get_empirical_means(data=data, code_type=code_type)\n\tsem_list=parse.get_emp_SE(data, code_type)\n\n\t#init sep figure, axes for RT & ACC data\n\t#fig_rt, ax_rt=plt.subplots(1)\n\tfig_rt, ax_rt=plt.subplots(1, figsize=(10,7))\n\tsns.despine()\n\n\tfig_acc, ax_acc=plt.subplots(1, figsize=(10,7))\n\tsns.despine()\n\n\tfig_acc.subplots_adjust(top=0.88, left=0.15, right=0.88, bottom=0.15)\n\tfig_rt.subplots_adjust(top=0.88, left=0.15, right=0.88, bottom=0.15)\n\n\tflast_rt=np.zeros([5])\n\thlast_rt=np.zeros([5])\n\tflast_acc=np.zeros([5])\n\thlast_acc=np.zeros([5])\n\n\tfor simn, rest in simdfs.groupby('sim_n'):\n\n\t\tFtheo_acc, Htheo_acc = parse.get_theo_acc(simdf=rest, code_type=code_type)\n\t\tflast_acc, hlast_acc = pred_accPLOT(code_type=code_type, acc_ax=ax_acc, xacc=x, yaccFace=Ftheo_acc, yaccHouse=Htheo_acc, ind=simn, flast_acc=flast_acc, hlast_acc=hlast_acc, mname=mname)\n\n\t\tFtheo_rt, Htheo_rt=parse.get_theo_rt(simdf=rest, code_type=code_type)\n\t\tflast_rt, hlast_rt = pred_rtPLOT(code_type=code_type, rt_ax=ax_rt, xrt=x, yrtFace=Ftheo_rt, yrtHouse=Htheo_rt, ind=simn, flast_rt=flast_rt, hlast_rt=hlast_rt, mname=mname)\n\n\t#plot empirical ACC\n\tf_emp_acc=ax_acc.errorbar(x, face_acc, yerr=sem_list[0], elinewidth=2.0, ecolor='k', color=\"#1e3280\", 
lw=6.5)\n\th_emp_acc=ax_acc.errorbar(x, house_acc, yerr=sem_list[1], elinewidth=2.0, ecolor='k', color=\"#C0392B\", lw=6.5)\n\n\t#if mname=='pbm':\n\tif plot_legend:\n\t\tax_acc.legend((ax_acc.lines[-4], ax_acc.lines[-1], ax_acc.lines[0], ax_acc.lines[1]), ('Face Data', 'House Data', 'Face Model', 'House Model'), loc=0, fontsize=18)\n\telse: pass\n\tsns.despine()\n\n\t#plot empirical RT\n\tf_emp_rt=ax_rt.errorbar(x, face_rt, yerr=sem_list[2], elinewidth=2.0, ecolor='k', color=\"#1e3280\", lw=6.5)\n\th_emp_rt=ax_rt.errorbar(x, house_rt, yerr=sem_list[3], elinewidth=2.0, ecolor='k', color=\"#C0392B\", lw=6.5)\n\tsns.despine()\n\t#if mname=='pbm':\n\tif plot_legend:\n\t\tax_rt.legend((ax_rt.lines[-4], ax_rt.lines[-1], ax_rt.lines[0], ax_rt.lines[1]), ('Face Data', 'House Data', 'Face Model', 'House Model'), loc=0, fontsize=18)\n\t\tsns.despine()\n\n\tflist=[fig_rt, fig_acc]\n\tplt.tight_layout()\n\tif save:\n\t\tfig_rt.savefig(mname+'_rt.png', dpi=600)\n\t\tfig_acc.savefig(mname+'_acc.png', dpi=600)\n\t\t#fig_rt.savefig(mname+'_rt_lowres.png', dpi=300)\n\t\t#fig_acc.savefig(mname+'_acc_lowres.png', dpi=300)\n\telse:\n\t\treturn fig_rt, fig_acc\n\ndef predict(params, data, simfx=sims.sim_exp, ntrials=160, pslow=0.0, pfast=0.0, nsims=100, nsims_per_sub=1, errors=False, save=False, RTname='RT_simexp', ACCname='Acc_simexp'):\n\t\"\"\"\n\tArguments:\n\n\t\tparams (dict):\t\t\thierarchical dictionary created with\n\t\t\t\t\t\t\t\teither parse_stats() or reformat_sims_input()\n\n\t\tdata (pandas df):\t\tpandas dataframe with the empirical data used\n\t\t\t\t\t\t\t\tto fit the model to generate the simulation parameters\n\n\n\t*Simulates subject-wise data using parameter estimates for each subj/condition\n\t and calls rt and acc plotting functions to plot behavioral data against model predictions\n\n\t*If save=True, will save RT and ACC plots to working dir\n\t\"\"\"\n\tfrom myhddm import parse\n\n\tsimdf_list=[]\n\n\tif len(data.cue.unique())==3:\n\t\tx=np.array([1,2,3])\n\t\tcode_type='HNL'\n\telse:\n\t\tx=np.array([1, 2, 3, 4, 5])\n\t\tcode_type='AllP'\n\n\tif errors:\n\t\tface_rt, house_rt=parse.get_emp_error_rt(data=data)\n\t\tface_acc, house_acc, cf, ch=parse.get_empirical_means(data=data, code_type=code_type)\n\n\telse:\n\t\tface_acc, house_acc, face_rt, house_rt=parse.get_empirical_means(data=data, code_type=code_type)\n\n\tface_rt_error, house_rt_error=parse.get_emp_error_rt(data=data)\n\n\tsem_list=parse.get_emp_SE(data, code_type)\n\n\t#init sep figure, axes for RT & ACC data\n\tfig_rt, ax_rt=plt.subplots(1, figsize=(9,6))\n\tfig_acc, ax_acc=plt.subplots(1, figsize=(9,6))\n\n\tfig_acc.subplots_adjust(top=0.9, left=0.15, right=0.88, bottom=0.15)\n\tfig_rt.subplots_adjust(top=0.9, left=0.15, right=0.88, bottom=0.15)\n\n\tflast_rt=np.zeros([5])\n\thlast_rt=np.zeros([5])\n\tflast_acc=np.zeros([5])\n\thlast_acc=np.zeros([5])\n\n\tfor i in range(nsims):\n\n\t\tsimdf, params_used=simfx(pdict=params, ntrials=ntrials, pfast=pfast, pslow=pslow, nsims_per_sub=nsims_per_sub)\n\t\tsimdf['sim_n']=[i]*len(simdf.index)\n\t\tsimdf_list.append(simdf)\n\t\tFtheo_acc, Htheo_acc = parse.get_theo_acc(simdf=simdf, code_type=code_type)\n\t\tflast_acc, hlast_acc = pred_accPLOT(code_type=code_type, acc_ax=ax_acc, xacc=x, yaccFace=Ftheo_acc, yaccHouse=Htheo_acc, ind=i, flast_acc=flast_acc, hlast_acc=hlast_acc)\n\n\t\tif errors:\n\t\t\tFtheo_rt, Htheo_rt=parse.get_theo_error_rt(simdf=simdf)\n\t\telse:\n\t\t\tFtheo_rt, Htheo_rt=parse.get_theo_rt(simdf=simdf, code_type=code_type)\n\n\t\tflast_rt, hlast_rt = 
pred_rtPLOT(code_type=code_type, rt_ax=ax_rt, xrt=x, yrtFace=Ftheo_rt, yrtHouse=Htheo_rt, ind=i, flast_rt=flast_rt, hlast_rt=hlast_rt)\n\t\t#[\"#1e3280\", \"#89a0ff\", '#3554cf']\n\tsimdf_concat=pd.concat(simdf_list)\n\t#plot empirical ACC\n\t#ax_acc.grid()\n\t#f_emp_acc=ax_acc.errorbar(x, face_acc, yerr=sem_list[0], elinewidth=3.5, ecolor='k', color='Blue', lw=6.0)\n\t#h_emp_acc=ax_acc.errorbar(x, house_acc, yerr=sem_list[1], elinewidth=3.5, ecolor='k', color='Green', lw=6.0)\n\tf_emp_acc=ax_acc.errorbar(x, face_acc, yerr=sem_list[0], elinewidth=2.0, ecolor='k', color=\"#1e3280\", lw=6.5)\n\th_emp_acc=ax_acc.errorbar(x, house_acc, yerr=sem_list[1], elinewidth=2.0, ecolor='k', color=\"#C0392B\", lw=6.5)\n\n\t#ax_acc.legend((ax_acc.lines[-4], ax_acc.lines[-1], ax_acc.lines[0], ax_acc.lines[1]), ('Face Data', 'House Data', 'Face Model', 'House Model'), loc=0, fontsize=18)\n\tsns.despine()\n\tplt.tight_layout()\n\t#plot empirical RT\n\t#ax_rt.grid()\n\t#f_emp_rt=ax_rt.errorbar(x, face_rt, yerr=sem_list[2], elinewidth=3.5, ecolor='k', color='Blue', lw=6.0)\n\t#h_emp_rt=ax_rt.errorbar(x, house_rt, yerr=sem_list[3], elinewidth=3.5, ecolor='k', color='Green', lw=6.0)\n\tf_emp_rt=ax_rt.errorbar(x, face_rt, yerr=sem_list[2], elinewidth=2.0, ecolor='k', color=\"#1e3280\", lw=6.5)\n\th_emp_rt=ax_rt.errorbar(x, house_rt, yerr=sem_list[3], elinewidth=2.0, ecolor='k', color=\"#C0392B\", lw=6.5)\n\t#ax_rt.legend((ax_rt.lines[-4], ax_rt.lines[-1], ax_rt.lines[0], ax_rt.lines[1]), ('Face Data', 'House Data', 'Face Model', 'House Model'), loc=0, fontsize=18)\n\tsns.despine()\n\tplt.tight_layout()\n\tsimdf_concat.to_csv(\"simdf.csv\")\n\tif save:\n\t\tfig_rt.savefig(RTname+'.png', dpi=900)\n\t\tfig_acc.savefig(ACCname+'.png', dpi=900)\n\t\t#fig_rt.savefig(RTname+'.png', format='png', dpi=500)\n\t\t#fig_acc.savefig(ACCname+'.png', format='png', dpi=500)\n\t\t#fig_rt.savefig(RTname+'.tif', format='tif', dpi=500)\n\t\t#fig_acc.savefig(ACCname+'.tif', fortmat='tif', dpi=500)\n\treturn simdf_concat\n\n\ndef plot_data(data, save=True, RTname='RT_Data', ACCname='Acc_Data'):\n\t\"\"\"\n\tArguments:\n\n\t\tparams (dict):\t\t\thierarchical dictionary created with\n\t\t\t\t\t\t\t\teither parse_stats() or reformat_sims_input()\n\n\t\tdata (pandas df):\t\tpandas dataframe with the empirical data used\n\t\t\t\t\t\t\t\tto fit the model to generate the simulation parameters\n\n\n\t*Simulates subject-wise data using parameter estimates for each subj/condition\n\t and calls rt and acc plotting functions to plot behavioral data against model predictions\n\n\t*If save=True, will save RT and ACC plots to working dir\n\t\"\"\"\n\n\tsns.set_style(\"white\")\n\t#sns.despine()\n\n\tif len(data.cue.unique())==3:\n\t\tx=np.array([1,2,3])\n\t\tcode_type='HNL'\n\telse:\n\t\tx=np.array([1, 2, 3, 4, 5])\n\t\tcode_type='AllP'\n\n\tface_acc, house_acc, face_rt, house_rt=parse.get_empirical_means(data=data, code_type=code_type)\n\tsem_list=parse.get_emp_SE(data, code_type)\n\n\t#init sep figure, axes for RT & ACC data\n\tfig_rt, ax_rt=plt.subplots(1)\n\tsns.despine()\n\tfig_acc, ax_acc=plt.subplots(1)\n\tsns.despine()\n\tfig_acc.subplots_adjust(top=0.9, left=0.15, right=0.88, bottom=0.15)\n\tfig_rt.subplots_adjust(top=0.9, left=0.15, right=0.88, bottom=0.15)\n\t#fat\n\t#fig_rt.subplots_adjust(top=0.7, left=0.15, right=0.88, bottom=0.15)\n\t#plot empirical ACC\n\tf_emp_acc=ax_acc.errorbar(x, face_acc, yerr=sem_list[0], elinewidth=2.5, ecolor='k', color='Blue', lw=4.0)\n\th_emp_acc=ax_acc.errorbar(x, house_acc, yerr=sem_list[1], 
elinewidth=2.5, ecolor='k', color='Red', lw=4.0)\n\t#ax_acc.set_title(\"Accuracy\")\n\tax_acc.legend((ax_acc.lines[-4], ax_acc.lines[-1]), ('Face', 'House'), loc=0, fontsize=18)\n\n\t#plot empirical RT\n\tf_emp_rt=ax_rt.errorbar(x, face_rt, yerr=sem_list[2], elinewidth=2.5, ecolor='k', color='Blue', lw=4.0)\n\th_emp_rt=ax_rt.errorbar(x, house_rt, yerr=sem_list[3], elinewidth=2.5, ecolor='k', color='Red', lw=4.0)\n\t#ax_rt.set_title(\"Response-Time\")\n\tax_rt.legend((ax_rt.lines[-4], ax_rt.lines[-1]), ('Face', 'House'), loc=0, fontsize=18)\n\n\tif code_type=='HNL':\n\t\tax_rt.set_ylim(1.5, 3.5)\n\t\tax_rt.set_xticks([1, 2, 3])\n\t\tax_rt.set_xlim(0.5, 3.5)\n\t\tax_rt.set_xticklabels(['80H', '50N', '80F'], fontsize=32)\n\t\tax_rt.set_ylabel('Response Time (s)', fontsize=35, labelpad=14)\n\t\tax_rt.set_xlabel('Prior Probability Cue', fontsize=35, labelpad=10)\n\t\tax_rt.set_yticklabels(np.arange(1.5, 4, 0.5), fontsize=25)\n\t\tax_acc.set_ylim(0.7, 1.0)\n\t\tax_acc.set_xticks([1, 2, 3])\n\t\tax_acc.set_xlim(0.5, 3.5)\n\t\tax_acc.set_xticklabels(['80H', '50N', '80F'], fontsize=32)\n\t\tax_acc.set_ylabel('Proportion Correct', fontsize=35, labelpad=14)\n\t\tax_acc.set_xlabel('Prior Probability Cue', fontsize=35, labelpad=10)\n\t\tax_acc.set_yticks(np.arange(0.6, 1.05, .05))\n\t\tax_acc.set_yticklabels(np.arange(0.6, 1.05, .05), fontsize=25)\n\telse:\n\t\tax_rt.set_ylim(1.6, 3.5)\n\t\tax_rt.set_xticks([1, 2, 3, 4, 5])\n\t\tax_rt.set_xlim(0.6, 5.5)\n\t\tax_rt.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=18)\n\t\tax_rt.set_yticks(np.arange(1.5, 4.0, 0.5))\n\t\tax_rt.set_yticklabels(np.arange(1.5, 4.0, 0.5), fontsize=18)\n\t\tax_rt.set_ylabel('Response Time (s)', fontsize=22, labelpad=14)\n\t\tax_rt.set_xlabel('Prior Probability Cue', fontsize=22, labelpad=10)\n\n\t\tax_acc.set_ylim(0.6, 1.0)\n\t\tax_acc.set_xticks([1, 2, 3, 4, 5])\n\t\tax_acc.set_xlim(0.5, 5.5)\n\t\tax_acc.set_xticklabels(['90H', '70H', '50/50', '70F', '90F'], fontsize=18)\n\t\tax_acc.set_yticks(np.arange(0.6, 1.05, .05))\n\t\tax_acc.set_yticklabels(np.arange(0.6, 1.05, .05), fontsize=18)\n\t\tax_acc.set_ylabel('Proportion Correct', fontsize=22, labelpad=14)\n\t\tax_acc.set_xlabel('Prior Probability Cue', fontsize=22, labelpad=10)\n\n\t#save figures\n\tif save:\n\t\tfig_rt.savefig(RTname+'.png', dpi=600)\n\t\tfig_acc.savefig(ACCname+'.png', dpi=600)\n\nif __name__ == \"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.6801087260246277,
"alphanum_fraction": 0.6968736052513123,
"avg_line_length": 22.9891300201416,
"blob_id": "bba6d6952a66a0603be89eca85c26990e11b1e04",
"content_id": "fc5b0a11162dc4a6570d2030bd267e59223d6666",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2207,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 92,
"path": "/analyze.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom __future__ import division\nimport sys\nimport os\nimport hddm\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom myhddm import sims, sdt, parse, vis, defmod, mle\n\n\ndef rna(mname):\n\n\tm=defmod.define_model(mname)\n\n\tm.sample(5000, burn=1000, dbname=mname+'_traces.db', db='pickle')\n\n\tpostproc(m, mname=mname)\n\ndef postproc(m, mname='model', varlvl='grp', traces_info=None):\n\n\tif not os.path.isdir(\"avg_pdf\"):\n\t\tos.makedirs(\"avg_pdf\")\n\tif not os.path.isdir(\"avg_quant\"):\n\t\tos.makedirs(\"avg_quant\")\n\tif not os.path.isdir(\"posteriors\"):\n\t\tos.makedirs(\"posteriors\")\n\tif not os.path.isdir(\"correlations\"):\n\t\tos.makedirs(\"correlations\")\n\tif not os.path.isdir(\"simulations\"):\n\t\tos.makedirs(\"simulations\")\n\n\tos.chdir(\"posteriors\")\n\n\tm.plot_posteriors(save=True)\n\tplt.close('all')\n\n\tos.chdir(\"../\")\n\n\tm.print_stats(fname=mname+'_stats.txt')\n\tparsed = parse.parse_stats(minput=m, varlvl=varlvl, input_isdf=False)\n\tsubdf=parsed[0]\n\tcondsdf=parsed[1]\n\tpdict=parsed[2]\n\n\tos.chdir('simulations')\n\n\t#pdict=mle.optimize_sx(mname)\n\n\tprint \"simulating from mle optimized params\"\n\tsimdf=vis.predict(params=pdict, data=m.data, simfx=sims.sim_exp, ntrials=240, pslow=0.0, pfast=0.0, nsims=100, save=True, RTname=mname+'_RT_simexp', ACCname=mname+'_ACC_simexp')\n\tsimdf.to_csv(\"simdf.csv\", index=False)\n\tsimdf=pd.read_csv(\"simdf.csv\")\n\tvis.predict_from_simdfs(m.data, simdf)\n\tplt.close('all')\n\n\tos.chdir('../')\n\n\tsimdf.to_csv(\"simdf_opt.csv\")\n\n\tos.chdir('correlations')\n\n\tprint \"estimating emp-theo sdt correlation, saving...\"\n\tsdt_corr=sdt.rho_sdt(m.data, simdf)\n\n\tprint \"plotting emp-theo sdt correlation, saving...\"\n\tsdt.plot_evs_corr(m.data, simdf)\n\tsdt_corr.to_csv(\"sdt_corr.csv\")\n\tplt.close('all')\n\n\tos.chdir('../')\n\n\t#mname, traces_name=defmod.find_traces_imaging(m)\n\n\n\tavgm=m.get_average_model()\n\t#avgm.load_db(dbname=traces_name, db='pickle')\n\tavgm.load_db(dbname=\"msmIO_traces.db\", db='pickle')\n\n\tos.chdir(\"./avg_pdf\")\n\n\tavgm.plot_posterior_predictive(save=True, value_range=np.linspace(-6, 6, 100), figsize=(12, 10))\n\n\tos.chdir(\"../avg_quant\")\n\n\tavgm.plot_posterior_quantiles(save=True, samples=250, figsize=(12, 10))\n\n\tplt.close('all')\n\n\nif __name__==\"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.6741195917129517,
"alphanum_fraction": 0.6852990388870239,
"avg_line_length": 27.396825790405273,
"blob_id": "fb4db1bf50753d45dbdfc81f8d17dd30e60b8b4d",
"content_id": "ed979502f9e08e7b3f1a18850972d01adbf300b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1789,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 63,
"path": "/final.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport hddm\nimport kabuki\nimport pandas as pd\nimport os\nfrom mydata.munge import find_path\nfrom myhddm import defmod, parse, opt, vis, sims\n\ndef analyze_models(nsims=100, ntrials=100):\n\n\tmnames=['msm', 'dbm', 'dbmz', 'pbm']\n\t#mnames=['pbm']\n\tbias=True\n\tdata=pd.read_csv(\"/Users/kyle/Desktop/beh_hddm/allsx_feat.csv\")\n\n\tfor m in mnames:\n\n\t\tif m=='dbm':\n\t\t\tbias=False\n\n\t\tmodel=defmod.define_model(m, project='behav')\n\n\t\tmpath=\"/Users/kyle/Desktop/beh_hddm/revised_models/\"+m\n\t\tos.chdir(mpath)\n\n\t\tm0=model; m1=model; m2=model\n\t\tmlist=[m0.load_db(m+\"_traces0.db\", db='pickle'), m1.load_db(m+\"_traces1.db\", db='pickle'), m2.load_db(m+\"_traces2.db\", db='pickle')]\n\t\tallmodels=kabuki.utils.concat_models(mlist)\n\t\tallmodels.print_stats(m+\"_stats_all.txt\")\n\n\t\tvis.plot_neutral_traces(allmodels)\n\t\tfor node in ['z', 'vf', 'vh']:\n\t\t\tvis.plot_posterior_nodes(allmodels, node)\n\n\t\tgparams={}; subj_params=[]\n\n\t\tmsingle=defmod.define_single(m, project='behav')\n\n\t\tfor subj_idx, subj_data in data.groupby('subj_idx'):\n\t\t\tm_subj=hddm.HDDM(subj_data, depends_on=msingle.depends_on, bias=bias, include=msingle.include)\n\t\t\tsx_params=m_subj.optimize('ML')\n\t\t\tpdict=opt.get_pdict(sx_params)\n\t\t\tsubj_params.append(sx_params)\n\t\t\tgparams[subj_idx]=pdict\n\n\t\t#write gparams to .txt file for reloading later\n\t\tf=open(m+'mle_gparams.txt', 'w')\n\t\tf.write('gparams=' + repr(gparams) + '\\n')\n\t\tf.close()\n\n\t\tsimdf_list=[]\n\t\tfor i in range(nsims):\n\t\t\tsimdf, params_used=sims.sim_exp(pdict=gparams, ntrials=ntrials, pfast=0.0, pslow=0.0, nsims_per_sub=1)\n\t\t\tsimdf['sim_n']=[i]*len(simdf.index)\n\t\t\tsimdf_list.append(simdf)\n\n\t\tsimdf=pd.concat(simdf_list)\n\n\t\tparams = pd.DataFrame(subj_params)\n\t\tsimdf.to_csv(m+\"_simdf.csv\")\n\t\tparams.to_csv(m+\"_sparams.csv\", index=False)\n"
},
{
"alpha_fraction": 0.5827140212059021,
"alphanum_fraction": 0.6353456377983093,
"avg_line_length": 36.63218307495117,
"blob_id": "999a17864dfac02ea1f3d3ad70cd7d797adf800f",
"content_id": "edf39a09a515607565d3f82f30a0847b384da950",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9823,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 261,
"path": "/lindis.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "from scipy import linalg\nimport numpy as np\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nfrom sklearn.lda import LDA\nfrom sklearn.qda import QDA\nimport pandas as pd\nimport seaborn as sns\n\n# colormap\ncmap = colors.LinearSegmentedColormap(\n 'red_blue_classes',\n {'blue': [(0, 1, 1), (1, 0.7, 0.7)],\n 'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],\n 'red': [(0, 0.7, 0.7), (1, 1, 1)]})\nplt.cm.register_cmap(cmap=cmap)\n\ndef fh_lda():\n\n\ty=[];yf=[];yh=[];X=[];XF=[];XH=[]\n\tfdf=pd.read_csv(\"mds_face_coords.csv\")\n\thdf=pd.read_csv(\"mds_house_coords.csv\")\n\n\tfor i in fdf.index:\n\t XF.append([fdf.ix[i, 0], fdf.ix[i, 1]])\n\tyf=[0]*len(XF)\n\n\tfor i in hdf.index:\n\t XH.append([hdf.ix[i, 0], hdf.ix[i, 1]])\n\tyh=[1]*len(XH)\n\n\tfor i in XH:\n\t XF.append(i)\n\tfor i in yh:\n\t yf.append(i)\n\n\ty=np.array(yf)\n\tX=np.array(XF)\n\n\treturn X, y\n\n\n# plot functions\ndef plot_LDA(lda, X, y, y_pred):\n\t#splot = plt.subplot(111)\n\tfig=plt.figure(figsize=(5, 6))\n\tax=fig.add_subplot(111)\n\t#plt.title('Linear Discriminant Analysis')\n\n\ttp = (y == y_pred) # True Positive\n\ttp0, tp1 = tp[y == 0], tp[y == 1]\n\tX0, X1 = X[y == 0], X[y == 1]\n\tX0_tp, X0_fp = X0[tp0], X0[~tp0]\n\tX1_tp, X1_fp = X1[tp1], X1[~tp1]\n\txmin, xmax = X[:, 0].min(), X[:, 0].max()\n\tymin, ymax = X[:, 1].min(), X[:, 1].max()\n\n\t# class 0: dots\n\tplt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', ms=10, color='DarkBlue', alpha=0.6)\n\tplt.plot(X0_fp[:, 0], X0_fp[:, 1], 'o', ms=10, color='RoyalBlue', alpha=0.6)\n\n\t# class 1: dots\n\tplt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', ms=10, color='FireBrick', alpha=0.6)\n\tplt.plot(X1_fp[:, 0], X1_fp[:, 1], 'o', ms=10, color='Crimson', alpha=0.6)\n\n plt.plot()\n\n\tax=plt.gca()\n\tax.set_xlim([-15000, 18000]); ax.set_ylim([-15000, 15000])\n\n\t# class 0 and 1 : areas\n\tnx, ny = 80, 40 #was 200, 100\n\tx_min, x_max = plt.xlim()\n\ty_min, y_max = plt.ylim()\n\txx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),\n\t np.linspace(y_min, y_max, ny))\n\tzz=np.c_[xx.ravel(), yy.ravel()]\n\tZ = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])\n\tZ = Z[:, 1].reshape(xx.shape)\n\tplt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',\n\t norm=colors.Normalize(0., 1.))\n\tplt.contour(xx, yy, Z, [0.5], linewidths=3.5, colors='k')\n\n\t# means\n\t#print \"cat1 mean coordinates: %s, %s\" % (str(lda.means_[0][0]), str(lda.means_[0][1]))\n\t#print \"cat2 mean coordinates: %s, %s\" % (str(lda.means_[1][0]), str(lda.means_[1][1]))\n\t#plt.plot(lda.means_[0][0], lda.means_[0][1],\n\t# 'o', color='black', markersize=10)\n\t#plt.plot(lda.means_[1][0], lda.means_[1][1],\n\t# 'o', color='black', markersize=10)\n\n plt.plot(-7831.69871092267,-763.116931264117,\n 'o', color='black', markersize=10, mec='Blue', mew=2)\n\n plt.plot(2296.02745742291, -306.329358115368,\n 'o', color='black', markersize=10, mec='Red', mew=2)\n\n\t#ax=plt.gca()\n\t#ax.set_xlim([-15000, 18000]); ax.set_ylim([-15000, 15000])\n\tax.set_xticklabels([])\n\tax.set_yticklabels([])\n\tax.set_xlabel(\"Dimension 1\", fontsize=16, labelpad=8)\n\tax.set_ylabel(\"Dimension 2\", fontsize=16, labelpad=10)\n\tplt.savefig(\"lda_keep.png\", format='png', dpi=900)\n\n\teigen_dist=pd.Series(lda.decision_function(X))\n\teigen_dist.to_csv(\"eigen_distance_tobound_keep.csv\", index=False)\n\n\treturn ax\n\n\ndef main_lda():\n\tX,y=fh_lda()\n\n\tlda=LDA()\n\tlda.fit(X,y)\n\n\tsplot=plot_LDA(lda, X, y, lda.fit(X,y).predict(X))\n\treturn splot\n\n\ndef plot_correl(ax=None, 
figname=\"correlation_plot\"):\n\n\tsns.set_style(\"white\")\n\tsns.set_style(\"white\", {\"legend.scatterpoints\": 1, \"legend.frameon\":Tru\t\n\t#if ax:\n\t# ax=sns.regplot(data.ix[:,0], data.ix[:,1], color='Red', scatter=True, ci=None, scatter_kws={'s':18}, ax=ax)\n\t#else:\n\t# ax=sns.regplot(data.ix[:,0], data.ix[:,1], color='Blue', scatter=True, ci=None, scatter_kws={'s':1\t\n\tdataf=pd.read_csv(\"FaceEigen_RT_keep.csv\")\n\tdatah=pd.read_csv(\"HouseEigen_RT_keep.csv\")\n\tdata_all=pd.read_csv(\"StimEigen_RT_keep.csv\")\t\n\tfig=plt.figure(figsize=(5, 6))\n\tax=fig.add_subplot(1\t\n\taxx=sns.regplot(data_all.ix[:,0], data_all.ix[:,1], color='Black', fit_reg=True, robust=True, label='All, r=.326**', scatter=True, ci=None, scatter_kws={'s':2}, ax=ax)\n\taxx=sns.regplot(datah.ix[:,0], datah.ix[:,1], color='Red', fit_reg=True, robust=True, scatter=True, ci=None, scatter_kws={'s':35}, ax=ax)\n\taxx=sns.regplot(dataf.ix[:,0], dataf.ix[:,1], color='Blue', fit_reg=True, robust=True, scatter=True, ci=None, scatter_kws={'s':35}, ax=ax)\n\taxx=sns.regplot(datah.ix[:,0], datah.ix[:,1], color='Red', fit_reg=True, robust=True, scatter=True, ci=None, scatter_kws={'s':35}, ax=ax)\n\taxx=sns.regplot(dataf.ix[:,0], dataf.ix[:,1], color='Blue', fit_reg=True, robust=True, scatter=True, ci=None, scatter_kws={'s':35}, ax=ax)\n\taxx=sns.regplot(dataf.ix[:,0], dataf.ix[:,1], color='Blue', fit_reg=True, robust=True, label='Face, r=.320*', scatter=True, ci=None, scatter_kws={'s':35}, ax=ax)\n\taxx=sns.regplot(datah.ix[:,0], datah.ix[:,1], color='Red', fit_reg=True, robust=True, label='House, r=.333*', scatter=True, ci=None, scatter_kws={'s':35}, ax=\t\n\tfig.set_tight_layout(True)\n\tfig.subplots_adjust(left=.22, bottom=.14, top=.95, right=.7)\n\tax.set_ylim([-1,1])\n\tax.set_xlim([2,14])\n\t#ax.set_xticklabels(np.arange(2, 16, 2), fontsize=16)\n\tax.set_xticklabels(np.arange(2, 16, 2), fontsize=10)\n\tax.set_xlabel(\"Distance to Category Boundary\", fontsize=12, labelpad\t\n\tleg = ax.legend(loc='best', fancybox=True, fontsize=10)\n\tleg.get_frame().set_alpha(0.\t\n\t#ax.legend(loc=0, fontsize=14)\n\t#plt.tight_layou\t\n\tax.set_ylabel(\"Response Time (s)\", fontsize=12, labelpad=5)\n\tax.set_yticklabels(np.arange(-1, 1.5, 0.5), fontsize=10)\n\tsns.despine()\n\t#plt.tight_layout(pad=2)\n\t#plt.subplots_adjust(left=.22, bottom=.14, top=.95, right=.7)\n\tplt.savefig(figname+\".png\", format='png', dpi=6\t\n\treturn fig, ax\n\ndef plot_correl_bycue(ax=None, figname=\"correlbycue_plot\"):\n\n\tsns.set_style(\"white\")\n\tsns.set_style(\"white\", {\"legend.scatterpoints\": 1, \"legend.frameon\":True})\n\n\tdf=pd.read_csv(\"/Users/kyle/Desktop/beh_hddm/MDS_Analysis/dist_RTxCue_allcor.csv\")\n\n\tdataf=df[df['stim']=='face']\n\tdatah=df[df['stim']=='house']\n\n\tfig=plt.figure(figsize=(10, 12))\n\taxf=fig.add_subplot(121)\n\taxh=fig.add_subplot(122)\n\n\taxx=sns.regplot(dataf['distance'], dataf['hcRT'], color='Red', fit_reg=True, robust=True, label='House Cue, r=-.19', scatter=True, ci=None, scatter_kws={'s':35}, ax=axf)\n\taxx=sns.regplot(dataf['distance'], dataf['ncRT'], color='Black', fit_reg=True, robust=False, label='Neutral Cue, r=-.15', scatter=True, ci=None, scatter_kws={'s':35}, ax=axf)\n\taxx=sns.regplot(dataf['distance'], dataf['fcRT'], color='Blue', fit_reg=True, robust=True, label='Face Cue, r=-.320*', scatter=True, ci=None, scatter_kws={'s':35}, ax=axf)\n\n\taxx=sns.regplot(datah['distance'], datah['hcRT'], color='Red', fit_reg=True, robust=True, label='House Cue, r=-.330*', scatter=True, ci=None, 
scatter_kws={'s':35}, ax=axh)\n\taxx=sns.regplot(datah['distance'], datah['ncRT'], color='Black', fit_reg=True, robust=True, label='Neutral Cue, r=-.18', scatter=True, ci=None, scatter_kws={'s':35}, ax=axh)\n\taxx=sns.regplot(datah['distance'], datah['fcRT'], color='Blue', fit_reg=True, robust=True, label='Face Cue, r=-.09', scatter=True, ci=None, scatter_kws={'s':35}, ax=axh)\n\n\t#fig.set_tight_layout(True)\n\t#fig.subplots_adjust(left=.22, bottom=.14, top=.95, right=.7)\n\tfor ax in fig.axes:\n\t\tax.set_ylim([-1.2,1.2])\n\t\tax.set_xlim([-5,18])\n\t\t#ax.set_xticklabels(np.arange(2, 16, 2), fontsize=16)\n\t\t#axf.set_xticklabels(np.arange(2, 16, 2), fontsize=10)\n\t\tax.set_xlabel(\"Distance to Category Boundary\", fontsize=12, labelpad=5)\n\t\n\t\tleg = ax.legend(loc='best', fancybox=True, fontsize=10)\n\t\tleg.get_frame().set_alpha(0.95)\n\t\n\t\t#ax.legend(loc=0, fontsize=14)\n\t\t#plt.tight_layout()\n\t\n\t\tax.set_ylabel(\"Response Time (s)\", fontsize=12, labelpad=5)\n\t\t#ax.set_yticklabels(np.arange(-1, 1.5, 0.5), fontsize=10)\n\t\tsns.despine()\n\t\t#plt.tight_layout(pad=2)\n\t\t#plt.subplots_adjust(left=.22, bottom=.14, top=.95, right=.7)\n\t\n\tplt.savefig(figname+\".png\", format='png', dpi=600)\n\n\treturn fig\n\n\ndef plot_rho_heatmap():\n\t\n\tsns.set_style(\"white\")\n\tpal=sns.blend_palette(['Darkred', 'Pink'], as_cmap=True)\n\t\n\tdf=pd.read_csv(\"/Users/kyle/Desktop/beh_hddm/MDS_Analysis/dist_RTxCue_allcor.csv\")\n\t\n\tdataf=df[df['stim']=='face']\n\tdatah=df[df['stim']=='house']\n\n\tfhc=dataf['distance'].corr(dataf['hcRT'], method='spearman')\n\tfnc=dataf['distance'].corr(dataf['ncRT'], method='spearman')\n\tffc=dataf['distance'].corr(dataf['fcRT'], method='spearman')\n\thhc=datah['distance'].corr(datah['hcRT'], method='spearman')\n\thnc=datah['distance'].corr(datah['ncRT'], method='spearman')\n\thfc=datah['distance'].corr(datah['fcRT'], method='spearman')\n\t\n\tfcorr=np.array([fhc, fnc, ffc])\n\thcorr=np.array([hhc, hnc, hfc])\n\t\n\tcorr_matrix=np.array([fcorr, hcorr])\n\t\n\tfig=plt.figure(figsize=(10,8))\n\tfig.set_tight_layout(True)\n\t\n\tax=fig.add_subplot(111)\n\t\n\tfig.subplots_adjust(top=.95, hspace=.1, left=0.10, right=.9, bottom=0.1)\n\n\tax.set_ylim(-0.5, 1.5)\n\tax.set_yticks([0, 1])\n\tax.set_yticklabels(['Face', 'House'], fontsize=24)\n\tplt.setp(ax.get_yticklabels(), rotation=90)\n\tax.set_ylabel(\"Stimulus\", fontsize=28, labelpad=8)\n\tax.set_xlim(-0.5, 2.5)\n\tax.set_xticks([0, 1, 2])\n\tax.set_xticklabels(['House', 'Neutral', 'Face'], fontsize=24)\n\tax.set_xlabel(\"Cue Type\", fontsize=28, labelpad=8)\n\tax_map=ax.imshow(corr_matrix, interpolation='nearest', cmap=pal, origin='lower', vmin=-0.40, vmax=0)\n\tplt.colorbar(ax_map, ax=ax, shrink=0.65)\n\t\n\tfor i, cond in enumerate(corr_matrix):\n\t\tx=0\n\t\tfor xval in cond:\n\t\t\tif -.35<xval<=-.30:\n\t\t\t\tax.text(x, i, \"r=\"+str(xval)[:5]+\"*\", ha='center', va='center', fontsize=29)\n\t\t\telif xval<-.35:\n\t\t\t\tax.text(x, i, \"r=\"+str(xval)[:5]+\"**\", ha='center', va='center', fontsize=29)\n\t\t\telse:\n\t\t\t\tax.text(x, i, \"r=\"+str(xval)[:5], ha='center', va='center', fontsize=22)\n\t\t\tx+=1\n\t\n\tplt.savefig('corr.png', format='png', dpi=600)\n"
},
{
"alpha_fraction": 0.5958257913589478,
"alphanum_fraction": 0.5996370315551758,
"avg_line_length": 33.873416900634766,
"blob_id": "8926229c93296365518b79020cb71cb01af54436",
"content_id": "b8562b8f6face318f674cddf940229e4904da7de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5510,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 158,
"path": "/defmod.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport hddm\nimport pandas as pd\nimport numpy as np\nfrom patsy import dmatrix\nimport os\nfrom mydata.munge import find_path\n\npth=find_path()\nmpath=\"/home/kyle/Dropbox/PFH/iPFH/HDDM/Final/\"\n#data=pd.read_csv(pth+\"/beh_hddm/allsx_feat.csv\")\ndata=pd.read_csv(mpath+\"allsx_ewma.csv\")\n\ndef z_link_func(x, data=data):\n stim = (np.asarray(dmatrix('0 + C(s,[[1],[-1]])', {'s':data.stimulus.ix[x.index]})))\n return 1 / (1 + np.exp(-(x * stim)))\n\ndef v_link_func(x, data=data):\n stim = (np.asarray(dmatrix('0 + C(s,[[1],[-1]])', {'s':data.stimulus.ix[x.index]})))\n return x * stim\n\n\ndef define_model(mname, project='imaging', regress=False, info=False):\n\n\tcheck_model(mname)\n\n\tdata=find_data(mname, project)\n\n\tif project=='imaging':\n\t\tintercept=\"b50N\"\n\telse:\n\t\tintercept=\"c50N\"\n\n\tif regress:\n\t\tif mname=='msm':\n\t\t\tmsm_vreg = {'model': 'v ~ 1 + C(cue, Treatment('+intercept+'))', 'link_func': v_link_func}\n\t\t\tm=hddm.HDDMRegressor(data, msm_vreg, depends_on={'v':'stim', 'z':'cue'}, bias=True, informative=info, include=['v', 'z', 't', 'a'])\n\t\telif mname=='pbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':'stim', 'z':'cue'}, informative=info, bias=True, include=['v', 'z', 't', 'a'])\n\t\telif mname=='dbm':\n\t\t\tdbm_vreg = {'model': 'v ~ 1 + C(cue, Treatment('+intercept+'))', 'link_func': v_link_func(data=data)}\n\t\t\tm = hddm.HDDMRegressor(data, dbm_vreg, depends_on={'v':'stim'}, bias=False, informative=info, include=['v', 't', 'a'])\n\t\telif mname=='dbmz':\n\t\t\tdbmz_vreg = {'model': 'v ~ 1 + C(cue, Treatment('+intercept+'))', 'link_func': v_link_func}\n\t\t\tm=hddm.HDDMRegressor(data, dbmz_vreg, depends_on={'v':'stim'}, bias=True, informative=info, include=['v', 'z', 't', 'a'])\n\n\telse:\n\t\tif mname=='msmt':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue'], 'z':'cue', 't':['stim', 'cue']}, bias=True, informative=info, include=['v', 'z', 't', 'a', 'sv', 'sz', 'st'])\n\t\telif mname=='msm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue'], 'z':'cue'}, bias=True, informative=info, include=['v', 'z', 't', 'a', 'sv', 'sz', 'st'])\n\t\telif mname=='pbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':'stim', 'z':'cue'}, bias=True, informative=info, include=['v', 'z', 't', 'a', 'sv', 'sz', 'st'])\n\t\telif mname=='dbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue']}, bias=False, informative=info, include=['v', 'z', 't', 'a', 'sv', 'sz', 'st'])\n\t\telif mname=='dbmz':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue']}, bias=True, informative=info, include=['v', 'z', 't', 'a', 'sv', 'sz', 'st'])\n\n\treturn m\n\ndef define_sxbayes(mname, data, project='imaging', regress=False):\n\n\tm=define_single(mname, data, project='imaging', regress=False)\n\treturn m\n\ndef define_single(mname, data, project='imaging', regress=False):\n\n\tcheck_model(mname)\n\n\tif project=='imaging':\n\t\tvreg = {'model': 'v ~ 1 + C(cue, Treatment(\"b50N\"))', 'link_func': v_link_func}\n\telse:\n\t\tvreg = {'model': 'v ~ 1 + C(cue, Treatment(\"c50N\"))', 'link_func': v_link_func}\n\n\tif regress:\n\t\tif mname=='msm':\n\t\t\tm=hddm.HDDMRegressor(data, vreg, depends_on={'v':'stim', 'z':'cue'}, bias=True, informative=False, include=['v', 'z', 't', 'a'])\n\t\telif mname=='pbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':'stim', 'z':'cue'}, informative=False, bias=True, include=['v', 'z', 't', 'a'])\n\t\telif mname=='dbm':\n\t\t\tdbm = hddm.HDDMRegressor(data, vreg, depends_on={'v':'stim'}, 
bias=False, informative=False, include=['v', 't', 'a'])\n\t\telif mname=='dbmz':\n\t\t\tm=hddm.HDDMRegressor(data, vreg, depends_on={'v':'stim'}, bias=True, informative=False, include=['v', 'z', 't', 'a'])\n\n\telse:\n\t\tif mname=='msmt':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue'], 'z':'cue', 't':['stim', 'cue']}, informative=False, bias=True, include=['v', 'z', 't', 'a'])\n\t\telif mname=='msm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue'], 'z':'cue'}, informative=False, bias=True, include=['v', 'z', 't', 'a'])\n\t\telif mname=='pbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':'stim', 'z':'cue'}, informative=False, bias=True, include=['v', 'z', 't', 'a'])\n\t\telif mname=='dbm':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue']}, informative=False, bias=False, include=['v', 't', 'a'])\n\t\telif mname=='dbmz':\n\t\t\tm=hddm.HDDM(data, depends_on={'v':['stim', 'cue']}, informative=False, bias=True, include=['v', 'z', 't', 'a'])\n\n\treturn m\n\ndef build_model(mname, project='imaging'):\n\n\tm=define_model(mname, project)\n\n\tm=load_traces(m, mname, project)\n\n\treturn m\n\ndef check_model(mname):\n\n\tmname_list=['msmt', 'msm', 'pbm', 'dbm', 'dbmz']\n\n\tif mname not in mname_list:\n\t\tprint \"mname not recognized: must be 'msmt', 'msm', 'pbm', or 'dbm'\"\n\t\texit()\n\telse:\n\t\tprint \"building \", mname\n\ndef find_data(mname, project='imaging'):\n\n\tpth=find_path()\n\n\tif project=='behav':\n\t\tdata=pd.read_csv(pth+\"beh_hddm/allsx_feat.csv\")\n\n\telse:\n\t\t#data=pd.read_csv(pth+\"img_hddm/allsx_ewma.csv\")\n\t\tmpath=\"/home/kyle/Dropbox/PFH/iPFH/HDDM/Final/\"\n\t\tdata=pd.read_csv(mpath+\"allsx_ewma.csv\")\n\n\treturn data\n\ndef load_traces(m, mname, project='imaging'):\n\n\tpth=find_path()\n\n\tif project=='behav':\n\t\tm.load_db(pth+\"beh_hddm/\"+mname+\"/\"+mname+\"_traces.db\", db='pickle')\n\n\telse:\n\t\t#m.load_db(pth+\"img_hddm/\"+mname+\"/\"+mname+\"_traces.db\", db='pickle')\n\t\tmpath=pth+\"PFH/iPFH/HDDM/Final/\"\n\t\tm.load_db(mpath+mname+'/'+mname+\"_traces.db\", db='pickle')\n\n\treturn m\n\n\ndef build_avgm(m, mname, project='imaging'):\n\n\tavgm=m.get_average_model()\n\tavgm=load_traces(avgm, mname, project)\n\n\treturn avgm\n\nif __name__==\"__main__\":\n\tmain()\n"
},
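The stimulus-coded link function used throughout defmod.py above is the subtle part: the patsy contrast C(s,[[1],[-1]]) maps the first stimulus level to +1 and the second to -1, so the same drift-rate regression pushes evidence toward opposite decision boundaries for faces and houses. A minimal dependency-light sketch of the same idea (the toy 'stimulus' column and values below are illustrative, not from the repo):

import numpy as np
import pandas as pd

# Toy stand-in for HDDM's trial table; values are made up for illustration.
data = pd.DataFrame({'stimulus': ['face', 'house', 'face', 'house']})
x = pd.Series([0.8, 0.8, -0.3, -0.3], index=data.index)  # regression output per trial

def v_link(x, data):
    # +1 for 'face', -1 for 'house', mirroring the C(s,[[1],[-1]]) contrast above.
    sign = np.where(data.loc[x.index, 'stimulus'] == 'face', 1.0, -1.0)
    return x * sign

print(v_link(x, data).tolist())  # [0.8, -0.8, -0.3, 0.3]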
{
"alpha_fraction": 0.6540540456771851,
"alphanum_fraction": 0.6766892075538635,
"avg_line_length": 27.447114944458008,
"blob_id": "fb2c85fa521a2ecaa517224d892925c8be2e59f4",
"content_id": "145497af8d33ff92c13a33cb1876d2d5e8e4f130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5920,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 208,
"path": "/sims.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport hddm\nimport pandas as pd\nimport numpy as np\n\ndef sim_exp(pdict, ntrials=500, p_outlier=None, pfast=0, pslow=0, nsims_per_sub=1):\n\t\"\"\"\n\tSimulates a dataset for each subject(i), for every condition(j) in pdict of \n\tsize n=ntrials*exp_proportion. <-- Makes simulated trial count for each condition\n\tproportional to the number of empirical observations (unbalanced face/house obs across cues).\n\tAlso, does light parsing and reformatting on output dataframe\n\n\tRETURNS: 2\n\n\t\t*sim_df (pandas DataFrame): columns for condition name, sub id, cue, stim, \n\t\t\t \t\t\t\t\t\tresponse (1: upperbound, 0: lowerbound), \n\t\t\t\t\t\t\t\t\taccuracy (1: cor, 0: err), and response time\n\n\t\t*param_dict (dict):\t\t\tdataframe of parameters used \n\t\t\t\t\t\t\t\t\t(only needed if noise/outliers \n\t\t\t\t\t\t\t\t\tsimulated)\n\n\t\"\"\"\n\tparam_dict=dict()\n\tfor i, sub in enumerate(pdict):\n\t\tfor num, cond in enumerate(pdict[sub].keys()):\n\t\t\t#check if cue predicts image (i.e. 90F --> face)\n\t\t\tif cond.split('_')[0][-1].lower()==cond.split('_')[1][0].lower():\n\t\t\t\t#make total trials for this simulated condition\n\t\t\t\t#proportional to number of experimental\n\t\t\t\t#trials in this condition\n\t\t\t\tperc=int(cond[1:3])*.01\n\t\t\telse:\n\t\t\t\tperc=1-(int(cond[1:3])*.01)\n\t\t\texptrials=perc*ntrials\n\t\t\n\t\t\tif 'p' in pdict[sub][cond].keys():\n\t\t\t\tpfast=pdict[sub][cond]['p']\n\t\t\t\tpslow=pdict[sub][cond]['p']\n\t\t\n\t\t\tnfast=int((exptrials/2)*pfast)\n\t\t\tnslow=int((exptrials/2)*pslow)\n\n\t\t\tdata, parameters = hddm.generate.gen_rand_data(params={cond:pdict[sub][cond]}, subjs=nsims_per_sub, \n\t\t\t\tn_fast_outliers=nfast, n_slow_outliers=nslow, size=exptrials)\n\t\t\tdata.subj_idx[:]=sub\n\n\t\t\tif i==0 and num==0:\n\t\t\t\tsimdf=data\n\t\t\telse:\n\t\t\t\tsimdf=pd.concat([simdf, data], ignore_index=True)\n\t\t\tparam_dict[i]=parameters\t\n\t\n\tsimdf=ref_simdf(simdf)\n\n\treturn simdf, param_dict\n\ndef sim_and_concat(params, nsims=25, ntrials=100):\n\t\n\tsimdf_list=[]\n\t\n\tfor i in range(nsims):\t\n\t\tsimdf, params_used=sim_exp(pdict=params, ntrials=ntrials)\n\t\tsimdf['sim_num']=simdf['subj_idx'].copy()\n\t\tsimdf.sim_num[:]=i\n\t\tsimdf_list.append(simdf)\n\t\n\tall_simdfs=pd.concat(simdf_list)\n\t\n\treturn all_simdfs\n\ndef sim_exp_subj(pdict, ntrials=500, p_outlier=None, pfast=0, pslow=0, nsims_per_sub=1):\n\t\"\"\"\n\tSimulates a dataset for a single subject, for every condition in pdict of \n\tsize n=ntrials*exp_proportion. <-- Makes simulated trial count for each condition\n\tproportional to the number of empirical observations (unbalanced face/house obs across cues).\n\t\n\tAlso, does light parsing and reformatting on output dataframe. \n\n\tRETURNS: 2\n\n\t\t*sim_df (pandas DataFrame): columns for condition name, sub id, cue, stim, \n\t\t\t \t\t\t\t\t\tresponse (1: upperbound, 0: lowerbound), \n\t\t\t\t\t\t\t\t\taccuracy (1: cor, 0: err), and response time\n\n\t\t*param_dict (dict):\t\t\tdataframe of parameters used \n\t\t\t\t\t\t\t\t\t(only needed if noise/outliers \n\t\t\t\t\t\t\t\t\tsimulated)\n\n\t\"\"\"\n\tparam_dict=dict()\n\tfor i, cond in enumerate(pdict.keys()):\n\n\t\t#check if cue predicts image (i.e. 
90F --> face)\n\t\tif cond.split('_')[0][-1].lower()==cond.split('_')[1][0].lower():\n\t\t\t#make total trials for this simulated condition\n\t\t\t#proportional to number of experimental\n\t\t\t#trials in this condition\n\t\t\tperc=int(cond[1:3])*.01\n\t\telse:\n\t\t\tperc=1-(int(cond[1:3])*.01)\n\t\texptrials=perc*ntrials\n\n\t\tif 'p' in pdict[cond].keys():\n\t\t\tpfast=pdict[cond]['p']\n\t\t\tpslow=pdict[cond]['p']\n\n\t\tnfast=int((exptrials/2)*pfast)\n\t\tnslow=int((exptrials/2)*pslow)\n\n\t\tdata, parameters = hddm.generate.gen_rand_data(params={cond:pdict[cond]}, subjs=nsims_per_sub, \n\t\t\tn_fast_outliers=nfast, n_slow_outliers=nslow, size=exptrials)\n\n\n\t\tif i==0:\n\t\t\tsimdf=data\n\t\telse:\n\t\t\tsimdf=pd.concat([simdf, data], ignore_index=True)\n\t\tparam_dict[i]=parameters\t\n\n\tsimdf=ref_simdf(simdf)\n\n\treturn simdf, param_dict\n\n\n\ndef sim_subs(pdict, ntrials=500, p_outlier=None, pfast=0, pslow=0, nsims_per_sub=1):\n\t\n\tparam_dict=dict()\n\tnfast=int((ntrials)*pfast)\n\tnslow=int((ntrials)*pslow)\n\tfor i, x in enumerate(pdict):\n\n\t\tdata, parameters = hddm.generate.gen_rand_data(params=pdict[x], subjs=nsims_per_sub, \n\t\t\t\t\t\tn_fast_outliers=nfast, n_slow_outliers=nslow, size=ntrials)\n\t\tdata.subj_idx[:]=x\n\n\t\tif i==0:\n\t\t\tsimdf=data\n\t\telse:\n\t\t\tsimdf=pd.concat([simdf, data], ignore_index=True)\n\t\tparam_dict[i]=parameters\t\n\n\tsimdf=ref_simdf(simdf)\n\t\n\treturn simdf, param_dict\n\ndef sim_noise_sep(pdict, ntrials=100, nsims=10, simfx=sim_exp, pfast=0, pslow=0, nsims_per_sub=1):\n\tfor i in range(nsims):\n\t\tp68=pdict[0]\n\t\tp69=pdict[1]\n\t\tsimdf68, params_used=simfx(pdict=p68, ntrials=ntrials, pfast=pfast, pslow=pslow, nsims_per_sub=nsims_per_sub)\n\t\tsimdf69, params_used=simfx(pdict=p69, ntrials=ntrials, pfast=pfast, pslow=pslow, nsims_per_sub=nsims_per_sub)\n\t\t\n\t\tsimdf68['noise']=['68']*len(simdf68)\n\t\tsimdf69['noise']=['69']*len(simdf69)\n\t\t\n\t\tif i==0:\n\t\t\tsimdf=pd.concat([simdf68, simdf69], ignore_index=True)\n\t\telse:\n\t\t\tsimdf=pd.concat([simdf, simdf68], ignore_index=True)\n\t\t\tsimdf=pd.concat([simdf, simdf69], ignore_index=True)\n\t\n\treturn simdf\n\n\ndef sim_grp(pdict, ntrials=5000, pfast=0.00, pslow=0.00, nsims_per_sub=25, subj_noise=0.1):\n\n\tparam_dict=dict()\n\tnfast=int((ntrials)*pfast)\n\tnslow=int((ntrials)*pslow)\n\n\tsimdf, parameters = hddm.generate.gen_rand_data(params=pdict, subjs=nsims_per_sub, \n\t\t\t\t\tn_fast_outliers=nfast, n_slow_outliers=nslow, size=ntrials, subj_noise=subj_noise)\n\n\tsimdf=ref_simdf(simdf)\n\n\treturn simdf, parameters\n\n\ndef ref_simdf(simdf):\n\t#add separate cols for \n\t#stim and cue names\n\tsim_cue=list()\n\tsim_img=list()\n\tfor cond in simdf['condition']:\n\t\tif '_' in cond:\n\t\t\timg=cond.split('_')[1]\n\t\t\tcue=cond.split('_')[0]\n\t\tsim_cue.append(cue)\n\t\tsim_img.append(img)\n\tsimdf['stim']=sim_img\n\tsimdf['cue']=sim_cue\n\n\t#add accuracy column to simdf\n\tsimdf['acc']=simdf['response'].values\n\tsimdf.ix[(simdf['stim']=='house') & (simdf['response']==0), 'acc']=1\n\tsimdf.ix[(simdf['stim']=='house') & (simdf['response']==1), 'acc']=0\n\n\n\treturn simdf\n\n\n\nif __name__ == \"__main__\":\n\tmain()\t\n\n\n"
},
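A detail worth unpacking in sim_exp above: condition names such as 'e90F_face' encode both the cue and the stimulus, and the simulated trial count per condition is scaled by whether the cue's letter matches the stimulus, so simulated data mirror the unbalanced empirical design. A small worked sketch of that parsing (the helper name is hypothetical; the rounding is an addition for a clean trial count):

def condition_trials(cond, ntrials=500):
    # 'e90F_face' -> cue 'e90F', stimulus 'face'; cond[1:3] is the cue validity ('90').
    cue, stim = cond.split('_')
    if cue[-1].lower() == stim[0].lower():   # cue letter matches the stimulus
        perc = int(cond[1:3]) * .01
    else:                                    # cue points to the other stimulus
        perc = 1 - int(cond[1:3]) * .01
    return int(round(perc * ntrials))

print(condition_trials('e90F_face'))   # 450
print(condition_trials('e90F_house'))  # 50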
{
"alpha_fraction": 0.6030479073524475,
"alphanum_fraction": 0.6211901307106018,
"avg_line_length": 27,
"blob_id": "14e6461b4158566f53d9aaa1b6568b07611d7aa9",
"content_id": "19a292ea22b3405f94b7ba47c116df6ab72c78b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 49,
"path": "/outliers.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#TODO: Now apply subj-estimated EWMA cutoffs to eliminate fast outliers\nfrom __future__ import division\nimport pandas as pd\nimport numpy as np\n\ndef cutoff_sd(data, sd=2.0):\n\t\"\"\"\n\t\"Removes trials where RT is higher than 2SD above the mean for that subject/cue\"\n\t\n\t::Arguments::\n\t\t\n\t\tdata (pandas df)\t\tpandas df containing typical HDDM \n\t\t\t\t\t\t\t\tinput format for hierarchical model\n\t\t\t\t\t\t\t\tNOTE: data input needs accuracy column\n\t\t\n\t\tsd (float)\t\t\t\tstdev cutoff value, default is 2.0 (keep ~95%)\n\t\t \t\t\t\t\t\tNOTE: Ratcliff advises stdev cutoff at 1, 1.5, or 2\n\t\t\t\t\t\t\t\t\n\t::Returns::\n\t\n\t\tcleandf (pandas df)\t\tpandas df with slow outlier removed removed\n\t\n\t\"\"\"\n\t\n\tgrpdf=data.groupby(['subj_idx', 'cue', 'stim', 'acc'])\n\t\n\t#counter to make sure that \n\t#cleandf is only initialized once, \n\t#for sub1's first cue\n\ti=1\n\t\n\tfor x, rest in grpdf:\n\t\tcutoff=rest['rt'].std()*sd + (rest['rt'].mean())\n\t\t\n\t\tif x[0]==1 and i==1:\n\t\t\tcleandf=data.ix[ (data['subj_idx']==x[0]) & (data['cue']==x[1]) & (data['stim']==x[2]) & (data['acc']==x[3]) & ( data['rt']<cutoff) ]\n\t\t\n\t\telse:\n\t\t\t#just call it something else\n\t\t\t#and append it to cleandf\n\t\t\tothersubs=data.ix[ (data['subj_idx']==x[0]) & (data['cue']==x[1]) & (data['stim']==x[2]) & (data['acc']==x[3]) & ( data['rt']<cutoff) ]\n\t\t\t\n\t\t\tcleandf=pd.concat([cleandf, othersubs], ignore_index=True)\n\t\t\n\t\ti+=1\n\t\n\treturn cleandf\n\t\n\t\n\t\n"
},
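For reference, the per-cell filtering that cutoff_sd implements with an explicit loop can also be written as a grouped filter. This is only an equivalent sketch under the same column assumptions ('subj_idx', 'cue', 'stim', 'acc', 'rt'), not the repo's code:

import pandas as pd

def cutoff_sd_groupby(data, sd=2.0):
    # Keep trials whose RT falls below mean + sd*std within each
    # subject/cue/stim/accuracy cell; mirrors cutoff_sd above.
    def below_cutoff(group):
        cutoff = group['rt'].mean() + sd * group['rt'].std()
        return group[group['rt'] < cutoff]
    return (data.groupby(['subj_idx', 'cue', 'stim', 'acc'], group_keys=False)
                .apply(below_cutoff)
                .reset_index(drop=True))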
{
"alpha_fraction": 0.6117693185806274,
"alphanum_fraction": 0.6311678290367126,
"avg_line_length": 26.23404312133789,
"blob_id": "299cff0ca455aaa91412ee7c452dcb7a5d3ebe91",
"content_id": "138e60bf33602de8f10b54042b282707349b552a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7681,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 282,
"path": "/sxbayes.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport hddm, os\nimport numpy as np\nimport pandas as pd\nfrom myhddm import defmod, parse, vis\nfrom mydata.munge import find_path\nfrom patsy import dmatrix\n\n#data=pd.read_csv(\"/Users/kyle/Desktop/beh_hddm/allsx_feat.csv\")\n\n\ndef z_link_func(x, data):\n stim = (np.asarray(dmatrix('0 + C(s,[[1],[-1]])', {'s':data.stimulus.ix[x.index]})))\n return 1 / (1 + np.exp(-(x * stim)))\n\ndef v_link_func(x, data):\n stim = (np.asarray(dmatrix('0 + C(s,[[1],[-1]])', {'s':data.stimulus.ix[x.index]})))\n return x * stim\n\ndef run_models(mname, project, regress=False):\n\t\n\t#bayes fit all subject\n\tallsx_df=fit_sx(mname, project=project, regress=regress)\n\t\n\t#parse model output\n\tsubdf=parse_allsx(allsx_df)\n\tpdict=subdf_to_pdict(subdf)\n\t\n\t#simulate and compare with observed data\n\tdata=defmod.find_data(mname, project=project)\n\tsimdf=vis.predict(pdict, data, ntrials=160, nsims=100, save=True, RTname=\"SimRT_EvT.jpeg\", ACCname=\"SimACC_EvT.jpeg\")\n\tsimdf.to_csv(\"simdf_sxbayes.csv\")\n\t\n\t#save pdict; can be reloaded and transformed back into\n\t#the original pdict format by the following commands \n\t#1. pdict=pd.read_csv(\"sxbayes_pdict.csv\")\n\t#2. pdict=pdict.to_dict()\n\tparams=pd.DataFrame(pdict)\n\tparams.to_csv(\"sxbayes_pdict.csv\", index=False)\n\ndef aic(model):\n\tk = len(model.get_stochastics())\n\tlogp = sum([x.logp for x in model.get_observeds()['node']])\n\treturn 2 * k - 2 * logp\n\ndef bic(model):\n\tk = len(model.get_stochastics())\n\tn = len(model.data)\n\tlogp = sum([x.logp for x in model.get_observeds()['node']])\n\treturn -2 * logp + k * np.log(n)\n\ndef dic(model):\n\treturn model.dic\n\t\ndef fit_sx(mname, project='behav', regress=False):\n\t\n\tpth=find_path()\n\n\tdata=defmod.find_data(mname, project)\n\t\n\tgrp_dict={}; subj_params=[]; aic_list=[]; bic_list=[]; dic_list=[]; ic_dict={}\n\n\tfor subj_idx, subj_data in data.groupby('subj_idx'):\n\n\t\tm_sx=defmod.define_sxbayes(mname, subj_data, project=project, regress=regress)\n\t\tm_sx.sample(1000, burn=500, dbname=str(subj_idx)+\"_\"+mname+'_traces.db', db='pickle')\n\t\t\n\t\tsx_df=parse.stats_df(m_sx)\n\t\tsx_df=sx_df.drop(\"sub\", axis=1)\n\t\tsx_df['sub']=[subj_idx]*len(sx_df)\n\t\t\n\t\tsubj_params.append(sx_df)\n\t\taic_list.append(aic(m_sx)); bic_list.append(bic(m_sx)); dic_list.append(m_sx.dic)\n\n\tallsx_df=pd.concat(subj_params)\n\tallsx_df.to_csv(mname+\"_SxStats.csv\", index=False)\n\t\n\tic_dict={'aic':aic_list, 'bic':bic_list, 'dic':dic_list}\n\tic_df=pd.DataFrame(ic_dict)\n\tic_df.to_csv(mname+\"_IC_Rank.csv\")\n\t\n\treturn allsx_df\n\ndef parse_allsx(allsx_df):\n\t\n\tstims=[]; stim_list=['face', 'house']\n\tcues=[]; cue_list=['a90H', 'b70H', 'c50N', 'd70F', 'e90F']\n\tparams=[]\n\n\tfor p in allsx_df['param']:\n\t\tif \")\" in list(p):\n\t\t\tparams.append(p[0])\n\t\t\tcond=p[2:-1]\n\t\t\tif cond in stim_list:\n\t\t\t\tstims.append(cond)\n\t\t\telse: \n\t\t\t\tstims.append(\"constant\")\n\t\t\tif cond in cue_list:\n\t\t\t\tcues.append(cond)\n\t\t\telse:\n\t\t\t\tcues.append(\"constant\")\n\t\telse:\n\t\t\tparams.append(p)\n\t\t\tstims.append(\"constant\")\n\t\t\tcues.append(\"constant\")\n\t\t\n\tallsx_df['stim']=stims\n\tallsx_df['cue']=cues\n\tallsx_df['parameter']=params\n\tallsx_df['noise']=['constant']*len(allsx_df)\n\t\n\tsubdf=allsx_df[['sub', 'param', 'mean', 'parameter', 'cue', 'stim', 'noise']]\n\t\n\tsubdf=parse.txtparse(subdf, 'sub')\n\tsubdf.index=range(len(subdf))\t\n\t\n\treturn subdf\n\n\ndef 
subdf_to_pdict(subdf):\n\t\n\tcond_list=['a90H_face', 'b70H_face', 'c50N_face', 'd70F_face', 'e90F_face', 'a90H_house', 'b70H_house', 'c50N_house', 'd70F_house', 'e90F_house']\n\t\n\tallsx={}\n\tconditions={}\n\t\n\tfor sx, sxdata in subdf.groupby('sub'):\n\t\tconditions={}\n\t\tfor cond in cond_list:\n\t\t\tcond_cue=cond.split(\"_\")[0]\n\t\t\tcond_stim=cond.split(\"_\")[1]\n\t\t\tconditions[cond]={'a':sxdata.ix[sxdata['parameter']=='a', 'mean'].unique()[0], \n\t\t\t\t't':sxdata.ix[sxdata['parameter']=='t', 'mean'].unique()[0], \n\t\t\t\t'v':sxdata.ix[(sxdata['parameter']=='v')&(sxdata['stim']==cond_stim), 'mean'].unique()[0], \n\t\t\t\t'z':sxdata.ix[(sxdata['parameter']=='z')&(sxdata['cue']==cond_cue), 'mean'].unique()[0]}\n\n\t\t\tfor i in sxdata.param:\n\t\t\t\n\t\t\t\tif \"_\" not in list(i) or i.split(\"_\")[1][0]=='I':\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif i.split(\"_\")[1][0]=='C':\n\t\t\t\t\tcue=i.split(']')[0][-4:]\n\t\t\t\t\tif cue==cond_cue:\n\t\t\t\t\t\tconditions[cond]['v']=conditions[cond]['v']+sxdata.ix[sxdata['param']==i, 'mean'].unique()[0]\n\t\n\t\tallsx[str(sx)]=conditions\n\t\n\treturn allsx\n\ndef simform(subdf, sc=None):\n\t\"\"\"\n\tRETURNS: 1\n\n\t\t*condsdf (pandas DataFrame):\tone column for each experimental cue\n\t\t\t\t\t\t\t\t\t\tcolumns for sub_id and params as well \n\t\t\t\t\t\t\t\t\t\t(is used to make pdict (which is used for simulating)\t\t\t\t\t\t\t\n\n\t\"\"\"\n\t\n\tgroupdf=False\n\n\tnparams=len(subdf.parameter.unique())\n\n\tnsubs=len(subdf['sub'].unique())\n\tnrows=nsubs*nparams\n\n\tif nrows==nparams:\n\t\tgroupdf=True\n\n\tif len(subdf.cue.unique())<5:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*6).reshape((nrows, 6)), columns=['a80H_face', 'b50N_face', 'c80F_face', \n\t\t\t'a80H_house', 'b50N_house', 'c80F_house'])\n\telse:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*10).reshape((nrows, 10)), columns=['a90H_face', 'b70H_face', 'c50N_face', \n\t\t\t'd70F_face', 'e90F_face', 'a90H_house', 'b70H_house', 'c50N_house', 'd70F_house', 'e90F_house'])\n\n\tcounter=1\n\tfor cond in condsdf.columns:\n\t\tcue_n=cond.split('_')[0]\n\t\timg_n=cond.split('_')[1]\n\n\t\tif counter==1:\n\t\t\tcdf=subdf.ix[subdf['stim'].isin([img_n, 'constant']) & subdf['cue'].isin([cue_n, 'constant']), ['sub', 'parameter', 'mean']]\n\t\t\tcdf.index=range(len(cdf))\n\t\t\tif not groupdf:\n\t\t\t\tcondsdf['sub']=cdf['sub'].values\n\t\t\tcondsdf['param']=cdf['parameter'].values\n\t\telse:\n\t\t\tcdf=subdf.ix[subdf['stim'].isin([img_n, 'constant']) & subdf['cue'].isin([cue_n, 'constant']), ['mean']]\n\t\t\tcdf.index=range(len(cdf))\n\t\tcondsdf[cond]=cdf['mean'].values\n\n\t\tcounter+=1\n\n\tif sc is not None:\n\t\tfor i in condsdf.columns:\n\t\t\tif '_' in i: \n\t\t\t\tisplit=i.split('_')\n\t\t\t\tif 'face' in isplit and sc=='v':\n\t\t\t\t\tcondsdf.ix[(condsdf['param']==sc), i]=abs(condsdf.ix[(condsdf['param']==sc), i])\n\t\t\t\telif 'face' in isplit and sc=='z':\n\t\t\t\t\tcondsdf.ix[(condsdf['param']==sc), i]=1-condsdf.ix[(condsdf['param']==sc), i]\n\n\treturn condsdf\n\ndef create_pdict(condsdf, grp_dict=None):\n\t\"\"\"\n\tArguments: condsdf (pandas dataframe)\n\n\tReturns: \n\t\t*pdict (dict):\t\tdict for all subs with parameter names and values\n\t\t\t\t\t\t\testimated for each exp. 
cue included in the \n\t\t\t\t\t\t\toriginal model.\n\n\t\t \t\t\t\t\tis used to loop through when simulating \n\t\t\t\t\t\t\tdata with hddm.generate.gen_rand_data()\n\n\t\t\t\t\t\t\tstructure: \n\n\t\t\t\t\t\t\t\t{subID{cond{param : param_value}}}\n\t\"\"\"\n\tadd_z=False\n\n\tif 'z' not in condsdf.param.unique():\n\t\tadd_z=True\n\n\tcondsdf.index=condsdf.param\n\n\tpdict=dict()\n\tfor subj, group in condsdf.groupby('sub'):\n\t sdict=dict()\n\t for cond in group:\n\t\t\tif cond == 'sub':\n\t\t\t\tcontinue\n\t\t\telif cond == 'param':\n\t\t\t\tcontinue\n\t\t\tsdict[cond]=dict(group[cond])\n\n\t pdict[subj]=sdict\n\n\tif hasattr(grp_dict, \"keys\"):\n\t\tfor sub in pdict:\n\t\t\tfor cond in pdict[sub]:\n\t\t\t\tpdict[sub][cond]['sv']=grp_dict['sv']\n\t\t\t\tpdict[sub][cond]['st']=grp_dict['st']\n\t\t\t\tpdict[sub][cond]['sz']=grp_dict['sz']\n\t\t\t\tif add_z:\n\t\t\t\t\tpdict[sub][cond]['z']=0.5\n\treturn pdict\n\n\n\t\t\t\t\ndef get_sxmodel_dic():\t\t\t\t\n\t\n\t#models=['v', 'z', 'vz']\n\tmodels=['msm', 'dbm', 'dbmz', 'pbm']\n\tdic_list=[]\n\tdic_dict={}\n\t#skip_sx=[0, 1, 2, 3, 4, 5, 6, 7, 9, 12, 14, 25, 28]\n\tskip_sx=[0, 5, 19]\n\tglobalp=\"/Users/DunovanK/Desktop/beh_hddm/EWMA5/subj_bayes/MCMC10K_NoVar/\"\n\t#pth=find_path()\n\t#globalp=pth+\"img_hddm/subj_bayes/\"\n\tfor m in models:\n\t\tdic_list=[]\n\t\tos.chdir(globalp+m)\n\t\tfor sx in range(26):\n\n\t\t\tif sx in skip_sx:\n\t\t\t\tcontinue\n\n\t\t\tsubj=pd.read_table(str(sx)+\"_params.txt\", delim_whitespace=True, header=0, index_col=0)['mean']\n\t\t\tdic_list.append(subj.ix['DIC:'])\n\n\t\tdic_dict[m]=dic_list\n\n\tmdic_df=pd.DataFrame(dic_dict)\n\treturn mdic_df\t\t"
},
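The nested pdict structure documented in create_pdict above ({subID: {condition: {param: value}}}) is easiest to see as a literal; the parameter values below are made up purely for illustration:

pdict = {
    '1': {
        'a90H_face':  {'a': 1.4, 't': 0.30, 'v': 0.9,  'z': 0.42},
        'a90H_house': {'a': 1.4, 't': 0.30, 'v': -1.1, 'z': 0.42},
        # ... one entry per cue/stimulus condition
    },
    # ... one entry per subject
}
# One subject's condition->parameter mapping is what the simulation code feeds
# to the generator, e.g. (commented, since it needs hddm installed):
# data, params = hddm.generate.gen_rand_data(params=pdict['1'], size=100)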
{
"alpha_fraction": 0.7175792455673218,
"alphanum_fraction": 0.729106605052948,
"avg_line_length": 25.461538314819336,
"blob_id": "85033072af13cffa505d9cc9f13cf3dbdd01d84f",
"content_id": "5302aa53467fd0e4aee307116e52603952134ce5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 13,
"path": "/test.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport pandas as pd\nimport hddm\nfrom myhddm import sdt, defmod, vis, sims\n\ndef test_dfs():\n\t\n\tdf=pd.read_csv(\"/usr/local/lib/python2.7/site-packages/myhddm/test_dfs/allsx_ewma.csv\")\n\tsimdf=pd.read_csv(\"/usr/local/lib/python2.7/site-packages/myhddm/test_dfs/simdf.csv\")\n\t\n\treturn df, simdf\n\t\n\t"
},
{
"alpha_fraction": 0.5943337678909302,
"alphanum_fraction": 0.6332502961158752,
"avg_line_length": 35.91954040527344,
"blob_id": "386eeb41b1f7704c16b3258b11a4af65c336614d",
"content_id": "2ad918ba3429b35364f99568017cf67b148b7406",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3212,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 87,
"path": "/pfh_diffusion_traces.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "\ndef plotPFH(df, pGo=0.5, ssd=.300, timebound=0.653, task='ssRe', normp=False):\n\n\tplt.ion()\n\tsns.set_style('white')\n\ta=list(pd.Series(df['tparams']))[0]['a']\n\tz=list(pd.Series(df['tparams']))[0]['z']\n\tt=list(pd.Series(df['tparams']))[0]['t']\n\tlb=0\n\tsns.set_palette('Set1')\n\tif normp:\n\t\ta_orig=a; a=a-z; lb=-z; z=0\n\n\tss_tsteps=list(pd.Series(df['ss_tsteps']))\n\tss_paths=list(pd.Series(df['ss_paths']))\n\t\n\tgo_tsteps=list(pd.Series(df['go_tsteps']))\n\tgo_paths=list(pd.Series(df['go_paths']))\n\tchoices=list(pd.Series(df['choice']))\n\t\n\tf = plt.figure(figsize=(8,6))\n\tax = f.add_subplot(111)\n\tsns.set_style('white')\n\t\n\ttr=t\n\txmax=timebound+.05\n\txmin=t-.01\n\txlim=ax.set_xlim([xmin, xmax])\n\tylim=ax.set_ylim([lb, a])\n\t\n\tplt.hlines(y=a, xmin=xlim[0], xmax=xlim[1], lw=4, color='k')\n\tplt.hlines(y=lb, xmin=xlim[0], xmax=xlim[1], lw=4, color='k')\n\tplt.hlines(y=z, xmin=xlim[0], xmax=xlim[1], color='#545454', lw=3, linestyle='-')\n\tplt.vlines(x=xlim[0], ymin=lb, ymax=a, lw=4, color='k')\t\n\t\n\tsns.despine(fig=f, ax=ax,top=True, bottom=True, left=False, right=False)\n\tsns.set_style('white')\n\t\n\tfor i, (sst, ssp) in enumerate(zip(ss_tsteps, ss_paths)):\n\t\tif len(sst)<=1:\n\t\t\tcontinue\n\t\tax.plot(sst, ssp, color='#C0392B', alpha=.3, lw=.5, rasterized=True)\n\t\n\tfor t,p in zip(go_tsteps, go_paths):\n\t\tax.plot(t, p, color='#3A539B', alpha=.3, lw=.5, rasterized=True)\n\n\tdivider = make_axes_locatable(ax)\n\tlo = divider.append_axes(\"bottom\", size=1, pad=0, xlim=[xmin, xmax]) #xlim=[0, xmax],\n\thi = divider.append_axes(\"top\", size=1, pad=0, xlim=[xmin, xmax]) #xlim=[0, xmax],\n\tsns.set_style('white')\n\n\ttry:\t\t\n\t\tgort=df.ix[df['choice']=='go', 'rt'].mean()\n\t\tgo_tarrays=np.array([gts[-1] for gts in go_paths])\n\t\tgo_idx = (np.abs(go_tarrays - gort)).argmin()\n\t\tax.plot(go_paths.iloc[go_idx], go_paths.iloc[go_idx], color=\"Blue\", linewidth=1.95)\n\texcept Exception:\n\t\tpass\n\t\n\tsns.set_style('white')\n\tsns.distplot(df[df['choice']=='go']['rt'].values, 45, kde=False, ax=hi, \n\t\tkde_kws={\"color\": 'Blue', \"shade\":True, \"lw\": 2, \"bw\":.3, \"alpha\":.5},\n\t\thist_kws={\"color\": '#3A539B', \"alpha\":.9});\n\n\tsns.distplot(df[df['choice']=='stop']['rt'].values, 45, kde=False, ax=lo, \n\t\tkde_kws={\"color\": 'Red', \"shade\":True, \"lw\": 2, \"bw\":.3, \"alpha\":.5},\n\t\thist_kws={\"color\": '#C0392B', \"alpha\":.9});\n\t\n\tsns.set_style('white')\n\thi.set_xticklabels([]); hi.set_yticklabels([]); lo.set_yticklabels([]); lo.set_xticklabels([])\n\tax.set_xticklabels([]); ax.set_yticklabels([]); f.subplots_adjust(hspace=0.00001); lo.invert_yaxis();\n\tsns.set_style('white')\n\tsns.despine(fig=f, ax=lo, top=True, bottom=True, left=True, right=True)\n\tsns.despine(fig=f, ax=hi, top=True, bottom=True, left=True, right=True)\n\tsns.set_style('white')\n\t\n\tfor a in f.axes:\n\t\ta.set_aspect(\"auto\")\n\t\tlo.set_aspect(\"auto\")\n\t\thi.set_aspect(\"auto\")\n\t#pth=utils.find_path()\n\t#f.savefig(pth+str(int(ssd*1000))+\".svg\", rasterized=True, dpi=600)\n\t#f.savefig(\"/Users/kyle/Dropbox/\"+str(int(ssd*1000))+\".png\", dpi=900)\n\t#f.savefig(\"/Users/kyle/Dropbox/\"+str(int(ssd*1000))+\".eps\", dpi=900)\n\tf.savefig(\"/Users/kyle/Dropbox/\"+str(int(ssd*1000))+\".svg\", rasterized=True, dpi=900)\n\tf.savefig(\"/Users/kyle/Dropbox/\"+str(int(ssd*1000))+\".pdf\", rasterized=True, dpi=900)\n\tsns.set_style('white')\n\treturn f"
},
{
"alpha_fraction": 0.6611751914024353,
"alphanum_fraction": 0.6688632369041443,
"avg_line_length": 27.904762268066406,
"blob_id": "f2f966728b1ed3b5233e1d8dcebfc2706f715a48",
"content_id": "d75fdcb6495617f1ba063a22efe1e32cf14f1bb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1821,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 63,
"path": "/mle.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom __future__ import division\nimport hddm\nfrom myhddm import defmod, vis, opt\nimport pandas as pd\nimport numpy as np\n\ndef aic(model):\n k = len(model.get_stochastics())\n logp = sum([x.logp for x in model.get_observeds()['node']])\n return 2 * k - logp\n\ndef bic(model):\n k = len(model.get_stochastics())\n n = len(model.data)\n logp = sum([x.logp for x in model.get_observeds()['node']])\n return -2 * logp + k * np.log(n)\n\ndef dic(model):\n\treturn model.dic\n\ndef optimize_sx(mname, project='imaging'):\n\n\tm=defmod.define_model(mname, project=project)\n\tdata=m.data\n\tif 'z' in m.depends_on.keys():\n\t\tbias=True\n\telse:\n\t\tbias=False\n\n\tgrp_dict={}; subj_params=[]; aic_list=[]; bic_list=[]; dic_list=[]; ic_dict={}\n\n\tfor subj_idx, subj_data in data.groupby('subj_idx'):\n\n\t\tm_subj=hddm.HDDM(subj_data, depends_on=m.depends_on, bias=bias, include=m.include)\n\n\t\tsx_params=m_subj.optimize('ML')\n\n\t\tpdict=opt.get_pdict(sx_params)\n\t\tsubj_params.append(sx_params)\n\t\taic_list.append(aic(m_subj));\n bic_list.append(bic(m_subj)); #dic_list.append(m_subj.dic)\n\n\t\tgrp_dict[subj_idx]=pdict\n\n\tic_dict={'aic':aic_list, 'bic':bic_list}\n\tic_df=pd.DataFrame(ic_dict)\n\tic_df.to_csv(mname+\"_IC_Rank.csv\")\n\t#write grp_dict to .txt file for reloading later\n\tf=open('mle_params.txt', 'w')\n\tf.write('grp_dict=' + repr(grp_dict) + '\\n')\n\tf.close()\n\n\tparams = pd.DataFrame(subj_params)\n\tsimdf=vis.predict(grp_dict, data, ntrials=160, nsims=100, save=True, RTname=mname+\"_rt.png\", ACCname=mname+\"_acc.png\")\n\t#simdf=vis.predict(grp_dict, df, ntrials=160, nsims=100, save=True, RTname=\"SimRT_EvT.jpeg\", ACCname=\"SimACC_EvT.jpeg\")\n\tsimdf.to_csv(\"simdf_opt.csv\")\n\tparams.to_csv(\"subj_params_opt.csv\", index=False)\n\tsdt.plot_rho_sdt(data, simdf)\n\tempvsim=sdt.rho_sdt(data, simdf)\n\n\treturn grp_dict, ic_df\n"
},
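The information criteria computed in mle.py above follow the standard definitions; a dependency-free sketch with hypothetical numbers (k = free parameters, n = observations, logp = maximized log-likelihood):

import numpy as np

def aic(k, logp):
    return 2 * k - 2 * logp          # Akaike information criterion

def bic(k, n, logp):
    return k * np.log(n) - 2 * logp  # Bayesian information criterion

print(aic(k=5, logp=-1200.0))        # 2410.0
print(bic(k=5, n=800, logp=-1200.0)) # ~2433.4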
{
"alpha_fraction": 0.60813307762146,
"alphanum_fraction": 0.6404805779457092,
"avg_line_length": 21.372413635253906,
"blob_id": "7e01d9cce95d6dbf1933672159608edcedcb5faf",
"content_id": "2d7be9eb3cfb7eb30b3d84accd84e4417a6d566e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3246,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 145,
"path": "/opt.py",
"repo_name": "dunovank/myhddm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nIncludes functions for parsing parameter output from HDDM.optimize() routines\n\n\"\"\"\n\n\nfrom __future__ import division\nimport hddm\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\ndef get_pdict(params):\n\n\tflatdf=get_flatdf(params)\n\t\n\tif len(flatdf.noise.unique())>1:\n\t\tpdict=[]\n\t\tflat68=(flatdf[flatdf['noise'].isin(['68', 'constant'])])\n\t\tflat69=(flatdf[flatdf['noise'].isin(['69', 'constant'])])\n\t\tc68=flat_simform(flat68)\n\t\tc69=flat_simform(flat69)\n\t\tpdict.append(flat_pdict(c68))\n\t\tpdict.append(flat_pdict(c69))\t\t\n\n\telse:\n\t\tcondsdf=flat_simform(flatdf)\n\t\tpdict=flat_pdict(condsdf)\n\t\n\treturn pdict\n\t\ndef get_flatdf(group_pdict, save=False):\n\t\"\"\"\n\tParses stats into a dataframe for chi-square estimated group params\"\n\n\t\"\"\" \n\n\tdataframe=pd.DataFrame(columns=['param', 'mean'], index=group_pdict.keys())\n\tdataframe.param=group_pdict.keys()\n\tdataframe.mean=group_pdict.values()\t\n\n\n\tcondlist=list()\n\tfor i in dataframe.param:\n\t\tif '(' in i:\n\t\t\tcond_name=i.split('(')[1].split(')')[0]\n\t\telse: \n\t\t\tcond_name='constant'\n\n\t\tcondlist.append(cond_name)\n\n\tallnoise=['68', '69']\n\tallcues=['90H', '70H', 'neutral', '70F', '90F', \n\t\t\t\t'50N','a90H', 'b70H', 'c50N', 'd70F', \n\t\t\t\t'e90F', 'a80H', 'b50N', 'c80F']\n\tallimgs=['face', 'house', 'Face', 'House']\n\n\tcuelist=[]; noiselist=[]; stimlist=[]\n\n\tlistd={'cue':[allcues, cuelist], 'noise':[allnoise, noiselist], 'stim':[allimgs, stimlist]}\n\n\tfor i in condlist:\n\t\ti=str(i)\n\t\tfor k in listd.keys():\n\t\t\tkval=[kval for kval in listd[k][0] if kval in i.split('.') or kval==i]\n\n\t\t\tif kval:\n\t\t\t\tlistd[k][1].append(kval[0])\n\t\t\telse:\n\t\t\t\tlistd[k][1].append('constant')\n\n\tdataframe['stim']=listd['stim'][1]\n\tdataframe['cue']=listd['cue'][1]\n\tdataframe['noise']=listd['noise'][1]\n\n\tplist=list()\n\tfor i in group_pdict.keys():\n\t\tp=i.split('(')[0]\n\t\tplist.append(p)\n\tdataframe['param']=plist\n\t\n\tif save:\n\t\tdataframe.to_csv(\"flatdf.csv\", index=False)\n\n\treturn dataframe\n\n\n\ndef flat_simform(flatdf):\n\t\"\"\"\n\tRETURNS: 1\n\n\t\t*flatdf (pandas DataFrame):\t\tgroup-level df with one column for each experimental cue/stim combo\n\t\t\t\t\t\t\t\t\t\t(is used to make pdict (which is used for simulating)\t\t\t\t\t\t\t\n\n\t\"\"\"\n\n\tnrows=len(flatdf.param.unique())\n\n\tif len(flatdf['cue'].unique())<5:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*6).reshape((nrows, 6)), columns=['a80H_face', 'b50N_face', 'c80F_face', \n\t\t\t'a80H_house', 'b50N_house', 'c80F_house'])\n\telse:\n\t\tcondsdf=pd.DataFrame(np.zeros(nrows*10).reshape((nrows, 10)), columns=['a90H_face', 'b70H_face', 'c50N_face', \n\t\t\t'd70F_face', 'e90F_face', 'a90H_house', 'b70H_house', 'c50N_house', 'd70F_house', 'e90F_house'])\n\n\n\tfor cond in condsdf.columns:\n\t\tcue_n=cond.split('_')[0]\n\t\timg_n=cond.split('_')[1]\n\t\tcdf=flatdf.ix[flatdf['stim'].isin([img_n, 'constant']) & flatdf['cue'].isin([cue_n, 'constant']), ['param', 'mean']]\n\n\t\tcdf=cdf.sort('param')\n\t\tcdf.index=range(len(cdf))\n\t\n\t\tcondsdf['param']=cdf['param'].values\n\t\tcondsdf[cond]=cdf['mean'].values\n\t\t\n\treturn condsdf\n\n\ndef flat_pdict(df, addz=False):\n\n\tdf.index=df.param\n\tif 'z' not in df.index:\n\t\taddz=True\n\t\n\tpdict=dict()\n\tfor cond in df:\n\t\tif cond == 'param':\n\t\t\tcontinue\n\t\t\n\t\tpdict[cond]=dict(df[cond])\n\t\tif addz:\n\t\t\tpdict[cond]['z']=0.5\n\t\t\n\treturn 
pdict\n\n\nif __name__ == \"__main__\":\n\tmain()\t\n\n"
}
] | 16 |
lauragreige/P-ANDROIDE
|
https://github.com/lauragreige/P-ANDROIDE
|
a79903402b2c21837fca4bcb203916e97cfb73e3
|
42bc327b82171a9099f6327669283e900688cb77
|
de37a2dbc9e83f39ab5aa18dea1542c4774f91c3
|
refs/heads/master
| 2020-03-03T23:11:28.940446 | 2016-05-18T22:04:09 | 2016-05-18T22:04:09 | 50,506,730 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.625778079032898,
"alphanum_fraction": 0.6401690244674683,
"avg_line_length": 38.797725677490234,
"blob_id": "261dffce8001b3fd23534019bb3f446ab71714bb",
"content_id": "d94655e3bd7e8ca6faf8efa861a6cba9b4b1c03b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17511,
"license_type": "no_license",
"max_line_length": 201,
"num_lines": 440,
"path": "/algorithms/similarity_matrix.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env sage -python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom os import getcwd\nsys.path.append(getcwd())\n\nfrom sage.all import Set, matrix\nfrom itertools import permutations, combinations\nfrom data_gestion.generation import generation\nfrom time import time\n\n\ndef dissimilarity_and_n(structure, candidate1, candidate2):\n \"\"\"\n Calculates the dissimilarity between candidate1 and candidate2 like this:\n 1 - number_of_ballots_with_candidate1_AND_candidate2/total_number_of_ballots\n :param structure: data extracted from an election file\n :param candidate1: ID of a candidate\n :param candidate2: ID of a candidate\n :return: dissimilarity score\n :rtype: int\n \"\"\"\n # if the candidates are identical, their dissimilarity is 0, nothing else to do\n if candidate1 == candidate2:\n return 0\n\n # Variables initialization\n ballots = structure[\"preferences\"]\n vote_count = float(structure[\"sum_vote_count\"])\n score = 0\n # Calculating number of ballots with the 2 candidates\n for ballot in ballots:\n if candidate1 in ballot[1] and candidate2 in ballot[1]:\n score += ballot[0]\n\n return 1 - float(score) / vote_count\n\n\ndef dissimilarity_and_or(structure, candidate1, candidate2):\n \"\"\"\n Calculates the dissimilarity between candidate1 and candidate2 like this:\n 1 - number_of_ballots_with_candidate1_AND_candidate2/number_of_ballots_with_candidate1_OR_candidate2\n :param structure: data extracted from an election file\n :param candidate1: ID of a candidate\n :param candidate2: ID of a candidate\n :return: dissimilarity score\n :rtype: int\n \"\"\"\n # if the candidates are identical, their dissimilarity is 0, nothing else to do\n if candidate1 == candidate2:\n return 0\n\n # Variables initialization\n ballots = structure[\"preferences\"]\n score = 0\n nb_cand1_or_cand2 = 0\n # Calculating number of ballots with the 2 candidates\n for ballot in ballots:\n if candidate1 in ballot[1] and candidate2 in ballot[1]:\n score += ballot[0]\n if candidate1 in ballot[1] or candidate2 in ballot[1]:\n nb_cand1_or_cand2 += ballot[0]\n\n return 1 - score / float(nb_cand1_or_cand2)\n\n\ndef dissimilarity_over_over(structure, candidate1, candidate2):\n \"\"\"\n Calculates the dissimilarity between candidate1 and candidate2 like this:\n 1 - sum(1/|ballot_with_candidate1_and_candidate2| * number_of_ballots_with_candidate1_AND_candidate2)/sum(1/|ballot_with_candidate1_OR_candidate2| * number_of_ballots_with_candidate1_OR_candidate2)\n :param structure: data extracted from an election file\n :param candidate1: ID of a candidate\n :param candidate2: ID of a candidate\n :return: dissimilarity score\n :rtype: int\n \"\"\"\n # if the candidates are identical, their dissimilarity is 0, nothing else to do\n if candidate1 == candidate2:\n return 0\n\n # Variables initialization\n ballots = structure[\"preferences\"]\n score = 0\n nb_cand1_or_cand2 = 0\n # Calculating number of ballots with the 2 candidates\n for ballot in ballots:\n n = len(ballot[1]) if type(ballot[1][-1]) == int else len(ballot[1]) - 1 # number of candidates approved in the ballot\n if candidate1 in ballot[1] and candidate2 in ballot[1]:\n score += ballot[0] / float(n)\n if candidate1 in ballot[1] or candidate2 in ballot[1]:\n nb_cand1_or_cand2 += ballot[0] / float(n)\n\n return 1 - score / float(nb_cand1_or_cand2)\n\n\ndef create_similarity_matrix(structure, dissimilarity_function):\n \"\"\"\n Creates the similarity matrix between candidates, given the preferences stored in the structure\n :param structure: data 
extracted from an election file\n    :type structure: dict\n    :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n    :type dissimilarity_function: function pointer\n    :return: similarity matrix\n    :rtype: matrix\n    \"\"\"\n    # Variables initialization\n    nb_candidates = structure[\"nb_candidates\"]\n    similarity_m = [[0 for _ in range(nb_candidates)] for _ in range(nb_candidates)]\n    pairs_of_candidates = list(combinations(structure[\"candidates\"].keys(), 2))\n\n    for candidate1, candidate2 in pairs_of_candidates:\n        similarity = dissimilarity_function(structure, candidate1, candidate2)\n        similarity_m[candidate1 - 1][candidate2 - 1] = similarity\n        similarity_m[candidate2 - 1][candidate1 - 1] = similarity\n\n    return matrix(similarity_m)\n\n\ndef get_matrix_score(mat):\n    \"\"\"\n    Calculates matrix gradient score\n    :param mat: similarity matrix between candidates\n    :type mat: matrix\n    :return: matrix gradient score\n    :rtype: int\n    \"\"\"\n    rows = mat.nrows()\n    cols = mat.ncols()\n    score_m = 0\n\n    # Calculates score\n    for i in range(rows - 1):\n        for j in range(i + 1, cols - 1):\n            for k in range(j + 1, cols):\n                # per row\n                if mat[i][j] > mat[i][k]:\n                    score_m += 1\n                # per column\n                if mat[rows-j - 1][cols-i - 1] > mat[rows-k - 1][cols-i - 1]:\n                    score_m += 1\n\n    return score_m\n\n\ndef get_weighted_matrix_score(mat):\n    \"\"\"\n    Calculates the matrix's weighted gradient score\n    :param mat: similarity matrix between candidates\n    :type mat: matrix\n    :return: weighted gradient score\n    :rtype: float\n    \"\"\"\n    rows = mat.nrows()\n    cols = mat.ncols()\n    wscore_m = 0\n\n    # Calculates score\n    for i in range(rows - 1):\n        for j in range(i + 1, cols - 1):\n            for k in range(j + 1, cols):\n                # per row\n                if mat[i][j] > mat[i][k]:\n                    wscore_m += mat[i][j] - mat[i][k]\n                # per column (add the difference, like the row case)\n                if mat[rows-j - 1][cols-i - 1] > mat[rows-k - 1][cols-i - 1]:\n                    wscore_m += mat[rows-j - 1][cols-i - 1] - mat[rows-k - 1][cols-i - 1]\n\n    return wscore_m\n\n\ndef find_permutation_naive(similarity_matrix, weighted=False):\n    \"\"\"\n    Calculates the similarity matrix between candidates then finds the permutation minimizing the matrix gradient score\n    by testing all possible permutations\n    :param similarity_matrix: similarity matrix between candidates\n    :param weighted: if True, matrices scores are calculated with the weighted gradient\n    :type weighted: bool\n    :return: list of candidates indices = axis of candidates\n    :rtype: list\n    \"\"\"\n    # Variables initialization\n    nb_candidates = similarity_matrix.nrows()\n    candidates_id = range(nb_candidates)\n    candidates_permutations = list(permutations(candidates_id, nb_candidates))\n\n    # We can work only on the first half of the permutations, the other ones are symmetric\n    # candidates_permutations = candidates_permutations[:len(candidates_permutations) / 2]\n\n    # Calculating scores for all possible permutations\n    if weighted:\n        score = get_weighted_matrix_score(\n            similarity_matrix.matrix_from_rows_and_columns(list(candidates_permutations[0]),\n                                                           list(candidates_permutations[0])))\n    else:\n        score = get_matrix_score(similarity_matrix.matrix_from_rows_and_columns(list(candidates_permutations[0]),\n                                                                                list(candidates_permutations[0])))\n    optimal_permutations = [candidates_permutations[0]]\n    for i in range(1, len(candidates_permutations)):\n        if weighted:\n            temp = get_weighted_matrix_score(\n                similarity_matrix.matrix_from_rows_and_columns(list(candidates_permutations[i]),\n                                                               list(candidates_permutations[i])))\n        else:\n            temp = 
get_matrix_score(similarity_matrix.matrix_from_rows_and_columns(list(candidates_permutations[i]),\n                                                                                    list(candidates_permutations[i])))\n        if temp < score:\n            score = temp\n            optimal_permutations = [candidates_permutations[i]]\n        elif temp == score:\n            optimal_permutations.append(candidates_permutations[i])\n\n    return [[i+1 for i in permutation] for permutation in optimal_permutations]\n\n\ndef get_distance_score(mat, candidate_index):\n    \"\"\"\n    Calculates matrix gradient score from a candidate\n    :param mat: similarity matrix between candidates\n    :param candidate_index: index of the candidate to be compared to the others, in the similarity matrix\n    :type mat: matrix\n    :type candidate_index: int\n    :return: matrix gradient score\n    :rtype: int\n    \"\"\"\n    cols = mat.ncols()\n    score_m = 0\n\n    # Calculates score\n    for line in range(candidate_index):\n        for col in range(candidate_index + 1, cols):\n            # per row\n            if mat[line][candidate_index] > mat[line][col]:\n                score_m += 1\n\n            # per column\n            if mat[candidate_index][col] > mat[line][col]:\n                score_m += 1\n\n    return score_m\n\n\ndef get_weighted_distance_score(mat, candidate_index):\n    \"\"\"\n    Calculates matrix's weighted gradient score from a candidate\n    :param mat: similarity matrix between candidates\n    :param candidate_index: index of the candidate to be compared to the others, in the similarity matrix\n    :type mat: matrix\n    :type candidate_index: int\n    :return: matrix gradient score\n    :rtype: int\n    \"\"\"\n    cols = mat.ncols()\n    score_m = 0\n\n    # Calculates score\n    for line in range(candidate_index):\n        for col in range(candidate_index + 1, cols):\n            # per row\n            if mat[line][candidate_index] > mat[line][col]:\n                score_m += mat[line][candidate_index] - mat[line][col]\n\n            # per column\n            if mat[candidate_index][col] > mat[line][col]:\n                score_m += mat[candidate_index][col] - mat[line][col]\n\n    return score_m\n\n\ndef distance(similarity_matrix, candidates_set, candidate, function_map, weighted=False):\n    \"\"\"\n    Calculates the \"distance\" between a set of candidates and a given candidate,\n    by adding the dissimilarity between each candidate in the set and the given candidate\n    :param similarity_matrix: similarity matrix between candidates\n    :param candidates_set: set of candidates\n    :param candidate: candidate to be compared to the others\n    :param function_map: map whose keys are sets of candidates and values are a tuple (score, optimal permutation so far)\n    :param weighted: if True, matrices scores are calculated with the weighted gradient\n    :type similarity_matrix: matrix\n    :type candidates_set: Set\n    :type candidate: int\n    :type function_map: dict\n    :type weighted: bool\n    :return: \"distance\" between the set of candidates and the given candidate\n    :rtype: list\n    \"\"\"\n    candidates_permutations = [[i - 1 for i in j] + [candidate - 1] for j in function_map[candidates_set][1]]\n    candidates_permutations = [j + [i for i in range(similarity_matrix.nrows()) if i not in j] for j in\n                               candidates_permutations]\n\n    if weighted:\n        scores = [(get_weighted_distance_score(similarity_matrix.matrix_from_rows_and_columns(i, i),\n                                               candidates_set.cardinality()),\n                   [j + 1 for j in i[:candidates_set.cardinality() + 1]]) for i in candidates_permutations]\n    else:\n        scores = [(get_distance_score(similarity_matrix.matrix_from_rows_and_columns(i, i),\n                                      candidates_set.cardinality()),\n                   [j + 1 for j in i[:candidates_set.cardinality() + 1]]) for i in candidates_permutations]\n\n    minimum = min(scores, key=lambda x: x[0])\n    return [score for score in scores if score[0] == 
minimum[0]]\n\n\ndef find_permutation_dynamic_programming(similarity_matrix, candidates_set, function_map, weighted=False):\n    \"\"\"\n    Finds the permutation minimizing the gradient score with a dynamic programming algorithm\n    :param similarity_matrix: similarity matrix between candidates\n    :param candidates_set: set of candidates used on this iteration\n    :param function_map: map whose keys are sets of candidates and values are a tuple (score, optimal permutations so far)\n    :param weighted: if True, matrices scores are calculated with the weighted gradient\n    :type similarity_matrix: matrix\n    :type candidates_set: Set\n    :type function_map: dict\n    :type weighted: bool\n    :return: map of sets of candidates and tuple, containing the optimal permutation\n    :rtype: dict\n    \"\"\"\n    # If it has already been calculated, just return the result\n    if candidates_set in function_map:\n        return function_map\n\n    # else if it has only one element, the score is 0 and the optimal permutation is a 1-element list\n    if candidates_set.cardinality() == 1:\n        function_map[candidates_set] = (0, [[candidates_set.an_element()]])\n        return function_map\n\n    # else, recursive call on all combinations\n    candidates_combinations = list(combinations(candidates_set, candidates_set.cardinality() - 1))\n    temp = []\n    for combination in candidates_combinations:\n        comb = Set(combination)\n        current_candidate = candidates_set.symmetric_difference(comb).an_element()\n        function_map = find_permutation_dynamic_programming(similarity_matrix, comb, function_map, weighted) # recursive call\n\n        scores = distance(similarity_matrix, comb, current_candidate, function_map, weighted)\n        for score in scores:\n            temp.append((score[1], score[0] + function_map[comb][0]))\n\n    minimum = min(temp, key=lambda x: x[1]) # tuple with the minimum score of this iteration\n    minimums = [i for i in temp if i[1] == minimum[1]] # keep all combinations giving the minimum score\n    # print(\"size of minimums list: \" + str(len(minimums)))\n\n    function_map[candidates_set] = (minimum[1], [i for i, j in minimums])\n    # print (\"temp : \" + str(temp))\n\n    return function_map\n\n\n################################################\n# Functions used in development, to remove !!! 
#\n################################################\ndef example1():\n structure, axis = generation(7, 10000, 3)\n print(\"Generated axis: \" + str(axis))\n mat = create_similarity_matrix(structure, dissimilarity_function=dissimilarity_over_over)\n print(\"Similarity Matrix\")\n print(mat)\n print(\"Gradient score: \" + str(get_matrix_score(mat)))\n print(\"Weighted Gradient score: \" + str(get_weighted_matrix_score(mat)))\n\n t = time()\n dico = find_permutation_dynamic_programming(mat, Set(structure[\"candidates\"].keys()), {})\n t = time() - t\n print(\"Dynamic programming algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(dico[Set(structure[\"candidates\"].keys())][1]))\n\n t = time()\n optimal_permutation = find_permutation_naive(mat)\n t = time() - t\n print(\"Naive algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(optimal_permutation))\n\n\ndef example2():\n structure, axis = generation(7, 10000, 3)\n print(\"Generated axis: \" + str(axis))\n mat = create_similarity_matrix(structure, dissimilarity_function=dissimilarity_over_over)\n print(\"Similarity Matrix\")\n print(mat)\n print(\"Gradient score: \" + str(get_matrix_score(mat)))\n print(\"Weighted Gradient score: \" + str(get_weighted_matrix_score(mat)))\n\n t = time()\n dico = find_permutation_dynamic_programming(mat, Set(structure[\"candidates\"].keys()), {}, weighted=True)\n t = time() - t\n print(\"Dynamic programming algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(dico[Set(structure[\"candidates\"].keys())][1]))\n\n t = time()\n optimal_permutation = find_permutation_naive(mat)\n t = time() - t\n print(\"Naive algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(optimal_permutation))\n\n\ndef example3():\n mat = matrix([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n print(\"Similarity Matrix\")\n print(mat)\n print(\"Gradient score: \" + str(get_matrix_score(mat)))\n print(\"Weighted Gradient score: \" + str(get_weighted_matrix_score(mat)))\n\n t = time()\n dico = find_permutation_dynamic_programming(mat, Set([1, 2, 3, 4]), {}, weighted=True)\n t = time() - t\n print(\"Dynamic programming algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(dico[Set(Set([1, 2, 3, 4]))][1]))\n\n t = time()\n optimal_permutation = find_permutation_naive(mat)\n t = time() - t\n print(\"Naive algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(optimal_permutation))\n\n\ndef example4():\n mat = matrix([[0, 1, 4, 5], [1, 0, 3, 4], [4, 3, 0, 1], [5, 4, 1, 0]])\n print(\"Similarity Matrix\")\n print(mat)\n print(\"Gradient score: \" + str(get_matrix_score(mat)))\n print(\"Weighted Gradient score: \" + str(get_weighted_matrix_score(mat)))\n\n t = time()\n dico = find_permutation_dynamic_programming(mat, Set([1, 2, 3, 4]), {}, weighted=True)\n t = time() - t\n print(\"Dynamic programming algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(dico[Set(Set([1, 2, 3, 4]))][1]))\n\n t = time()\n optimal_permutation = find_permutation_naive(mat)\n t = time() - t\n print(\"Naive algorithm: \" + str(t) + \"seconds\")\n print(\"Optimal permutations: \" + str(optimal_permutation))\n\n\nif __name__ == '__main__':\n # example1()\n example2()\n # example3()\n # example4()\n"
},
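The gradient score used throughout similarity_matrix.py counts ordering violations relative to the diagonal: in a well-seriated dissimilarity matrix, each row should be non-decreasing moving away from the diagonal. A pure-Python sketch of the row-wise count for a plain list-of-lists matrix, shown for intuition only (no Sage required; the input reuses example4's matrix):

def row_gradient_violations(mat):
    # Count pairs (j, k), j < k, to the right of the diagonal whose
    # dissimilarity ordering is inverted; a Robinson-ordered matrix scores 0.
    n = len(mat)
    score = 0
    for i in range(n - 1):
        for j in range(i + 1, n - 1):
            for k in range(j + 1, n):
                if mat[i][j] > mat[i][k]:
                    score += 1
    return score

print(row_gradient_violations([[0, 1, 4, 5],
                               [1, 0, 3, 4],
                               [4, 3, 0, 1],
                               [5, 4, 1, 0]]))  # 0 -> rows already well ordered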
{
"alpha_fraction": 0.594129741191864,
"alphanum_fraction": 0.609594464302063,
"avg_line_length": 39.10759353637695,
"blob_id": "09a12b2afe8dfebc37de99dcc1861572030404d6",
"content_id": "0b967f37bd9ef725531d6cf22f5666ad65493e91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6337,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 158,
"path": "/ui/upper_frame/algo_menu.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\n\n\nclass AlgoMenu(Frame):\n \"\"\"\n Class displaying all widgets allowing to choose the algorithm to launch\n \"\"\"\n def __init__(self, master):\n Frame.__init__(self, master, width=500, height=200)\n self.parent = master\n self.algo = IntVar()\n self.mode = IntVar()\n self.dissimilarity = IntVar()\n self.weighted = BooleanVar()\n self.dissimilarity.set(2)\n self.weighted.set(True)\n self.filtered = BooleanVar()\n self.frame_algos = None\n self.frame_mode = None\n self.frame_params = None\n self.launch_btn = None\n self.add_widgets()\n\n def add_widgets(self):\n \"\"\"\n Adds all widgets relative to algorithms\n \"\"\"\n self.frame_choice_algo()\n self.frame_seriation_parameters()\n self.add_launch_btn()\n\n def frame_choice_algo(self):\n \"\"\"\n Adds a LabelFrame to choose the algorithm to launch\n :return:\n \"\"\"\n # Variable initialization\n self.frame_mode = LabelFrame(self, text=\"Choose Mode\")\n self.mode.set(1)\n\n # Widgets declaration\n label_mode = Label(self.frame_mode, text=\"Choose the mode:\", font=(\"\", 16))\n radio_benchmark = Radiobutton(self.frame_mode, text=\"Benchmark\", variable=self.mode, value=0)\n radio_interactive = Radiobutton(self.frame_mode, text=\"Interactive\", variable=self.mode, value=1)\n\n # Widgets display\n label_mode.grid(row=0, column=0, columnspan=3)\n radio_interactive.grid(row=1, column=0, columnspan=3, sticky=W)\n radio_benchmark.grid(row=2, column=0, columnspan=3, sticky=W)\n\n self.frame_mode.pack(side=LEFT,fill=Y, padx=10, pady=(0, 28))\n\n # Variable initialization\n self.frame_algos = LabelFrame(self, text=\"Choose Algorithm\")\n self.algo.set(1)\n\n # Widgets declaration\n label_algo = Label(self.frame_algos, text=\"Choose the algorithm to launch:\", font=(\"\", 16))\n radio_bnb = Radiobutton(self.frame_algos, text=\"Branch & Bound\", variable=self.algo, value=0)\n radio_seriation = Radiobutton(self.frame_algos, text=\"Seriation\", variable=self.algo, value=1)\n\n # Widgets display\n label_algo.grid(row=0, column=0, columnspan=3)\n radio_bnb.grid(row=1, column=0, columnspan=3, sticky=W)\n radio_seriation.grid(row=2, column=0, columnspan=3, sticky=W)\n\n self.algo.trace(\"w\", lambda name, index, m: self.enable_or_disable())\n self.frame_algos.pack(fill=X, padx=10)\n\n def frame_seriation_parameters(self):\n \"\"\"\n Adds widgets to choose parameters\n :return:\n \"\"\"\n if self.frame_params:\n self.frame_params.destroy()\n\n # Variables declaration\n self.frame_params = LabelFrame(self, text=\"Parameters for Seriation\")\n\n # Widgets declaration\n label_dissimilarity = Label(self.frame_params, text=\"Choose the function used to calculate the dissimilarity matrix:\",\n font=(\"\", 14))\n radio_and_n = Radiobutton(self.frame_params, text=\"Both candidates over all ballots\", variable=self.dissimilarity, value=0)\n radio_and_or = Radiobutton(self.frame_params, text=\"Both candidates over ballots with one or the other\",\n variable=self.dissimilarity, value=1)\n radio_over_over = Radiobutton(self.frame_params,\n text=\"Taking consideration of the ballots size\",\n variable=self.dissimilarity, value=2)\n label_weighted = Label(self.frame_params, text=\"Weighted calculation :\", font=(\"\", 12))\n radio_weighted = Radiobutton(self.frame_params, text=\"Weighted\", variable=self.weighted, value=True)\n radio_unweighted = Radiobutton(self.frame_params, text=\"Not Weighted\", variable=self.weighted, value=False)\n\n # Widgets display\n label_dissimilarity.grid(row=0, column=0, 
columnspan=3, padx=10)\n        radio_and_n.grid(row=1, column=0, columnspan=3, sticky=W, padx=10)\n        radio_and_or.grid(row=2, column=0, columnspan=3, sticky=W, padx=10)\n        radio_over_over.grid(row=3, column=0, columnspan=3, sticky=W, padx=10)\n        label_weighted.grid(row=4, column=0, padx=10)\n        radio_weighted.grid(row=4, column=1)\n        radio_unweighted.grid(row=4, column=2)\n\n        self.frame_params.pack(padx=10)\n\n    def frame_bnb_parameters(self):\n        \"\"\"\n        Adds widgets to choose parameters\n        :return:\n        \"\"\"\n        if self.frame_params:\n            self.frame_params.destroy()\n\n        # Variables declaration\n        self.frame_params = LabelFrame(self, text=\"Parameters for Branch & Bound\")\n\n        # Widgets declaration\n        # label_filtered = Label(self.frame_params, text=\"\", font=(\"\", 14))\n        checkbtn = Checkbutton(self.frame_params, text=\"Remove last ballots\",\n                               variable=self.filtered, onvalue=True, offvalue=False, padx=10)\n\n        # Widgets display\n        # label_filtered.grid(row=0, column=0, columnspan=3, padx=10)\n        checkbtn.grid(row=1, column=0, columnspan=3, sticky=W, padx=10)\n\n        self.frame_params.pack(fill=X, padx=10)\n\n    def add_launch_btn(self):\n        \"\"\"\n        Adds a button to launch the chosen algorithm with the right parameters\n        \"\"\"\n        self.launch_btn = Button(self, text=\"Launch Algorithm\", command=self.launch)\n        self.launch_btn.pack()\n\n    def launch(self):\n        \"\"\"\n        Launches the algorithm with the files and options the user selected\n        \"\"\"\n        if self.parent.right_frame.list_files:\n            if self.mode.get() == 0:\n                self.master.master.display_benchmark_results()\n            else:\n                self.master.master.display_interactive_results()\n\n    def enable_or_disable(self):\n        \"\"\"\n        Enables the choice for a dissimilarity function and a weighted calculation if the seriation algorithm is selected\n        Disables them otherwise\n        \"\"\"\n        if self.algo.get() == 0:\n            self.launch_btn.destroy()\n            self.frame_bnb_parameters()\n            self.add_launch_btn()\n\n        else:\n            self.launch_btn.destroy()\n            self.frame_seriation_parameters()\n            self.add_launch_btn()\n"
},
{
"alpha_fraction": 0.5663265585899353,
"alphanum_fraction": 0.5905998945236206,
"avg_line_length": 49.13953399658203,
"blob_id": "fdd47aa67e6e764921efb181647ed3fb66eee3b2",
"content_id": "e23328dbf90f3a2991baaf0a23315a8912d94b2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6478,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 129,
"path": "/ui/lower_frame/lower_benchmark.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nimport ttk\nimport sys\nfrom os import getcwd\n\nfrom algorithms.b_and_b import bnb, find_axes2, remove_last_ballots\n\nsys.path.append(getcwd())\n\nfrom algorithms.find_axis_from_file import find_axis_from_structure\nfrom algorithms.similarity_matrix import *\nfrom data_gestion.file_gestion import read_file\nfrom Data.axesPAndroide import listFiles\n\n\nclass Benchmark(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.parent = parent\n self.files = self.parent.upper_frame.right_frame.list_files\n self.display_table()\n\n def display_table(self):\n algo = self.parent.upper_frame.left_frame.algo.get()\n weighted = self.parent.upper_frame.left_frame.weighted.get()\n dissimilarity_var = self.parent.upper_frame.left_frame.dissimilarity.get()\n filtered = self.parent.upper_frame.left_frame.filtered.get()\n\n if dissimilarity_var == 0:\n dissimilarity = dissimilarity_and_n\n elif dissimilarity_var == 1:\n dissimilarity = dissimilarity_and_or\n else:\n dissimilarity = dissimilarity_over_over\n\n # Horizontal and Vertical separators\n ttk.Separator(self, orient=HORIZONTAL).grid(row=0, columnspan=16, sticky=\"ew\")\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=0, rowspan=len(self.files) * 2 + 3, padx=(0, 10),\n sticky=\"ns\")\n\n # Horizontal labels with separators\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=2, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Bulletins\").grid(row=1, column=3)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=4, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Bulletins uniques\").grid(row=1, column=5)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=6, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n if algo == 0:\n Label(self, text=\"Bulletins sélectionnés\").grid(row=1, column=7)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=8, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Bulletins (uniques) sélectionnés\").grid(row=1, column=9)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=10, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Proportion\").grid(row=1, column=11)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=12, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Axes trouvés\").grid(row=1, column=13)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=14, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Durée d'exécution\").grid(row=1, column=15, padx=(0, 10))\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=16, rowspan=len(self.files) * 2 + 3, sticky=\"ns\")\n else:\n Label(self, text=\"Axes trouvés\").grid(row=1, column=7)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=8, rowspan=len(self.files) * 2 + 3, padx=10,\n sticky=\"ns\")\n Label(self, text=\"Durée d'exécution\").grid(row=1, column=9)\n ttk.Separator(self, orient=VERTICAL).grid(row=0, column=10, rowspan=len(self.files) * 2 + 3, padx=(10, 0),\n sticky=\"ns\")\n\n ttk.Separator(self, orient=HORIZONTAL).grid(row=2, columnspan=16, sticky=\"ew\")\n\n # Vertical file names\n vrow = 3\n for f in self.files:\n Label(self, text=f).grid(row=vrow, column=1)\n ttk.Separator(self, orient=HORIZONTAL).grid(row=vrow + 1, columnspan=16, sticky=\"ew\")\n if algo == 0:\n calculate_bnb(self, f, vrow, filtered)\n else:\n 
calculate_seriation(self, f, vrow, dissimilarity, weighted)\n vrow += 2\n\n def destroy_elements(self):\n return\n\n\ndef calculate_bnb(self, file, vrow, filtered):\n structure = read_file(\"Data/all/\" + str(file), file in listFiles)\n if not filtered:\n preferences = structure[\"preferences\"]\n candidates = structure[\"candidates\"]\n Label(self, text=str(structure[\"nb_voters\"])).grid(row=vrow, column=3)\n Label(self, text=str(structure[\"nb_unique_orders\"])).grid(row=vrow, column=5)\n else:\n preferences_bis = structure[\"preferences\"]\n preferences, nb_voters, nb_unique = remove_last_ballots(preferences_bis)\n candidates = structure[\"candidates\"]\n Label(self, text=str(nb_voters)).grid(row=vrow, column=3)\n Label(self, text=str(nb_unique)).grid(row=vrow, column=5)\n\n t1 = time()\n ensemble, best = bnb(len(preferences), preferences, candidates)\n t2 = time()\n Label(self, text=str(best[1])).grid(row=vrow, column=7)\n Label(self, text=str(len(best[0][0]))).grid(row=vrow, column=9)\n if not filtered:\n Label(self, text=str(best[1] * 100.0 / structure[\"nb_voters\"])).grid(row=vrow, column=11)\n else:\n Label(self, text=str(best[1] * 100.0 / nb_voters)).grid(row=vrow, column=11)\n axes, card = find_axes2(best[0][0], candidates)\n\n Label(self, text=str(len(axes))).grid(row=vrow, column=13)\n Label(self, text=str(t2 - t1)).grid(row=vrow, column=15)\n\n\ndef calculate_seriation(self, file, vrow, dissimilarity, weighted):\n structure = read_file(\"Data/all/\" + str(file), file in listFiles)\n Label(self, text=str(structure[\"nb_voters\"])).grid(row=vrow, column=3)\n Label(self, text=str(structure[\"nb_unique_orders\"])).grid(row=vrow, column=5)\n if file in listFiles:\n t, axes = find_axis_from_structure(structure, dissimilarity, weighted)\n else:\n t, axes = find_axis_from_structure(structure, dissimilarity, weighted, unwanted_candidates=[2, 3, 7, 11])\n Label(self, text=str(len(axes[1]))).grid(row=vrow, column=7)\n Label(self, text=str(t)).grid(row=vrow, column=9)\n"
},
{
"alpha_fraction": 0.6439512372016907,
"alphanum_fraction": 0.647389829158783,
"avg_line_length": 39.23899459838867,
"blob_id": "f82abfedabff82e7ba287a53f7e7643a01db2a83",
"content_id": "c57a3bb5584d83ab2aade8ea00f3cd393afe5abf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6398,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 159,
"path": "/algorithms/find_axis_from_file.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport sys\nfrom os import getcwd\nsys.path.append(getcwd())\n\nfrom sage.all import Set\nfrom data_gestion.file_gestion import *\nfrom similarity_matrix import *\nfrom time import time\nfrom os import listdir\nfrom os.path import join\nfrom getopt import getopt\nimport sys\n\n\ndef find_axis_from_structure(structure, dissimilarity_function=dissimilarity_over_over,\n weighted=False, unwanted_candidates=[]):\n \"\"\"\n Finds the axes coherent with the data in the structure\n :param structure: data extracted from an election file\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param unwanted_candidates: list of candidates to exclude from the search\n :return: calculation time and optimal permutations for this structure\n \"\"\"\n # candidates_set = Set(structure[\"candidates\"].keys())\n\n # Creating a conversion table to make the calculations easier\n i = 1\n conversion_table = {}\n for candidate in structure[\"candidates\"].keys():\n if candidate not in unwanted_candidates:\n conversion_table[i] = candidate\n i += 1\n\n\n similarity_matrix = create_similarity_matrix(structure, dissimilarity_function)\n\n candidates_set = Set(range(1, len(conversion_table)+1))\n candidates = [i-1 for i in conversion_table.values()]\n candidates.sort()\n\n t = time()\n optimal_permutations = find_permutation_dynamic_programming(similarity_matrix.matrix_from_rows_and_columns(candidates, candidates),\n candidates_set, {}, weighted)\n\n res = (optimal_permutations[candidates_set][0], [[conversion_table[i] for i in l] for l in optimal_permutations[candidates_set][1]])\n\n return time()-t, res\n\n\ndef write_results_on_file(input_directory, output_file, dissimilarity_function=dissimilarity_over_over,\n weighted=False, unwanted_candidates=[], strict=False):\n \"\"\"\n For each file in the directory, calculates the optimal axes coherent with the data and writes it on the output file\n :param input_directory: directory where the .toc files are located\n :param output_file: output file\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param unwanted_candidates: list of candidates to exclude from the search\n :param strict: True if the file depicts strict preferences, False otherwise\n \"\"\"\n files = [join(input_directory, i) for i in listdir(input_directory) if i[-3:] == \"toc\"]\n fp = open(output_file, \"w\")\n\n for f in files:\n structure = read_file(f, strict)\n print(f)\n fp.write(str(f) + \"\\n\")\n t, optimal_permutations = find_axis_from_structure(structure, dissimilarity_function, weighted, unwanted_candidates)\n print(t)\n fp.write(\"calculation time: \" + str(t) + \" seconds\\n\")\n fp.write(\"axes: \" + str(optimal_permutations) + \"\\n\")\n for axis in optimal_permutations[1]:\n fp.write(str([structure[\"candidates\"][i] for i in axis]) + \"\\n\")\n fp.write(\"\\n\")\n fp.close()\n\n\ndef write_directory_results_on_file(input_directory, output_file, dissimilarity_function=dissimilarity_over_over,\n weighted=False, unwanted_candidates=[], strict=False):\n \"\"\"\n Creates a structure combining the data from all election files in the directory,\n then calculates the optimal axes coherent with the data and writes it on the output file\n :param input_directory: directory where the .toc files are 
located\n :param output_file: output file\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param unwanted_candidates: list of candidates to exclude from the search\n :param strict: True if the file depicts strict preferences, False otherwise\n \"\"\"\n fp = open(output_file, \"w\")\n\n structure = read_directory(input_directory, strict)\n\n fp.write(str(input_directory) + \"\\n\")\n t, optimal_permutations = find_axis_from_structure(structure, dissimilarity_function, weighted, unwanted_candidates)\n print(t)\n fp.write(\"calculation time: \" + str(t) + \" seconds\\n\")\n fp.write(\"axes: \" + str(optimal_permutations) + \"\\n\")\n for axis in optimal_permutations[1]:\n fp.write(str([structure[\"candidates\"][i] for i in axis]) + \"\\n\")\n\n fp.close()\n\n\ndef launch():\n \"\"\"\n Function used to launch the program the way we want it, using command line options\n :return:\n \"\"\"\n dissimilarity_function = dissimilarity_over_over\n weighted = False\n input_directory = \"\"\n output_file = \"\"\n fusion = False\n strict = False\n unwanted_candidates = []\n\n opts, args = getopt(sys.argv[1:], \"d:o:w\", [\"func=\", \"fusion\", \"not=\", \"strict\"])\n\n for opt, value in opts:\n if opt == \"-d\":\n input_directory = value\n if opt == \"-o\":\n output_file = value\n if opt == \"-w\":\n weighted = True\n if opt == \"--func\":\n if value == \"0\":\n dissimilarity_function = dissimilarity_over_over\n elif value == \"1\":\n dissimilarity_function = dissimilarity_and_n\n elif value == \"2\":\n dissimilarity_function = dissimilarity_and_or\n if opt == \"--fusion\":\n fusion = True\n if opt == \"--not\":\n unwanted_candidates = list(map(int, value.split()))\n if opt == \"--strict\":\n strict = True\n\n if (not input_directory or not output_file) and len(args) != 2:\n raise IOError(\"Not enough arguments\")\n\n if len(args) == 2:\n input_directory = args[0]\n output_file = args[1]\n\n if fusion:\n write_directory_results_on_file(input_directory, output_file, dissimilarity_function,\n weighted, unwanted_candidates, strict)\n else:\n write_results_on_file(input_directory, output_file, dissimilarity_function,\n weighted, unwanted_candidates, strict)\n\n\nif __name__ == '__main__':\n launch()\n"
},
{
"alpha_fraction": 0.6351733803749084,
"alphanum_fraction": 0.6411889791488647,
"avg_line_length": 34.772151947021484,
"blob_id": "65da251b155bfa4c5492e2d35d2dec9cc739c0c2",
"content_id": "6a1c6d4dbc8df10cfa295420e1d768f26141c8ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2826,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 79,
"path": "/data_gestion/generation.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env sage -python\n# -*- coding: utf-8 -*-\nfrom random import shuffle, randint, choice\nfrom sage.all import Set\n\n\ndef bfs(cand_list, favorite):\n \"\"\"\n Creates a list from the element with the given index\n :param cand_list: sorted list of candidates (their ID)\n :param favorite: index of the favorite candidate, to place first in the list\n :return: list simulating a BFS from the element with the given index\n \"\"\"\n left_right = [list(reversed(cand_list[:favorite])), cand_list[favorite+1:]]\n pref = [cand_list[favorite]]\n # if there's an empty list, you can't choose from it...\n if [] in left_right:\n left_right.remove([])\n # picking a candidate to add to the list on one side (left or right) of the already chosen candidates\n while left_right:\n pick = choice(left_right)\n pref.append(pick.pop(0))\n if not pick:\n left_right.remove(pick)\n\n return pref\n\n\ndef generation(nb_candidates, nb_ballots, nb_prefs=None):\n \"\"\"\n Randomly generates fictional ballots\n :param nb_candidates: number of candidates\n :param nb_ballots: number of ballots to generate\n :param nb_prefs: if specified, number of strict preferences for each ballot\n :return: same structure returned by the lecture of a file\n \"\"\"\n\n # Creation of the map of candidates\n candidates = {i: \"Candidate \" + str(i) for i in range(1, nb_candidates+1)}\n # Creation of a list of candidates, shuffled to simulate some sort of classification\n cand_list = list(range(1, nb_candidates+1))\n shuffle(cand_list)\n # print(\"The randomly generated candidates order is: \" + str(cand_list))\n\n # Creation of ballots\n ballots = []\n rand_prefs = nb_prefs is None\n for _ in range(nb_ballots):\n favorite = randint(0, nb_candidates-1) # index of favorite candidate for this ballot\n pref = bfs(cand_list, favorite)\n if rand_prefs:\n nb_prefs = randint(1, nb_candidates) # number of strict preferences for this ballot\n temp = pref[:nb_prefs]\n temp.append(Set(pref[nb_prefs:]))\n ballots.append(temp)\n\n # Creation of the list of preferences\n prefs = []\n while ballots:\n ballot = ballots.pop()\n prefs.append((ballots.count(ballot)+1, ballot))\n while ballot in ballots:\n ballots.remove(ballot)\n\n # Sort preferences by number of voters\n prefs.sort(key=lambda x: x[0], reverse=True)\n\n return {\"nb_candidates\": nb_candidates,\n \"candidates\": candidates,\n \"nb_voters\": nb_ballots,\n \"sum_vote_count\": nb_ballots,\n \"nb_unique_orders\": len(prefs),\n \"preferences\": prefs}, cand_list\n\n\nif __name__ == '__main__':\n structure, c_list = generation(10, 10, 3)\n for i in structure[\"preferences\"]:\n print(i)\n"
},
{
"alpha_fraction": 0.6229261159896851,
"alphanum_fraction": 0.6244344115257263,
"avg_line_length": 23.518518447875977,
"blob_id": "ead6c6fd49d917591005d2d23e91c80988468ecd",
"content_id": "73680a0ab6b3f6d12dd842293c645b44f216e196",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 663,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 27,
"path": "/ui/upper_frame/upper_frame.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nimport sys\nfrom os import getcwd\nsys.path.append(getcwd())\n\nfrom ui.upper_frame.algo_menu import AlgoMenu\nfrom ui.upper_frame.files_menu import FilesMenu\n\n\nclass UpperFrame(Frame):\n \"\"\"\n Upper frame of the window\n \"\"\"\n\n def __init__(self, master):\n Frame.__init__(self, master)\n self.parent = master\n self.left_frame = AlgoMenu(self)\n self.right_frame = FilesMenu(self)\n self.pack_elements()\n\n def pack_elements(self):\n if self.left_frame:\n self.left_frame.pack(side=LEFT)\n if self.right_frame:\n self.right_frame.pack(side=RIGHT)\n\n"
},
{
"alpha_fraction": 0.5997352004051208,
"alphanum_fraction": 0.603706955909729,
"avg_line_length": 25.057470321655273,
"blob_id": "cf5329ad0d581b84fd5a6c0998c673754de612e3",
"content_id": "cdd23bdd156c3d3e9220188452068400bac2b475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2266,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 87,
"path": "/ui/main_window.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nimport sys\nfrom os import getcwd\nsys.path.append(getcwd())\n\nfrom ui.top_menu import TopMenu\nfrom ui.upper_frame.upper_frame import UpperFrame\nfrom ui.lower_frame.lower_interactive import Interactive\nfrom ui.lower_frame.lower_benchmark import Benchmark\n\n\nclass MainWindow(Frame):\n \"\"\"\n Frame containing all other ui elements\n \"\"\"\n def __init__(self, master):\n Frame.__init__(self, master)\n self.parent = master\n self.top_menu = TopMenu(self)\n self.upper_frame = UpperFrame(self)\n self.lower_frame = None\n self.pack_elements()\n\n def pack_upper_frame(self):\n if self.upper_frame:\n self.upper_frame.pack(side=TOP, padx=10, pady=10)\n\n def pack_lower_frame(self):\n if self.lower_frame:\n self.lower_frame.pack(side=BOTTOM, padx=10, pady=10)\n\n def pack_elements(self):\n self.pack_upper_frame()\n self.pack_lower_frame()\n\n def set_upper_frame(self, frame):\n \"\"\"\n Change the upper_frame\n :param frame: widget that will replace the old one\n \"\"\"\n if self.upper_frame:\n self.upper_frame.destroy()\n self.upper_frame = frame\n self.pack_upper_frame()\n\n def set_lower_frame(self, frame):\n \"\"\"\n Change the lower_frame\n :param frame: widget that will replace the old one\n \"\"\"\n if self.lower_frame:\n self.lower_frame.destroy_elements()\n self.lower_frame.destroy()\n self.lower_frame = frame\n self.pack_lower_frame()\n\n def display_interactive_results(self):\n \"\"\"\n Displays the results of each file, one by one, on a graphic\n \"\"\"\n frame = Interactive(self)\n self.set_lower_frame(frame)\n\n def display_benchmark_results(self):\n \"\"\"\n Displays the results of all files in a table\n \"\"\"\n frame = Benchmark(self)\n self.set_lower_frame(frame)\n\n def set_mode(self, mode):\n self.upper_frame.left_frame.mode.set(mode)\n\n\ndef launch():\n root = Tk()\n # root.resizable(width=False, height=False)\n\n main_window = MainWindow(root)\n main_window.pack()\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n launch()"
},
{
"alpha_fraction": 0.600563108921051,
"alphanum_fraction": 0.6052148342132568,
"avg_line_length": 33.76170349121094,
"blob_id": "d626367d55921bbf5187b489d4b98c2613ca2ae9",
"content_id": "444443752aef99ff5735292060d757fe4259a7ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8169,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 235,
"path": "/data_gestion/file_gestion.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env sage -python\n# -*- coding: utf-8 -*-\nimport sys\nfrom os import listdir\nfrom os.path import join\nfrom sage.all import Set\n\n\ndef read_pref_approval(count, pref):\n \"\"\"\n Reads the line and returns the preferences\n :param count: number of voters having this preference\n :param pref: list of candidates voters approve or not\n :type count: int\n :type pref: str\n :return: preference formated to fit to our needs\n :rtype: list\n \"\"\"\n # temp is the preference for a set of people :\n # the number of people having this preference, a strict order of preferences\n # If indifferent between candidates, a Set (from sagemath) of them is added to the list of preferences\n\n #temp = [count, []]\n temp = (count, [])\n pref = [i.strip() for i in pref.split(\",\")]\n i = 0\n while i < len(pref):\n p = pref[i]\n if p[0] != '{' or i == 0:\n p = p.strip('{}')\n if len(p) > 0:\n temp[1].append(int(p))\n else:\n indiff = []\n p = p[1:]\n while p[-1] != '}':\n indiff.append(int(p))\n i += 1\n p = pref[i]\n p = p.strip('{}')\n if p:\n indiff.append(int(p))\n temp[1].append(Set(indiff))\n i += 1\n return [temp]\n\n\ndef read_strict_pref(count, pref):\n \"\"\"\n Reads the line and returns the preferences\n :param count: number of voters having this preference\n :param pref: ranking of candidates for these voters\n :type count: int\n :type pref: str\n :return: preference formated to fit to our needs\n :rtype: list\n \"\"\"\n # temp is the preference for a set of people :\n # the number of people having this preference, a strict order of preferences\n # If indifferent between candidates, a Set (from sagemath) of them is added to the list of preferences\n if '{' in pref:\n approved = list(map(int, pref[:pref.index(\"{\")].rstrip(\",\").split(\",\")))\n disapproved = list(map(int, pref[pref.index(\"{\") + 1:pref.index(\"}\")].split(\",\")))\n else:\n approved = list(map(int, pref.split(\",\")))\n disapproved = []\n res = []\n\n for i in range(len(approved)):\n temp = [count, approved[:i+1] + [Set(approved[i+1:] + disapproved)]]\n res.append(temp)\n\n return res\n\n\ndef read_file(filename, strict=False):\n \"\"\"\n Reads the file and returns data content\n :param filename: absolute or relative path to the file\n :param strict: True if the file depicts strict preferences, False otherwise\n :type filename: str\n :raise ValueError: if file doesn't exist or wrong file format\n :return: map with all data from the file\n \"\"\"\n\n # Opening file\n try:\n fp = open(filename, \"r\")\n except:\n raise ValueError(\"ERROR: File doesn't exist\")\n\n file_error_message = \"ERROR: Wrong file format\"\n # Candidates related data\n try:\n nb_candidates = int(fp.readline())\n except:\n raise ValueError(file_error_message)\n # Map of candidates\n try:\n candidates = {}\n for _ in range(nb_candidates):\n line = fp.readline().split(\",\")\n candidates[int(line[0].strip())] = line[1].strip()\n except:\n raise ValueError(file_error_message)\n\n # Number of voters, Sum of Vote Count, Number of Unique Orders\n try:\n nb_voters, sum_vote_count, nb_unique_orders = [int(i.strip()) for i in fp.readline().split(\",\")]\n except:\n raise ValueError(file_error_message)\n\n # Lists of preferences\n # try:\n prefs = []\n total_count = 0\n for _ in range(nb_unique_orders):\n line = fp.readline()\n count = int(line[:line.index(\",\")])\n total_count += count\n pref = line[line.index(\",\") + 1:]\n if strict:\n temp = read_strict_pref(count, pref)\n else:\n temp = read_pref_approval(count, pref)\n 
prefs.extend(temp)\n # except:\n # raise ValueError(file_error_message)\n\n test = fp.readline()\n if test:\n sys.exit(file_error_message)\n\n if total_count != sum_vote_count:\n sys.exit(\"ERROR: Vote numbers don't match\")\n\n # Closing file\n fp.close()\n\n # Just making sure the preferences are sorted by decreasing order of number of voters\n prefs.sort(key=lambda x: x[0], reverse=True)\n\n return {\"nb_candidates\": nb_candidates,\n \"candidates\": candidates,\n \"nb_voters\": nb_voters,\n \"sum_vote_count\": sum_vote_count,\n \"nb_unique_orders\": nb_unique_orders,\n \"preferences\": prefs}\n\n\ndef read_directory(dirname, strict=False):\n \"\"\"\n Reads the files in the directory and returns concatenation of the data contents\n :param dirname: absolute or relative path to the directory\n :type dirname: str\n :raise ValueError: if wrong directory, empty directory, or wrong file format\n :return: map with all data from the files\n \"\"\"\n try:\n files = [join(dirname, i) for i in listdir(dirname) if i[-4:] == \".toc\"]\n except:\n raise ValueError(\"No such directory\")\n if not files:\n raise ValueError(\"No .toc files detected\")\n\n structure = read_file(files[0], strict)\n\n unique_orders = [pref[1] for pref in structure[\"preferences\"]]\n\n for f in files[1:]:\n temp_struct = read_file(f, strict)\n structure[\"nb_voters\"] += temp_struct[\"nb_voters\"]\n structure[\"sum_vote_count\"] += temp_struct[\"sum_vote_count\"]\n for pref in temp_struct[\"preferences\"]:\n if pref[1] in unique_orders:\n i = unique_orders.index(pref[1])\n structure[\"preferences\"][i] = (structure[\"preferences\"][i][0] + pref[0], pref[1])\n else:\n structure[\"preferences\"].append(pref)\n structure[\"nb_unique_orders\"] += 1\n unique_orders.append(pref[1])\n\n structure[\"preferences\"].sort(key=lambda x: x[0], reverse=True)\n return structure\n\n\ndef remove_unwanted_candidates(structure, unwanted_candidates):\n \"\"\"\n Removes unwanted candidates from the structure, including in the preferences\n :param structure: structure we want to remove the candidates from\n :param unwanted_candidates: list of IDs of unwanted candidates\n :type unwanted_candidates: list\n :return: the new structure without the unwanted candidates\n \"\"\"\n new_structure = {\"nb_candidates\": structure[\"nb_candidates\"] - len(unwanted_candidates), \"candidates\": {},\n \"nb_voters\": structure[\"nb_voters\"], \"sum_vote_count\": structure[\"sum_vote_count\"],\n \"preferences\": []}\n\n # Modifying the candidates set and creating a conversion table to make the preferences transformation easier\n i = 1\n conversion_table = {}\n for candidate in structure[\"candidates\"].keys():\n if candidate not in unwanted_candidates:\n new_structure[\"candidates\"][i] = structure[\"candidates\"][candidate]\n conversion_table[candidate] = i\n i += 1\n\n # Adding preferences after deleting unwanted_candidates\n unique_orders = []\n for nb_votes, pref in structure[\"preferences\"]:\n # cleaning the list of preferences from its unwanted candidates\n temp = [conversion_table[i] for i in pref if i not in unwanted_candidates and isinstance(i, int)]\n if not isinstance(pref[-1], int):\n temp.append(Set([conversion_table[i] for i in pref[-1] if i not in unwanted_candidates]))\n # adding of the list of preferences to the structure\n if temp in unique_orders:\n new_structure[\"preferences\"][unique_orders.index(temp)][0] += nb_votes\n else:\n new_structure[\"preferences\"].append([nb_votes, temp])\n unique_orders.append(temp)\n\n 
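# distinct ballots can collapse into one once candidates are removed, so recount the unique orders\n    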
new_structure[\"nb_unique_orders\"] = len(unique_orders)\n\n return new_structure\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n sys.exit(\"This program takes one and only one argument\")\n f = sys.argv[1]\n structure = read_file(f, strict=True)\n for c in structure[\"candidates\"].items():\n print(c)\n for j in structure[\"preferences\"]:\n print(j)\n"
},
{
"alpha_fraction": 0.5976966619491577,
"alphanum_fraction": 0.6054781675338745,
"avg_line_length": 43.93356704711914,
"blob_id": "2599f5c8865b6d3dd9a9cf595342a4709b1064fb",
"content_id": "4dfb7f188744b3b868a3b3e904fd9f54e1d97e1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12851,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 286,
"path": "/algorithms/display_axes.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "from os.path import join\nimport sys\nfrom os import getcwd\n\nsys.path.append(getcwd())\n\nfrom Data.axesPAndroide import *\nfrom data_gestion.file_gestion import read_file, read_directory\nfrom find_axis_from_file import find_axis_from_structure\nfrom algorithms.b_and_b import bnb, find_axes2\nfrom algorithms.similarity_matrix import *\n\n\ndef get_matches(axis):\n \"\"\"\n Returns the index of the group where the candidate is, for each candidate\n :param axis: axis in which the search is done\n :return: index of the group where the candidate is, for each candidate\n :rtype: dict[int, int]\n \"\"\"\n res = {}\n for i in range(len(axis)):\n for candidate in axis[i]:\n res[candidate] = i+1\n return res\n\n\ndef filter_symmetric_axes(permutations):\n \"\"\"\n Filter the list of permutations to keep only one axis when 2 are symmetric\n :param permutations: list of permutations found\n :type permutations: list[list[int]]\n :return: the filtered list of permutations\n :rtype: list[list[int]]\n \"\"\"\n filtered_permutations = []\n for permutation in permutations:\n if list(reversed(permutation)) not in filtered_permutations:\n filtered_permutations.append(permutation)\n return filtered_permutations\n\n\ndef axes_to_latex_graph(structure, axis, name=None,\n dissimilarity_function=dissimilarity_over_over, weighted=False,\n unwanted_candidates=[]):\n \"\"\"\n Returns the LaTeX code to display the axis\n :param structure: structure extracted from a file or directory\n :param axis: axis corresponding to the file, according to wikipedia\n :param name: name that will be given to the figures\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param unwanted_candidates: list of candidates to exclude from the search\n :return: string containing the LaTeX code\n \"\"\"\n t, permutations = find_axis_from_structure(structure, dissimilarity_function, weighted, unwanted_candidates)\n permutations = filter_symmetric_axes(permutations[1])\n matches = get_matches(axis)\n length_unit = str(len(permutations[0]) + 1)\n res = \"\\\\section{\" + str(name) + \"}\" if name else \"\"\n res += \"\\\\begin{center}\\n\"\n\n for permutation in permutations:\n # beginning of the environment\n res += \"\\\\begin{figure}\\n\"\n res += \"\\\\begin{tikzpicture}[x=\\\\textwidth/\" + length_unit + \", y=\\\\textwidth/\" + length_unit + \"]\\n\"\n # Adding the x and y axes\n res += \"\\\\draw[->, >=latex] (0,0) -- (\" + length_unit + \",0);\\n\"\n res += \"\\\\draw[->, >=latex] (0,0) -- (0, 5.5);\\n\"\n #res += \"\\\\draw[->, >=latex] (0,0) -- (\" + length_unit + \",0);\\n\"\n #res += \"\\\\draw[->, >=latex] (0,0) -- (0,\" + length_unit + \");\\n\"\n # Adding the legend below the x axis\n for i in range(len(permutation)):\n res += \"\\\\draw (\" + str(i+1) + \",-0.1) node[below] {\" + str(permutation[i]) + \"};\\n\"\n\n res += \"\\\\draw plot[ultra thick]coordinates{\"\n temp = \"\" # string for the circles marking the dots\n for i in range(len(permutation)):\n if permutation[i] in matches:\n res += \"(\" + str(i+1) + \",\" + str(matches[permutation[i]]) + \")\"\n temp += \"\\\\draw[fill=black] (\" + str(i+1) + \",\" + str(matches[permutation[i]]) + \") circle (0.1);\\n\"\n res += \"};\\n\"\n res += temp # ading the circles\n res += \"\\\\end{tikzpicture}\\n\"\n if name:\n res += \"\\\\caption{Permutation \" + str(permutations.index(permutation)) + \" of \" + name + \"}\\n\"\n res += 
\"\\\\end{figure}\\n\"\n\n res += \"\\\\end{center}\\n\\\\clearpage\\n\"\n\n return res\n\ndef axes_to_latex_graph_bnb(f, axis, name=None):\n \"\"\"\n Returns the LaTeX code to display the axis\n :param structure: structure extracted from a file or directory\n :param axis: axis corresponding to the file, according to wikipedia\n :param name: name that will be given to the figures\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param unwanted_candidates: list of candidates to exclude from the search\n :return: string containing the LaTeX code\n \"\"\"\n\n structure = read_file(f)\n preferences = structure[\"preferences\"]\n candidates = [i+1 for i in range(len(structure[\"candidates\"]))]\n bb, best = bnb(len(preferences), preferences, candidates)\n permutations, card = find_axes2(best[0][0], candidates)\n\n matches = get_matches(axis)\n length_unit = str(len(permutations[0]) + 1)\n res = \"\\\\section{\" + str(name) + \"}\" if name else \"\"\n res += \"\\\\begin{center}\\n\"\n\n for permutation in permutations:\n # beginning of the environment\n res += \"\\\\begin{figure}\\n\"\n res += \"\\\\begin{tikzpicture}[x=\\\\textwidth/\" + length_unit + \", y=\\\\textwidth/\" + length_unit + \"]\\n\"\n # Adding the x and y axes\n res += \"\\\\draw[->, >=latex] (0,0) -- (\" + length_unit + \",0);\\n\"\n res += \"\\\\draw[->, >=latex] (0,0) -- (0, 5.5);\\n\"\n #res += \"\\\\draw[->, >=latex] (0,0) -- (\" + length_unit + \",0);\\n\"\n #res += \"\\\\draw[->, >=latex] (0,0) -- (0,\" + length_unit + \");\\n\"\n # Adding the legend below the x axis\n for i in range(len(permutation)):\n res += \"\\\\draw (\" + str(i+1) + \",-0.1) node[below] {\" + str(permutation[i]) + \"};\\n\"\n\n res += \"\\\\draw plot[ultra thick]coordinates{\"\n temp = \"\" # string for the circles marking the dots\n for i in range(len(permutation)):\n if permutation[i] in matches:\n res += \"(\" + str(i+1) + \",\" + str(matches[permutation[i]]) + \")\"\n temp += \"\\\\draw[fill=black] (\" + str(i+1) + \",\" + str(matches[permutation[i]]) + \") circle (0.1);\\n\"\n res += \"};\\n\"\n res += temp # ading the circles\n res += \"\\\\end{tikzpicture}\\n\"\n if name:\n res += \"\\\\caption{Permutation \" + str(permutations.index(permutation)) + \" of \" + name + \"}\\n\"\n res += \"\\\\end{figure}\\n\"\n\n res += \"\\\\end{center}\\n\\\\clearpage\\n\"\n\n return res\n\ndef all_files_to_latex(directory, files_list, axes_list, names_list,\n dissimilarity_function=dissimilarity_over_over, weighted=False, strict=None,\n unwanted_candidates=[]):\n \"\"\"\n Returns a string with all graphics from a list of files\n :param directory: path to the directory where all the files in files_list are\n :param files_list: list of files\n :param axes_list: axes corresponding to the files\n :param names_list: names to give to the figures, for instance the wards corresponding to the elections\n :param dissimilarity_function: function to use to calculate dissimilarity between 2 candidates\n :param weighted: if True, matrices scores are calculated with the weighted gradient\n :param strict: list of booleans, True if the corresponding file in the list is depicting strict preferences\n :param unwanted_candidates: list of candidates to exclude from the search\n :return: LaTeX code to display the graphs corresponding to all the files\n \"\"\"\n if not strict:\n strict = [False] * len(files_list)\n res = \"\"\n for i in 
range(len(files_list)):\n print(files_list[i])\n structure = read_file(join(directory, files_list[i]), strict[i])\n res += axes_to_latex_graph(structure, axes_list[i], names_list[i],\n dissimilarity_function, weighted, unwanted_candidates)\n return res\n\n\ndef launch_irish_glasgow():\n # on Irish and Glasgow data\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_over_over,\n weighted=False, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_over_over.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_over_over,\n weighted=True, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_over_over_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_and_n,\n weighted=False, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_and_n.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_and_n,\n weighted=True, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_and_n_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_and_or,\n weighted=False, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_and_or.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFiles, listAxes, listWards, dissimilarity_function=dissimilarity_and_or,\n weighted=True, strict=[True]*len(listFiles))\n fp = open(\"Data/TeX/irish_glasgow_and_or_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\ndef launch_french():\n # on French data\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n dissimilarity_function=dissimilarity_over_over,\n weighted=False, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_over_over.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n dissimilarity_function=dissimilarity_over_over,\n weighted=True, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_over_over_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n dissimilarity_function=dissimilarity_and_n,\n weighted=False, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_and_n.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n dissimilarity_function=dissimilarity_and_n,\n weighted=True, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_and_n_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n dissimilarity_function=dissimilarity_and_or,\n weighted=False, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_and_or.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n s = all_files_to_latex(\"Data/all\", listFrenchFiles, listFrenchAxes, listFrenchWards,\n 
dissimilarity_function=dissimilarity_and_or,\n weighted=True, strict=[False]*len(listFiles),\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_and_or_weighted.tex\", \"w\")\n fp.write(s)\n fp.close()\n\n\ndef launch_french_fusion():\n structure = read_directory(\"Data/frenchapproval\")\n functions = [dissimilarity_and_n, dissimilarity_and_or, dissimilarity_over_over]\n names = [\"dissimilarity\\_and\\_n weighted\", \"dissimilarity\\_and\\_n not weighted\",\n \"dissimilarity\\_and\\_or weighted\", \"dissimilarity\\_and\\_or not weighted\",\n \"dissimilarity\\_over\\_over weighted\", \"dissimilarity\\_over\\_over not weighted\"]\n s = \"\"\n for i in range(6):\n print \"Iteration \" + str(i)\n s += axes_to_latex_graph(structure, listFrenchAxes[0], names[i], functions[i/2], i % 2 == 0,\n unwanted_candidates=[2, 3, 7, 11])\n fp = open(\"Data/TeX/french_fusion.tex\", \"w\")\n fp.write(s)\n fp.close()\n\ndef launch_french_bnb():\n s = axes_to_latex_graph_bnb(sys.argv[1], listFrenchAxes[0], name=str(sys.argv[1]))\n fp = open(\"results\",\"w\")\n fp.write(s)\n fp.close()\n\nif __name__ == '__main__':\n # launch_irish_glasgow()\n # launch_french()\n # launch_french_fusion()\n launch_french_bnb()\n"
},
{
"alpha_fraction": 0.573309600353241,
"alphanum_fraction": 0.5844602584838867,
"avg_line_length": 39.92232894897461,
"blob_id": "2faa55c49ba4c7bfcc6f05600db4d25424fb2521",
"content_id": "9965a6b097660f21a5b7053b4d841e9da99596c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8431,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 206,
"path": "/ui/lower_frame/lower_interactive.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nimport sys\nfrom os import getcwd\nfrom os.path import join\n\nsys.path.append(getcwd())\n\nfrom algorithms.find_axis_from_file import find_axis_from_structure\nfrom data_gestion.file_gestion import read_file\nfrom Data.axesPAndroide import *\nfrom algorithms.similarity_matrix import dissimilarity_and_n, dissimilarity_and_or, dissimilarity_over_over\nfrom algorithms.display_axes import filter_symmetric_axes, get_matches\nfrom algorithms.b_and_b import bnb, find_axes2, remove_last_ballots\n\n\nclass Interactive(Frame):\n \"\"\"\n Lower Frame, with the interactive display, showing results for each file\n \"\"\"\n\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.parent = parent\n self.left_image = None\n self.right_image = None\n self.label_name = None\n self.left_arrow = None\n self.right_arrow = None\n self.results = []\n self.get_results()\n self.current_graph = 0\n self.graph = None\n self.display_current_graph()\n self.pack_elements()\n\n def make_left_arrow(self, active):\n \"\"\"\n Creates a canvas on the left, with a left arrow, whose color depends on the \"active\" boolean\n :param active: If True, changes the color of the arrow, and it's action\n :type active: bool\n :return: the created canvas\n \"\"\"\n canvas = Canvas(self, width=100, height=100)\n if active:\n img = PhotoImage(file=\"ui/lower_frame/images/left_arrow.gif\")\n else:\n img = PhotoImage(file=\"ui/lower_frame/images/left_arrow_inactive.gif\")\n self.left_image = img\n canvas.create_image(0, 0, image=img, anchor=\"nw\", tag=\"leftarrow\")\n if active:\n canvas.tag_bind(\"leftarrow\", \"<Button-1>\", self.display_previous_result)\n return canvas\n\n def make_right_arrow(self, active):\n \"\"\"\n Creates a canvas on the left, with a left arrow, whose color depends on the \"active\" boolean\n :param active: If True, changes the color of the arrow, and it's action\n :type active: bool\n :return: the created canvas\n \"\"\"\n canvas = Canvas(self, width=100, height=100)\n if active:\n img = PhotoImage(file=\"ui/lower_frame/images/right_arrow.gif\")\n else:\n img = PhotoImage(file=\"ui/lower_frame/images/right_arrow_inactive.gif\")\n self.right_image = img\n canvas.create_image(0, 0, anchor=\"nw\", image=img, tag=\"rightarrow\")\n if active:\n canvas.tag_bind(\"rightarrow\", \"<Button-1>\", self.display_next_result)\n return canvas\n\n def display_previous_result(self, event):\n self.current_graph -= 1\n self.display_current_graph()\n self.pack_elements()\n\n def display_next_result(self, event):\n self.current_graph += 1\n self.display_current_graph()\n self.pack_elements()\n\n def display_current_graph(self):\n self.destroy_elements()\n self.left_arrow = self.make_left_arrow(self.current_graph > 0)\n self.right_arrow = self.make_right_arrow(self.current_graph < len(self.results) - 1)\n result = self.results[self.current_graph]\n if result[0] in listFiles:\n self.label_name = Label(self, text=\"File name:\\n\" + result[0] + \"\\n\\nWard name:\\n\" + listWards[\n listFiles.index(result[0])])\n elif result[0] in listFrenchFiles:\n self.label_name = Label(self, text=\"File name:\\n\" + result[0] + \"\\n\\nWard name:\\n\" + listFrenchWards[\n listFrenchFiles.index(result[0])])\n self.graph = Graph(self.results[self.current_graph])\n self.graph.afficher(10)\n\n def pack_elements(self):\n self.graph.pack()\n self.left_arrow.pack(side=LEFT)\n self.label_name.pack(side=LEFT)\n self.right_arrow.pack(side=RIGHT)\n\n def destroy_elements(self):\n if 
self.left_arrow:\n self.left_arrow.destroy()\n if self.right_arrow:\n self.right_arrow.destroy()\n if self.label_name:\n self.label_name.destroy()\n if self.graph:\n self.graph.destroy()\n\n def get_results(self):\n for f in self.parent.upper_frame.right_frame.list_files:\n structure = read_file(join(\"Data/all\", f), f in listFiles)\n if self.parent.upper_frame.left_frame.algo.get() == 0:\n preferences = structure[\"preferences\"]\n if self.parent.upper_frame.left_frame.filtered.get():\n preferences, nb_voters, nb_unique = remove_last_ballots(preferences)\n candidates = structure[\"candidates\"]\n ensemble, best = bnb(len(preferences), preferences, candidates)\n axes, card = find_axes2(best[0][0], candidates)\n axes = filter_symmetric_axes(axes)\n for axis in axes:\n self.results.append((f, axis))\n else:\n if self.parent.upper_frame.left_frame.dissimilarity.get() == 0:\n dissimilarity_function = dissimilarity_and_n\n elif self.parent.upper_frame.left_frame.dissimilarity.get() == 1:\n dissimilarity_function = dissimilarity_and_or\n else:\n dissimilarity_function = dissimilarity_over_over\n if f in listFiles:\n t, permutations = find_axis_from_structure(structure, dissimilarity_function,\n self.parent.upper_frame.left_frame.weighted.get())\n else:\n t, permutations = find_axis_from_structure(structure, dissimilarity_function,\n self.parent.upper_frame.left_frame.weighted.get(),\n unwanted_candidates=[2, 3, 7, 11])\n permutations = filter_symmetric_axes(permutations[1])\n for permutation in permutations:\n self.results.append((f, permutation))\n\n\nclass Graph(Canvas):\n MAX_WIDTH = 800 # taille maxi choisi pour mon écran\n MAX_HEIGHT = 200 # idem\n\n def __init__(self, result):\n \"\"\"\n Creates the graph of one of the permutation found for this file\n :param result: tuple (file, permutation) found by the algorithm\n \"\"\"\n Canvas.__init__(self, width=Graph.MAX_WIDTH + 20, height=Graph.MAX_HEIGHT + 20)\n self.xmin = 0\n self.xmax = len(result[1]) + 1\n\n f = result[0]\n permutation = result[1]\n\n self.file = f\n self.permutation = permutation\n\n if f in listFiles:\n self.axis = listAxes[listFiles.index(f)]\n else:\n self.axis = listFrenchAxes[listFrenchFiles.index(f)]\n\n self.ymin = 0\n self.ymax = len(self.axis) + 1\n\n matches = get_matches(self.axis)\n\n self.values = []\n\n for i in range(len(permutation)):\n if permutation[i] in matches:\n self.values.append((permutation[i], i + 1, matches[permutation[i]]))\n\n self.coeffx = (Graph.MAX_WIDTH - 20) / (self.xmax - self.xmin)\n self.coeffy = (Graph.MAX_HEIGHT - 20) / (self.ymax - self.ymin)\n\n def afficher(self, diametre):\n zerox = 20\n zeroy = Graph.MAX_HEIGHT\n\n # Display axes\n self.create_line(zerox, zeroy, Graph.MAX_WIDTH, zeroy, arrow=\"last\")\n self.create_line(zerox, zeroy, zerox, zeroy - self.ymax * self.coeffy, arrow=\"last\")\n\n # Display dots\n for name, x, y in self.values:\n x, y = self.coeffx * x + zerox, zeroy - self.coeffy * y\n self.create_oval(x - diametre / 2, y - diametre / 2, x + diametre / 2, y + diametre / 2, fill=\"black\")\n\n # Display graduation on x axis\n for ind, name in enumerate(self.permutation):\n x = self.coeffx * (ind + 1) + zerox\n self.create_line(x, zeroy - 5, x, zeroy + 5)\n self.create_text(x, zeroy + 15, text=name)\n\n # Display Lines between dots\n for i in range(1, len(self.values)):\n x0, y0 = self.coeffx * self.values[i - 1][1] + zerox, zeroy - self.coeffy * self.values[i - 1][2]\n x1, y1 = self.coeffx * self.values[i][1] + zerox, zeroy - self.coeffy * self.values[i][2]\n 
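# draw a thick segment between this dot and the previous one\n            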
self.create_line(x0, y0, x1, y1, width=diametre / 3)\n"
},
{
"alpha_fraction": 0.6234718561172485,
"alphanum_fraction": 0.634881854057312,
"avg_line_length": 28.190475463867188,
"blob_id": "6e07b4e7976400721ba1cbf290babbaa7c61b57e",
"content_id": "93236749f1cb3e9320e08f700bd692bc33c14d92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 42,
"path": "/ui/top_menu.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nfrom ttk import Combobox\nfrom tkFileDialog import *\nfrom tkMessageBox import *\n\napropos_message = \"\"\"\nP-ANDROIDE Project\n\"\"\"\n\n\nclass TopMenu(Menu):\n \"\"\"\n Class creating a Top Menu, on top of the window, or in the menu bar for Mac\n Each method creates a pulldown menu\n \"\"\"\n def __init__(self, parent):\n Menu.__init__(self, parent)\n self.parent = parent\n self.initialize()\n\n def initialize(self):\n self.menu_mode()\n self.menu_aide()\n self.parent.parent.config(menu=self)\n\n def menu_mode(self):\n menu1 = Menu(self, tearoff=0)\n menu1.add_command(label=\"Interactive\", command=lambda: self.parent.set_mode(1))\n menu1.add_command(label=\"Benchmark\", command=lambda: self.parent.set_mode(0))\n menu1.add_separator()\n menu1.add_command(label=\"Close\", command=self.parent.quit)\n self.add_cascade(label=\"Mode\", menu=menu1)\n\n def menu_aide(self):\n menu2 = Menu(self, tearoff=0)\n menu2.add_command(label=\"About\", command=self.apropos)\n self.add_cascade(label=\"About\", menu=menu2)\n\n @staticmethod\n def apropos():\n showinfo(\"P-ANDROIDE\", apropos_message)\n\n"
},
{
"alpha_fraction": 0.5855893492698669,
"alphanum_fraction": 0.5934925079345703,
"avg_line_length": 31.145946502685547,
"blob_id": "840a41fdc316e36a2d190ac9d9c10d9a7f275b99",
"content_id": "74bca0890777e997e978d24c97c25ccdd0f1f986",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11901,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 370,
"path": "/algorithms/b_and_b.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env sage -python\n# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom os import getcwd\n\nsys.path.append(getcwd())\n\nfrom data_gestion.generation import generation\nfrom data_gestion.file_gestion import read_file, read_directory\nfrom copy import copy\nfrom time import time\nfrom sage.all import Set\nfrom sage.graphs.pq_trees import reorder_sets, P, Q\nfrom compiler.ast import flatten\n\n\ndef filter_symmetric_axes(permutations):\n \"\"\"\n Filter the list of permutations to keep only one axis when 2 are symmetric\n :param permutations: list of permutations found\n :type permutations: list[list[int]]\n :return: the filtered list of permutations\n :rtype: list[list[int]]\n \"\"\"\n filtered_permutations = []\n for permutation in permutations:\n if list(reversed(permutation)) not in filtered_permutations:\n filtered_permutations.append(permutation)\n return filtered_permutations\n\n\ndef bnb(nvar, preferences, candidates, node=([], 0), enum_list=[], best=([], 0), i=0):\n if preferences:\n nb_voters, ballot = preferences[0]\n new_preferences = copy(preferences)\n new_preferences.remove((nb_voters, ballot))\n\n copyNodeSet, copyNodeBound = copy(node)\n\n nodeSetL = copyNodeSet+[(nb_voters, ballot)]\n nodeSetR = copyNodeSet\n i+=1\n\n #Find axes, number of axes\n axesL, nb_axesL = find_axes(nodeSetL, candidates)\n if axesL:\n nodeBoundL = upper_bound(nodeSetL, new_preferences, candidates, axesL)\n nodeL = (nodeSetL, nodeBoundL)\n\n # If at leaf node\n if i==nvar:\n # Calculate optimal local solution\n v = sol(nodeL, candidates, new_preferences)\n\n # If local solution > current optimal solution, replace\n if v[1] > best[1]:\n best = v\n\n #If upper bound > current optimal solution, continue branching\n if nodeBoundL > best[1]:\n enum_list += [nodeL]\n enum_list, best = bnb(nvar, new_preferences, candidates, nodeL, enum_list, best, i)\n\n if not nodeSetR:\n nodeBoundR = sum_nb_voters(nodeSetR, new_preferences)\n else:\n nodeBoundR = copyNodeBound\n nodeR = (nodeSetR, nodeBoundR)\n\n #If at node leaf\n if i==nvar:\n v = sol(nodeR, candidates, new_preferences)\n\n # If local solution > current optimal solution, replace\n if v[1] > best[1]:\n best = v\n\n #If upper bound > current optimal solution, continue branching\n if nodeBoundR > best[1]:\n enum_list += [nodeR]\n enum_list, best = bnb(nvar, new_preferences, candidates, nodeR, enum_list, best, i)\n\n return enum_list, best\n\ndef transform_ballots(nodeSet):\n \"\"\"\n Transforms ballots in node to Sets\n :param nodeSet: Current node ([ballot_set], upper_bound)\n :return: List of ballots as Set\n \"\"\"\n L = []\n for nb_voters, ballot in nodeSet:\n if isinstance(ballot[-1], int):\n L.append(Set(ballot))\n else:\n L.append(Set(ballot[:-1]))\n return L\n\ndef upper_bound(nodeSet, remaining_prefs, candidates, axes):\n \"\"\"\n Calculates the node's upper bound\n :param nodeSet: Current node set of prefs\n :param remaining_prefs: List of remaining ballots in preferences\n :param candidates: List of candidates\n :param axes: Compatible axes found at node\n :return: int\n \"\"\"\n new_bound = 0\n for pref in nodeSet:\n new_bound += pref[0]\n\n for nb_voters, pref in remaining_prefs:\n # Transform preference to set of ballot without indifference\n if isinstance(pref[-1], int):\n ballot = Set(pref)\n else:\n ballot = Set(pref[:-1])\n # Determine whether the ballot is coherent with one of the axes\n for axis in axes:\n if is_coherent(ballot, axis):\n # If coherent, add ballot number of voters to the new bound\n new_bound += nb_voters\n 
break\n    return new_bound\n\ndef sol(node, candidates, remaining_prefs):\n    \"\"\"\n    Calculates a local optimal solution\n    :param node: Current node\n    :param candidates: List of candidates\n    :param remaining_prefs: List of remaining ballots in preferences\n    :return: tuple (possibly extended node, value of the local optimal solution)\n    \"\"\"\n    v = 0\n    for (nb_voters, prefs) in node[0]:\n        v += nb_voters\n\n    for (nb_voters, prefs) in remaining_prefs:\n        if isinstance(prefs[-1], int):\n            ballot = Set(prefs)\n        else:\n            ballot = Set(prefs[:-1])\n        axes, nb_axes = find_axes2(node[0], candidates)\n        for axis in axes:\n            if is_coherent(ballot, axis):\n                v += nb_voters\n                node = (node[0] + [(nb_voters, prefs)], node[1])\n                break\n    return (node, v)\n\ndef find_axes(nodeSet, candidates):\n    \"\"\"\n    Returns the axes coherent with the given ballot set\n    :param nodeSet: Current node ([ballot_set], upper_bound)\n    :param candidates: List of candidates\n    :return: List of possible axes, False if none found\n    \"\"\"\n    L = []\n    candL = []\n    ballots = transform_ballots(nodeSet)\n\n    # Group the ballots that contain candidate c\n    for c in candidates:\n        S = Set([])\n        for ballot in ballots:\n            if c in ballot:\n                S += Set([ballot])\n        if S:\n            S += Set([Set([c])])\n            L += Set([S])\n            candL += Set([S])\n    # Turn the list of Sets into a PQ-tree and align it\n    axes = P(L)\n    for ballot in ballots:\n        try:\n            axes.set_contiguous(ballot)\n        except:\n            return False, 0\n    # Determine the axes from the alignments found\n    all_axes = [] # List of axes\n    for axis in axes.orderings():\n        A = []\n        for ballot_set in flatten(axis):\n            if ballot_set:\n                A += [candL.index(ballot_set)+1]\n        all_axes += [A]\n    axes_filtered = filter_symmetric_axes(all_axes)\n    return axes_filtered, axes.cardinality()\n\ndef find_axes2(nodeSet, candidates):\n    \"\"\"\n    Returns all possible coherent axes\n    :param nodeSet: Current node ([ballot_set], upper_bound)\n    :param candidates: List of candidates\n    :return: List of possible axes, False if none found\n    \"\"\"\n    L = []\n    ballots = transform_ballots(nodeSet)\n    # Group the ballots that contain candidate c\n    for c in candidates:\n        S = Set([])\n        for ballot in ballots:\n            if c in ballot:\n                S += Set([ballot])\n        S += Set([Set([c])])\n        L += Set([S])\n\n    # Turn the list of Sets into a PQ-tree and align it\n    axes = P(L)\n    for ballot in ballots:\n        try:\n            axes.set_contiguous(ballot)\n        except:\n            return False, 0\n    # Determine the axes from the alignments found\n    all_axes = [] # List of axes\n    for axis in axes.orderings():\n        A = []\n        for ballot_set in flatten(axis):\n            A += [L.index(ballot_set)+1]\n        all_axes += [A]\n    axes_filtered = filter_symmetric_axes(all_axes)\n    return axes_filtered, axes.cardinality()\n\ndef is_coherent(ballot, axes):\n    \"\"\"\n    Determines if a ballot is coherent with a given axis\n    :param ballot: a ballot for candidates\n    :param axes: axis of preference\n    :return: True if the ballot is coherent with the axis\n    \"\"\"\n    return any(ballot == Set(axes[i:i + len(ballot)]) for i in range(len(axes) - len(ballot) + 1))\n\ndef sum_nb_voters(prefs, remaining_prefs):\n    \"\"\"\n    Calculates the upper bound of an empty node\n    :param prefs: ballots already selected at the node\n    :param remaining_prefs: List of remaining ballots in preferences\n    :return: upper bound\n    \"\"\"\n    bound = 0\n    for pref in prefs:\n        bound += pref[0]\n\n    for node in remaining_prefs:\n        bound += node[0]\n    return bound\n\ndef nodes(n):\n    \"\"\"\n    Calculates the number of nodes in a graph of n ballots\n    :param n: number of ballots\n    :return: number of nodes\n    \"\"\"\n    t_nodes = 0\n    for i in range(n):\n        t_nodes += 2**(i+1)\n    return t_nodes\n\ndef remove_last_ballots(preferences):\n    \"\"\"\n    Removes the last ballots (those cast by a single voter) from the preferences\n    :param preferences: List of ballots\n    :return: Reduced list of ballots, the total number of ballots kept, the number of unique ballots kept\n    \"\"\"\n    filtered = []\n    unique = 0\n    total = 0\n    for nb_voters, ballot in preferences:\n        if nb_voters > 1:\n            filtered += [(nb_voters, ballot)]\n            total += nb_voters\n            unique += 1\n    return filtered, total, unique\n\ndef example_generation():\n    structures, candidates = generation(8, 10)\n    candidates = [i+1 for i in range(8)]\n    preferences = structures[\"preferences\"]\n    print(\"Preferences : \" + str(preferences))\n    print(\"Candidats : \" + str(candidates))\n    t1 = time()\n    bb, best = bnb(len(preferences), preferences, candidates)\n    t2 = time()\n    print(\"Best solution : \" + str(best))\n    #print bb\n    print(\"Duration : \" + str(t2-t1))\n    print(\"On explore \" + str(len(bb)) + \" noeuds parmi \" + str(nodes(len(preferences))) + \" noeuds.\")\n\ndef example_file():\n    structure = read_file(sys.argv[1])\n    preferences = structure[\"preferences\"]\n    candidates = [i+1 for i in range(len(structure[\"candidates\"]))]\n    print(\"Preferences : \" + str(preferences))\n    print(\"Candidats : \" + str(candidates))\n    t1 = time()\n    bb, best = bnb(len(preferences), preferences, candidates)\n    t2 = time()\n    print(\"done\")\n    f = sys.argv[1].split(\".\")[0] + \"_resultat.txt\"\n    wfile = open(f, 'w')\n    wfile.write(\"Plus large ensemble cohérent : \")\n    for bull in best[0][0]:\n        wfile.write(str(bull) + \"\\n\")\n    wfile.write(\"Resultat : \" + str(best[1]) + \"\\n\")\n    wfile.write(\"Duration : \" + str(t2-t1) + \"\\n\")\n    wfile.write(\"Axes :\\n\")\n    axes, card = find_axes2(best[0][0], candidates)\n    if axes:\n        for a in axes:\n            wfile.write(str(a)+\"\\n\")\n    wfile.close()\n\ndef example_all_files():\n    structure = read_directory(sys.argv[1])\n    preferences = structure[\"preferences\"]\n    candidates = [i+1 for i in range(len(structure[\"candidates\"]))]\n    print(\"Preferences : \" + str(preferences))\n    print(\"Candidats : \" + str(candidates))\n    t1 = time()\n    bb, best = bnb(len(preferences), preferences, candidates)\n    t2 = time()\n    print(\"done\")\n    f = sys.argv[1].split(\".\")[0] + \"_resultat.txt\"\n    wfile = open(f, 'w')\n    wfile.write(\"Plus large ensemble cohérent : \")\n    for bull in best[0][0]:\n        wfile.write(str(bull) + \"\\n\")\n    wfile.write(\"Resultat : \" + str(best[1]) + \"\\n\")\n    wfile.write(\"Duration : \" + str(t2-t1) + \"\\n\")\n    wfile.write(\"Axes :\\n\")\n    axes, card = find_axes2(best[0][0], candidates)\n    if axes:\n        for a in axes:\n            wfile.write(str(a)+\"\\n\")\n    wfile.close()\n\ndef example_filtered():\n    structure = read_file(sys.argv[1])\n    preferences_bis = structure[\"preferences\"]\n    preferences, nb_voters, uniq = remove_last_ballots(preferences_bis)\n    candidates = [i+1 for i in range(len(structure[\"candidates\"]))]\n    print(\"Preferences : \" + str(preferences))\n    print(\"Candidats : \" + str(candidates))\n    t1 = time()\n    bb, best = bnb(len(preferences), preferences, candidates)\n    t2 = time()\n    print(\"done\")\n    f = sys.argv[1].split(\".\")[0] + \"_resultat.txt\"\n    wfile = open(f, 'w')\n    wfile.write(\"Plus large ensemble cohérent : \")\n    for bull in best[0][0]:\n        wfile.write(str(bull) + \"\\n\")\n    wfile.write(\"Resultat : \" + str(best[1]) + \"\\n\")\n    wfile.write(\"Duration : \" + str(t2-t1) + \"\\n\")\n    wfile.write(\"Axes :\\n\")\n    axes, card = find_axes2(best[0][0], candidates)\n    if axes:\n        for a in axes:\n            
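# write one axis per line in the results file\n            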
wfile.write(str(a)+\"\\n\")\n wfile.close()\n\nif __name__ == '__main__':\n #example()\n #example_file()\n #example_all_files()\n example_filtered()\n"
},
{
"alpha_fraction": 0.5335882306098938,
"alphanum_fraction": 0.5417804718017578,
"avg_line_length": 40.6136360168457,
"blob_id": "e3ac6d1babb8ffba4c4fe33a90494b728e84a2de",
"content_id": "6d1bce4a6dd9db823746d53619237d0f898ae00b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1831,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 44,
"path": "/ui/upper_frame/files_menu.py",
"repo_name": "lauragreige/P-ANDROIDE",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom Tkinter import *\nfrom Data.axesPAndroide import *\n\n\nclass FilesMenu(LabelFrame):\n \"\"\"\n Adds all widgets to select files to test\n \"\"\"\n\n def __init__(self, master):\n LabelFrame.__init__(self, master, text=\"Choose files\")\n self.parent = master\n self.list_files = []\n self.check_buttons = []\n self.add_files()\n\n def add_files(self):\n label = Label(self, text=\"Choose files:\", font=(\"\", 16))\n label.grid()\n # for f in range(len(listFiles)):\n # checkbtn = Checkbutton(self, text=listFiles[f], command=lambda name=f: self.add_file(listFiles[name]),\n # padx=10)\n # checkbtn.grid(row=(f % (len(listFiles) / 2)), column=(f / (len(listFiles) / 2)))\n for f in range(len(listFiles)):\n checkbtn = Checkbutton(self, text=listFiles[f], command=lambda name=f: self.add_file(listFiles[name]),\n padx=10)\n checkbtn.grid(row=(f % ((len(listFiles) + len(listFrenchFiles)) / 2)),\n column=(f / ((len(listFiles) + len(listFrenchFiles)) / 2)))\n for f in range(len(listFrenchFiles)):\n checkbtn = Checkbutton(self, text=listFrenchFiles[f],\n command=lambda name=f: self.add_file(listFrenchFiles[name]), padx=10)\n checkbtn.grid(row=((f + len(listFiles)) % ((len(listFiles) + len(listFrenchFiles)) / 2)),\n column=((f + len(listFiles)) / ((len(listFiles) + len(listFrenchFiles)) / 2)))\n\n def add_file(self, filename):\n \"\"\"\n :param filename:\n :return:\n \"\"\"\n if filename in self.list_files:\n self.list_files.remove(filename)\n else:\n self.list_files.append(filename)\n"
}
] | 13 |
mkhambat/Binary-search-tree
|
https://github.com/mkhambat/Binary-search-tree
|
ffb13ceb8d4e55056a860b76db5eac2b086e9ef3
|
4d5b1ffc161f49bf07c9e8a4c2881eeeeb4929f8
|
b6a1485cfcbb530e6ef1c6c29cb93aaa8b81d8c9
|
refs/heads/master
| 2021-09-02T02:46:44.907210 | 2017-12-29T20:52:10 | 2017-12-29T20:52:10 | 115,752,618 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7669376730918884,
"alphanum_fraction": 0.7859078645706177,
"avg_line_length": 35.900001525878906,
"blob_id": "222f39b9b62be7a4cff5b99e2e2c2059da26154b",
"content_id": "e499133eae3fd68a168ec66d70da0f2e0bab2641",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 10,
"path": "/README.md",
"repo_name": "mkhambat/Binary-search-tree",
"src_encoding": "UTF-8",
"text": "# Binary-search-tree\n\nThe implemented BST supports a number of operations\n1. Insert into tree\n2. Search an element\n3. Print elements in inorder\n4. Returns the size of the tree\n5. Returns the largest element in the tree\n6. Returns the smallest element in the tree\n7. Returns a tree such that each node contains sum of all nodes greater than that node (greaterSumTree())\n"
},
{
"alpha_fraction": 0.6122015714645386,
"alphanum_fraction": 0.6286472082138062,
"avg_line_length": 15.801886558532715,
"blob_id": "7b8a052b7b5f12c8b05f6b53da5a9b5c553d313b",
"content_id": "f9c6f58fd5a0c153c216e8186cb508bcbf412f4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1885,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 106,
"path": "/bst.py",
"repo_name": "mkhambat/Binary-search-tree",
"src_encoding": "UTF-8",
"text": "class Node:\r\n\tdef __init__(self,data):\r\n\t\tself.data=data\r\n\t\tself.left=None\r\n\t\tself.right=None\r\n\r\ndef insert_element(data,node):\r\n\r\n\tif(node.data):\r\n\t\t\r\n\t\tif(data>=node.data):\r\n\t\t\tif(node.right == None):\r\n\t\t\t\tnode.right=Node(data)\r\n\t\t\telse:\r\n\t\t\t\tinsert_element(data,node.right)\r\n\t\tif(data<node.data):\r\n\t\t\tif(node.left == None):\r\n\t\t\t\tnode.left=Node(data)\r\n\t\t\telse:\r\n\t\t\t\tinsert_element(data,node.left)\r\n\telse:\r\n\t\tnode.data=data\r\nl=[]\r\ndef inorder(node):\r\n\tglobal l\r\n\tif(node.left):\r\n\t\tinorder(node.left)\r\n\tl.append( node.data)\r\n\r\n\tif(node.right):\r\n\t\tinorder(node.right)\r\n\treturn l\r\n\r\n\r\ndef contains(k,node):\r\n\r\n\tif(node == None):\r\n\t\tprint False\r\n\telse:\r\n\t\tif(node.data==k):\r\n\t\t\tprint True\r\n\t\tif(node.data<k):\r\n\t\t\tcontains(k,node.right)\r\n\t\tif(node.data>k):\r\n\t\t\tcontains(k,node.left)\r\n\r\n\r\ndef smallest(node):\r\n\tif(node==None):\r\n\t\tprint \"No elements present in Tree\"\r\n\tif(node.left):\r\n\t\tsmallest(node.left)\r\n\telse:\r\n\t\tprint node.data\r\n\r\ndef largest(node):\r\n\tif(node==None):\r\n\t\tprint \"No elements present in Tree\"\r\n\tif(node.right):\r\n\t\tlargest(node.right)\r\n\telse:\r\n\t\tprint node.data\r\n\r\n\r\ndef size(node):\r\n\t\r\n\tprint(len(inorder(node)))\r\n\t\r\n\r\nsum=0\r\ndef greaterSumTree(node):\r\n\tglobal sum\r\n\tif(node):\r\n\t\tgreaterSumTree(node.right)\r\n\t\ttemp= node.data\r\n\t\tnode.data=sum\r\n\t\tsum+=temp\r\n\t\tgreaterSumTree(node.left)\r\n\r\n\telse:\r\n\t\treturn\r\n\r\narray = [5, 3, 9, 7, 1, 4, 0, 12, 11, 13, 15, 6, 2, 8, 10, 14]\r\nnode=Node(array[0])\r\n\r\nfor i in array[1:]:\r\n\tinsert_element(i,node)\r\n\r\nprint \"contains(5): \" \r\ncontains(5,node)\r\nprint \"contains(21): \"\r\ncontains(21,node)\r\nprint \"Print tree: \", inorder(node)\r\nl=[]\r\nprint \"size of tree: \"\r\nsize(node)\r\nprint \"Smallest node in tree: \"\r\nsmallest(node)\r\nprint \"Largest node in tree: \"\r\nlargest(node)\r\nl=[]\r\nprint \"Inorder before calling greaterSumTree(): \", inorder(node)\r\ngreaterSumTree(node)\r\nl=[]\r\nl=inorder(node)\r\nprint \"Inorder after calling greaterSumTree(): \", l"
}
] | 2 |
nwilken/aws-sso
|
https://github.com/nwilken/aws-sso
|
d0a41e697193917a460608e8cef2f0fcdbbcf0fb
|
ef830164ae7b4465ce6015f357c6f4802c682397
|
78fb1748979c91b7d6242ba53fe1d565b39b3947
|
refs/heads/master
| 2022-12-10T05:48:00.177284 | 2022-05-04T14:51:18 | 2022-05-04T14:51:18 | 209,867,355 | 7 | 1 | null | 2019-09-20T19:34:50 | 2022-05-13T21:27:22 | 2022-12-08T05:28:00 |
Python
|
[
{
"alpha_fraction": 0.48148149251937866,
"alphanum_fraction": 0.6949890851974487,
"avg_line_length": 15.962963104248047,
"blob_id": "c8c511e48bdccfe804af802a409b3bf2a7e32e99",
"content_id": "f69dff2cfb303a83267e36216a9092a94c4d72a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 27,
"path": "/requirements.txt",
"repo_name": "nwilken/aws-sso",
"src_encoding": "UTF-8",
"text": "astroid==1.6.6\nbackports.functools-lru-cache==1.5\nbeautifulsoup4==4.7.1\nboto==2.49.0\nbotocore==1.12.168\nbs4==0.0.1\ncertifi==2019.3.9\nchardet==3.0.4\nconfigparser==3.7.4\ndocutils==0.14\nenum34==1.1.6\nfutures==3.2.0\nidna==2.8\nisort==4.3.20\njmespath==0.9.4\nlazy-object-proxy==1.4.1\nlxml==4.6.5\nmccabe==0.6.1\npylint==1.9.4\npython-dateutil==2.8.0\nrequests==2.22.0\ns3transfer==0.2.1\nsingledispatch==3.4.0.3\nsix==1.12.0\nsoupsieve==1.9.1\nurllib3==1.26.5\nwrapt==1.11.1\n\n"
},
{
"alpha_fraction": 0.6899999976158142,
"alphanum_fraction": 0.6930000185966492,
"avg_line_length": 23.390243530273438,
"blob_id": "5b16a25e842bba4ae3b13311c53dd9d5cecdffa5",
"content_id": "1177c372abff40e34690c2e3beb67eb6cebe108e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 41,
"path": "/README.md",
"repo_name": "nwilken/aws-sso",
"src_encoding": "UTF-8",
"text": "## Assume role in AWS using ASU Single Sign-On\n\n#### Usage:\n\n`docker run -it --rm -v ~/.aws:/root/.aws asuuto/aws-sso:latest`\n\n(or `-v %userprofile%\\.aws:/root/.aws` when running on Windows)\n\nthen you can, for example\n\n`aws --profile saml s3 ls`\n\nor, if you don't have the aws cli installed locally\n\n`docker run -it --rm -v ~/.aws:/root/.aws asuuto/awscli:latest aws --profile saml s3 ls`\n\nif you don't already have a `~/.aws/credentials` file, start with this one\n\n```\n[default]\noutput = json\nregion = us-west-2\naws_access_key_id = \naws_secret_access_key = \n```\n\n#### Side Effects:\n\n* Creates/updates a `saml` profile in `~/.aws/credentials`\n* Creates/updates `~/.aws/sso_session_cookies` to cache your ASU SSO and MFA sessions\n\n#### Build:\n\n```\ndocker build --pull -t asuuto/aws-sso:latest .\ndocker push asuuto/aws-sso:latest\n```\n\n#### Known issues:\n- An error may be thrown if you have access to only one AWS account/role\n- Process fails if you are being prompted to change your password at login\n"
},
{
"alpha_fraction": 0.6423357725143433,
"alphanum_fraction": 0.6496350169181824,
"avg_line_length": 16.0625,
"blob_id": "5ddb7259e09bbc5a4d29e882d7fb6cb3f6331be1",
"content_id": "30d93d73ac83c8a3b7a7ff69067f5d2af47a3cf0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 16,
"path": "/Dockerfile",
"repo_name": "nwilken/aws-sso",
"src_encoding": "UTF-8",
"text": "FROM python:2.7\n\nCOPY requirements.txt /\nRUN pip install -r /requirements.txt && \\\n rm -rf ~/.cache && \\\n rm -rf ~/.wget-hsts\n\nCOPY aws-sso.py /app/aws-sso.py\nRUN chmod +x /app/aws-sso.py\n\nWORKDIR /root/.aws\nCOPY credentials .\n\nWORKDIR /root\n\nCMD [\"/app/aws-sso.py\"]\n\n"
},
{
"alpha_fraction": 0.647584080696106,
"alphanum_fraction": 0.6537002921104431,
"avg_line_length": 35.90519332885742,
"blob_id": "08875b5648a28cead8559832f8a2141b847ee0af",
"content_id": "6442264d1299fc43ae533c13ea545242ee98463f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16350,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 443,
"path": "/aws-sso.py",
"repo_name": "nwilken/aws-sso",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\nimport boto.sts\nimport boto.s3\nimport requests\nimport getpass\nimport ConfigParser\nimport base64\nimport logging\nimport xml.etree.ElementTree as ET\nimport re\nimport json\nimport time\ntry:\n import httplib\nexcept ImportError:\n import http.client as httplibfrom\nfrom bs4 import BeautifulSoup\nfrom os.path import expanduser, isfile\nfrom urlparse import urlparse, urlunparse\nimport pickle\n\n##########################################################################\n# Variables\n\n# region: The default AWS region that this script will connect\n# to for all API calls\nregion = 'us-west-2'\n\n# output format: The AWS CLI output format that will be configured in the\n# saml profile (affects subsequent CLI calls)\noutputformat = 'json'\n\n# awsconfigfile: The file where this script will store the temp\n# credentials under the saml profile\nawsconfigfile = '/.aws/credentials'\n\n# SSL certificate verification: Whether or not strict certificate\n# verification is done, False should only be used for dev/test\nsslverification = True\n\n# Where to store session cookies for future logins\ncookiefile = expanduser('~/.aws/sso_session_cookies')\n\n# idpentryurl: The initial url that starts the authentication process.\nidpentryurl = 'https://shibboleth2.asu.edu/idp/profile/SAML2/Unsolicited/SSO?providerId=urn:amazon:webservices'\nduourl = \"https://weblogin.asu.edu/cas/login?service=https%3A%2F%2Fshibboleth2.asu.edu%2Fidp%2FAuthn%2FExternal%3Fconversation%3De1s1&entityId=urn%3Aamazon%3Awebservices\"\n\n\n# Uncomment to enable low level debugging\n#httplib.HTTPConnection.debuglevel = 9\n#logging.basicConfig()\n#logging.getLogger().setLevel(logging.DEBUG)\n#requests_log = logging.getLogger(\"requests.packages.urllib3\")\n#requests_log.setLevel(logging.DEBUG)\n#requests_log.propagate = True\n\n##########################################################################\n\n# Initiate session handler\nsession = requests.Session()\n\n# If there is no cached login, or it's expired, go through the login process again\nneed_login = True\nassertion = ''\n\nif isfile(cookiefile):\n with open(cookiefile, 'rb') as f:\n session.cookies.update(pickle.load(f))\n response = session.post(idpentryurl, data={}, verify=sslverification)\n # print response.text\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text.decode('utf8'), 'lxml')\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n if (assertion != ''):\n need_login = False\n\nif need_login:\n # Get the federated credentials from the user\n if len(sys.argv) > 4:\n username = sys.argv[1]\n password = sys.argv[2]\n duration = sys.argv[3]\n organization = sys.argv[4]\n else:\n print \"ASURITE Username:\",\n username = raw_input()\n password = getpass.getpass()\n print ''\n duration = 30\n organization = 'production'\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which is the final url after all the 302s\n idpauthformsubmiturl = formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form 
values the IdP expects\n formsoup = BeautifulSoup(formresponse.text.decode('utf8'), 'lxml')\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n #print \"name=%s, value=%s\" %(name,value)\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n elif \"auth\" in name.lower():\n #print \"Setting AuthState to %s\" %value\n payload['AuthState'] = value\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n #payload['_eventId_proceed'] = ''\n\n # Populate the following from input or defaults \n payload['session-duration'] = duration\n payload['organization'] = organization\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print payload\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname \n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the \n # idpauthformsubmiturl above\n #for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n # action = inputtag.get('action')\n # print (action)\n # if action:\n # parsedurl = urlparse(idpentryurl)\n # if \"?\" != action.lower():\n # idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n # else:\n # # empty action so will use the hardcoded login URL\n # idpauthformsubmiturl = loginurl\n\n\n # Performs the submission of the IdP login form with the above post data\n\n #print \"=====Posting URL: %s\" %( idpauthformsubmiturl)\n response = session.post(\n idpauthformsubmiturl, \n data=payload, verify=sslverification)\n\n # Store the final URL to use after successful authentication with DUO\n parenturl=response.url\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n\n # Debug the response if needed\n #print (response.text)\n\n print \"Logging you in...\"\n\n # Decode the response and extract the iframe info to authenticate with Duo\n soup = BeautifulSoup(response.text.decode('utf8'), 'lxml')\n datahost = ''\n datasigrequest = ''\n sigresponse = ''\n\n # print (soup.prettify())\n for iframetag in soup.find_all(\"iframe\", id=\"duo_iframe\"):\n # print \"iframetag=%s\" % iframetag\n datahost = iframetag['data-host']\n u = iframetag['data-sig-request']\n #Exctract only the TX portion\n i = u.find(':APP')\n datasigrequest = u[0:i]\n sigresponseappstr = u[i:len(u)]\n #print \"datahost=%s, sigresponseappstr=%s, datasigrequesigresponseappstr=%s\" %(datahost,sigresponseappstr,datasigrequest)\n\n if datahost == '':\n print(\"Couldn't log you in. 
Check your username and password.\")\n sys.exit(1)\n\n casexecution = soup.find(\"input\", attrs={\"name\": \"execution\"})['value']\n\n # Create the Duo session\n duosession = requests.Session()\n\n # Opens the duo authentication url and follows all of the HTTP302 redirects, and\n # gets to the prompt\n urlpayload = {}\n urlpayload['tx'] = datasigrequest\n urlpayload['parent'] = parenturl\n duoauthurl = \"https://\" + datahost + \"/frame/web/v1/auth\" \n #print \"===== duoauthurl=%s\" %duoauthurl\n response = duosession.get(duoauthurl, params=urlpayload, verify=sslverification)\n #print (response.text)\n\n urlpayload['StateId'] = idpauthformsubmiturl\n response = duosession.post(duoauthurl, data=urlpayload, verify=sslverification)\n #print (response.text)\n duourlpromt = response.url\n\n #for some reason we need to GET and POST to Duo's prompt URL\n formresponse = duosession.get(duourlpromt, verify=sslverification)\n #print (formresponse.text)\n #print formresponse.url\n duourlprompt = formresponse.url\n\n formsoup = BeautifulSoup(formresponse.text.decode('utf8'), 'lxml')\n # Post duo prompt paramenters \n payload = {}\n #store sid to use later when we query duo status after authentication push\n sid = \"\"\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n # print inputtag\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n # print \"name=%s, value=%s\" %(name,value)\n if \"sid\" in name.lower():\n payload[name] = value\n sid = value\n elif \"preferred_device\" in name.lower():\n payload['device'] = value\n elif \"preferred_factor\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload['factor'] = value\n elif \"out_of_date\" in name.lower():\n payload[name] = value\n\n print 'Enter an authentication factor (\"push\", \"phone\", \"sms\") or Duo passcode. 
Or press enter to use your default factor: ',\n auth_factor = raw_input()\n\n # Only throw up the \"press enter after approving\" prompt for phone and push flows, it's not needed for SMS or passcode\n wait_for_confirm = True\n\n if auth_factor == '':\n print('Using default auth factor (%s)' % payload['factor'])\n elif auth_factor == 'push':\n print('Sending a Duo Push to your phone.')\n if 'device' not in payload or payload['device'] == '':\n payload['device'] = 'phone1'\n payload['factor'] = 'Duo Push'\n elif auth_factor == 'phone':\n print('Calling you.')\n if 'device' not in payload or payload['device'] == '':\n payload['device'] = 'phone1'\n payload['factor'] = 'Phone Call'\n elif auth_factor == 'sms':\n payload['factor'] = 'sms'\n session.post(\n duourlprompt,\n data=payload, verify=sslverification)\n print 'Please enter the code we texted you: ',\n passcode = raw_input()\n payload['factor'] = 'Passcode'\n payload['passcode'] = passcode\n wait_for_confirm = False\n else:\n print('Using passcode.')\n payload['factor'] = 'Passcode'\n payload['passcode'] = auth_factor\n wait_for_confirm = False\n\n response = session.post(\n duourlprompt,\n data=payload, verify=sslverification)\n\n duourlprompt = formresponse.url\n # print(duourlprompt)\n\n # Debug the response if needed\n # print (response.text)\n d = json.loads(response.text)\n payload = {}\n payload['txid'] = d[\"response\"][\"txid\"]\n payload['sid'] = sid\n\n duourlstatus = \"https://\" + datahost + \"/frame/status\"\n\n if wait_for_confirm:\n print \"Please press enter after you've accepted the Duo request\",\n junkin = raw_input()\n\n response = session.post(\n duourlstatus,\n data=payload, verify=sslverification)\n # print (response.text)\n\n duourlstatus = response.url\n # print(duourlstatus)\n\n response = session.post(\n duourlstatus + '/' + payload['txid'],\n data=payload, verify=sslverification)\n # print (response.text)\n\n payload = {}\n d = json.loads(response.text)\n sig_response = d[\"response\"][\"cookie\"] + sigresponseappstr\n payload[\"signedDuoResponse\"] = sig_response\n payload[\"_eventId\"] = \"submit\"\n payload[\"execution\"] = casexecution\n parenturl = d[\"response\"][\"parent\"]\n response = session.post(\n duourl, \n data=payload, verify=sslverification)\n\n # print (response.text)\n\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text.decode('utf8'), 'lxml')\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n # Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print 'Response did not contain a valid SAML assertion. 
Are you being prompted to change your password at login?'\n sys.exit(0)\n\n# Debug only\n# print(base64.b64decode(assertion))\n\n# Parse the returned assertion and extract the authorized roles\nawsroles = []\n#print base64.b64decode(assertion)\nroot = ET.fromstring(base64.b64decode(assertion))\nfor saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n# print saml2attribute\n if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n# Note the format of the attribute value should be role_arn,principal_arn\n# but lots of blogs list it as principal_arn,role_arn so let's reverse\n# them if needed\nfor awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n awsroles.remove(awsrole)\n\n# If I have more than one role, ask the user which one they want,\n# otherwise just proceed\nprint \"\"\nif len(awsroles) > 1:\n i = 0\n print \"Please choose the role you would like to assume:\"\n for awsrole in awsroles:\n print '[', i, ']: ', awsrole.split(',')[0]\n i += 1\n print \"Selection: \",\n selectedroleindex = raw_input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print 'You selected an invalid role index, please try again'\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\nelse:\n try:\n role_arn = awsroles[0].split(',')[0]\n except IndexError:\n print \"Could not find role ARN\"\n sys.exit(0)\n principal_arn = awsroles[0].split(',')[1]\n\nwith open(cookiefile, 'wb') as f:\n pickle.dump(session.cookies, f)\n\n# Use the assertion to get an AWS STS token using Assume Role with SAML\nconn = boto.sts.connect_to_region(region)\ntoken = conn.assume_role_with_saml(role_arn, principal_arn, assertion, duration_seconds=32400)\n\n# Write the AWS STS token into the AWS credential file\nhome = expanduser(\"~\")\nfilename = home + awsconfigfile\n\n# Read in the existing config file\nconfig = ConfigParser.RawConfigParser()\nconfig.read(filename)\n\n# Put the credentials into a saml specific section instead of clobbering\n# the default credentials\nif not config.has_section('saml'):\n config.add_section('saml')\n\nconfig.set('saml', 'output', outputformat)\nconfig.set('saml', 'region', region)\nconfig.set('saml', 'aws_access_key_id', token.credentials.access_key)\nconfig.set('saml', 'aws_secret_access_key', token.credentials.secret_key)\nconfig.set('saml', 'aws_session_token', token.credentials.session_token)\n\n# Write the updated config file\nwith open(filename, 'w+') as configfile:\n config.write(configfile)\n\n# Give the user some basic info as to what has just happened\nprint '\\n\\n----------------------------------------------------------------'\nprint 'Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(filename)\nprint 'Note that it will expire at {0}.'.format(token.credentials.expiration)\nprint 'After this time, you may safely rerun this script to refresh your access key pair.'\nprint 'To use this credential, call the AWS CLI with the --profile option (e.g. 
aws --profile saml ec2 describe-instances).'\nprint '----------------------------------------------------------------\\n\\n'\n\n# Use the AWS STS token to list all of the S3 buckets\ns3conn = boto.s3.connect_to_region(region,\n aws_access_key_id=token.credentials.access_key,\n aws_secret_access_key=token.credentials.secret_key,\n security_token=token.credentials.session_token)\n\nbuckets = s3conn.get_all_buckets()\n\nprint 'Simple API example listing all S3 buckets:'\nprint(buckets)\n\n"
}
] | 4 |
GastonRAraujo/Sistemas-discretos
|
https://github.com/GastonRAraujo/Sistemas-discretos
|
eef5f99ec5e4736f4a1a07e2d288494e5b89bafc
|
25a5fba458f8cf5e2188be25829192bdcd6911e8
|
2fffd76a20b092c755112fef64acf0031223aaa6
|
refs/heads/main
| 2023-03-05T20:08:44.816706 | 2021-02-13T04:20:07 | 2021-02-13T04:20:07 | 335,429,606 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5389652848243713,
"alphanum_fraction": 0.5933202505111694,
"avg_line_length": 16.130952835083008,
"blob_id": "74208ca8e2d81ba130293f23004815f9d554101a",
"content_id": "3ee1bbe5673db509006ea4ffdc73fd9b07e4df99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 84,
"path": "/Disprey_ClickCI.py",
"repo_name": "GastonRAraujo/Sistemas-discretos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 24 16:47:35 2020\r\n\r\n@author: GastonRA\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 24 16:11:46 2020\r\n\r\n@author: GastonRA\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib\r\nimport tkinter as tk\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom matplotlib.figure import Figure\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n#Parametros -----------------------------\r\na = 3.43\r\nb = 0.31\r\n\r\n\r\nt0 = 100\r\n\r\n\r\nprint(np.cos(a), np.sin(a))\r\n\r\ndef disprey(a,b,xn,yn):\r\n xk = a * xn*(1-xn)-xn*yn\r\n yk = (1/b) * xn*yn\r\n\r\n return xk, yk\r\n\r\n\r\ndef Disprey_Cicle(xi,yi,a_,b_,tf):\r\n x1 = [xi]*(tf)\r\n x2 = [yi]*(tf)\r\n for i in range(tf-1):\r\n x0 = x1[i]\r\n y0 = x2[i]\r\n x1[i+1], x2[i+1] = disprey(a_,b_,x0,y0)\r\n a.add_artist(plt.scatter(x1,x2, s = 0.1, marker='.'))\r\n canvas.draw()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef onclick(event):\r\n ix, iy = float(event.xdata), float(event.ydata)\r\n \r\n Disprey_Cicle(ix,iy,2.7,0.31,1000)\r\n print(ix, iy)\r\n\r\n\r\n\r\nroot = tk.Tk()\r\ncircle1 = plt.Circle((0, 0), 1, color='white')\r\n\r\nf = plt.figure()\r\na = f.add_subplot(111)\r\nf, a = plt.subplots()\r\na.add_artist(circle1)\r\na.set_xlim(0.1,0.5)\r\na.set_ylim(0,1.6)\r\n\r\n#a.plot(circle1)\r\ncanvas = FigureCanvasTkAgg(f, master=root)\r\ncanvas.draw()\r\ncanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\r\n\r\ncanvas.mpl_connect('button_press_event', onclick)\r\n\r\ncanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\r\n\r\nroot.mainloop()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7825396656990051,
"alphanum_fraction": 0.7825396656990051,
"avg_line_length": 62,
"blob_id": "34410d1b9799e01ff26a1e2ff64114ccf240ff9d",
"content_id": "2d0280d79d2ced7370aa08721d68f67492e5d8bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 641,
"license_type": "no_license",
"max_line_length": 330,
"num_lines": 10,
"path": "/README.md",
"repo_name": "GastonRAraujo/Sistemas-discretos",
"src_encoding": "UTF-8",
"text": "# Sistemas-discretos\nPermite graficar los mapas de \"Hénon\", \"Disprey\" y \"Cremona.\n\n## Miniproyecto\nEste codigo permite graficar los sistemas \"Hénon\" y \"Disprey\" para asi obtener diferentes gráficos: de alta resolución en el caso de Hénon, y variaciones de los parámetros en el caso de Disprey. A su vez cuenta con el codigo para simular la aproximacón semicontinua del mapa de \"Hénon\" y gráficos de alta resolución de la misma. \n\n## \"ClickCI\"\nEstos archivos permiten obtener un gráfico a partir de una condicion inicial haciendo click en el punto deseado. Se implemento para los sistemas \"Cremona\" y \"Disprey\"\n\nContacto: [email protected]\n"
},
{
"alpha_fraction": 0.5179855823516846,
"alphanum_fraction": 0.5689448714256287,
"avg_line_length": 18.292682647705078,
"blob_id": "5fd93fcba7a845a0ec10810da1b468d3fdc3c016",
"content_id": "d90904a824532d5aa2199aadf2b48c81deaac6b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1668,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 82,
"path": "/Cremona_ClickCI.py",
"repo_name": "GastonRAraujo/Sistemas-discretos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 24 16:11:46 2020\r\n\r\n@author: GastonRA\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib\r\nimport tkinter as tk\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom matplotlib.figure import Figure\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n#Parametros -----------------------------\r\nt0 = 0\r\ntf = 2000\r\n\r\na = 1.32843\r\n\r\n#Memoria ---------------------------------\r\n\r\n#Iteraciones finales\r\nx1 = [0.5]*(tf-t0)\r\nx2 = [0]*(tf-t0)\r\n\r\nprint(np.cos(a), np.sin(a))\r\n\r\ndef cremona(a,xn,yn):\r\n xk = xn * np.cos(a) - (yn - xn*xn) * np.sin(a)\r\n yk = xn * np.sin(a) + (yn - xn*xn) * np.cos(a)\r\n\r\n return xk, yk\r\n\r\n\r\ndef Cremona_Cicle(xi,yi,a_,tf = 2000,):\r\n x1 = [xi]*(tf)\r\n x2 = [yi]*(tf)\r\n for i in range(tf-1):\r\n x0 = x1[i]\r\n y0 = x2[i]\r\n x1[i+1] = x0 * np.cos(a_) - (y0 - x0*x0) * np.sin(a_)\r\n x2[i+1] = x0 * np.sin(a_) + (y0 - x0*x0) * np.cos(a_)\r\n a.add_artist(plt.scatter(x1,x2, s = 0.1, marker='.'))\r\n canvas.draw()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef onclick(event):\r\n ix, iy = float(event.xdata), float(event.ydata)\r\n \r\n Cremona_Cicle(ix, iy, 1.32843)\r\n print(ix, iy)\r\n\r\n\r\n\r\nroot = tk.Tk()\r\ncircle1 = plt.Circle((0, 0), 1, color='white')\r\n\r\nf = plt.figure()\r\na = f.add_subplot(111)\r\nf, a = plt.subplots()\r\na.add_artist(circle1)\r\na.set_xlim(-1.1, +1.1)\r\na.set_ylim(-1.1, +1.1)\r\n\r\n#a.plot(circle1)\r\ncanvas = FigureCanvasTkAgg(f, master=root)\r\ncanvas.draw()\r\ncanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\r\n\r\ncanvas.mpl_connect('button_press_event', onclick)\r\n\r\ncanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\r\n\r\nroot.mainloop()\r\n\r\n\r\n"
}
] | 3 |
pablogiaccaglia/KairosBot
|
https://github.com/pablogiaccaglia/KairosBot
|
1afa6deb0dfc447f34050c255a293ded4bd8fc17
|
dd4c2fa42a83cf3c8a07dca07acdcb0305e586be
|
04571dda03aaf2d93601f2cf2c92692f397f808e
|
refs/heads/master
| 2023-08-23T19:03:33.631598 | 2021-10-14T06:25:15 | 2021-10-14T06:25:15 | 414,581,237 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6578335762023926,
"alphanum_fraction": 0.67689448595047,
"avg_line_length": 24.60714340209961,
"blob_id": "72413c4f469821279704115f2a95327c7aad6b78",
"content_id": "3b8ec3aa2d627178e1f17a7587923a3fdd8ea038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 84,
"path": "/kairos/guiutils.py",
"repo_name": "pablogiaccaglia/KairosBot",
"src_encoding": "UTF-8",
"text": "from enum import Enum\nfrom tkinter import Button\nfrom validator_collection import checkers\n\n\nclass View(Enum):\n LOGIN_VIEW = \"loginView\"\n CALENDAR_VIEW = \"calendarView\"\n BOOKING_VIEW = \"bookingView\"\n BOOKING_FAILED_VIEW = \"bookingFailedView\"\n BOOKING_OK_VIEW = \"bookingOkView\"\n\n\n## RELATIVE_PATHS\nbookButtonRelPath = \"button_book.png\"\nloginButtonRelPath = \"button_login.png\"\nchangeDateButtonRelPath = \"button_change_date.png\"\ncloseAppButtonRelPath = \"button_close.png\"\nretryButtonRelPath = \"button_retry.png\"\nloginBackgroundRelPath = \"login_bg.png\"\npasswordEntryRelPath = \"password_entry.png\"\nusernameEntryRelPath = \"username_entry.png\"\n\n## SIZE CONSTANTS\nloginWindowWidth = \"464\"\nloginWindowHeight = \"853\"\nwideWindowWidth = \"864\"\nwideWindowHeight = \"628\"\nregularWindowWidth = \"474\"\nregularWindowHeight = \"628\"\n\n\ndef getWindowSizeAsString(width, height) -> str:\n return str(width) + \"x\" + str(height)\n\n\ndef deleteTextOnCallback(event): # note that you must include the event as an arg, even if you don't use it.\n event.widget.delete(0, \"end\")\n return None\n\n\ndef validateUserInput(userId, password):\n return validatePassword(password) and validateID(userId)\n\n\ndef validateID(userId):\n try:\n return \\\n not checkers.is_none(userId) \\\n and checkers.is_string(userId) \\\n and checkers.has_length(userId, minimum=7, maximum=7) \\\n and checkers.is_integer(int(userId), minimum=5000000, maximum=8000000)\n except:\n return False\n\n\ndef validatePassword(password):\n try:\n return \\\n not checkers.is_none(password) \\\n and checkers.is_string(password) \\\n and checkers.has_length(password, minimum=1, maximum=200)\n except:\n return False\n\n\ndef addButtonToWindow(xPos, yPos, width, height, callback, buttonImage, cursor=\"hand\"):\n button = Button(\n image=buttonImage,\n borderwidth=0,\n highlightthickness=0,\n command=callback,\n relief=\"flat\",\n cursor=cursor\n )\n\n button.place(\n x=xPos,\n y=yPos,\n width=width,\n height=height\n )\n\n return button\n"
},
{
"alpha_fraction": 0.6746666431427002,
"alphanum_fraction": 0.6746666431427002,
"avg_line_length": 27.846153259277344,
"blob_id": "95275caadbd03d197805acf79b029543fd660e46",
"content_id": "db3b79d5c9e4ad6b6bb79e61481e381cf724bd9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/kairos/utils.py",
"repo_name": "pablogiaccaglia/KairosBot",
"src_encoding": "UTF-8",
"text": "from os import path\nimport sys\n\n\ndef relativeToAbsPath(relative_path: str):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = path.abspath(\".\")\n\n return path.join(base_path, relative_path)\n"
},
{
"alpha_fraction": 0.6184509992599487,
"alphanum_fraction": 0.6251220107078552,
"avg_line_length": 48.167999267578125,
"blob_id": "58282762f6bed614ff83e8b35bf24aa7d1dbbd19",
"content_id": "8aae67e9b5cce65efff3308c86e3ee6dd27c027c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6146,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 125,
"path": "/kairos/bot/KairosBot.py",
"repo_name": "pablogiaccaglia/KairosBot",
"src_encoding": "UTF-8",
"text": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver import ChromeOptions, Chrome\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport selenium.webdriver.support.expected_conditions as EC\nfrom kairos.bot.botutils import getDateTimeObjectFromItalianText, parseBookingInfo\nfrom kairos.utils import relativeToAbsPath\n\n\nclass KairosBot:\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.bookingInfoList = []\n self.bookingInfoDicts = []\n\n def book(self, dateToBook):\n\n # chromedriver setup\n chrome_options = ChromeOptions()\n # chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--window-size=1920,1080\")\n path = relativeToAbsPath(\"chromedriver\")\n driver = Chrome(executable_path=path, options=chrome_options)\n\n try:\n driver.get(\n \"https://kairos.unifi.it/agendaweb/index.php?view=login&include=login&from=prenotalezione&from_include=prenotalezione_home&_lang=it\")\n\n privacySliderXPath = '//*[@id=\"main-content\"]/div[4]/div[2]/div[2]/div/div[3]/div[2]/label/span'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, privacySliderXPath)))\n self.__click(driver, privacySliderXPath)\n\n accessSliderXPath = '//*[@id=\"main-content\"]/div[4]/div[2]/div[2]/div/div[4]/div[2]/label/span'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, accessSliderXPath)))\n self.__click(driver, accessSliderXPath)\n\n loginButtonXPath = '//*[@id=\"oauth_btn\"]'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, loginButtonXPath)))\n self.__click(driver, loginButtonXPath)\n\n usernameInputXPath = '//*[@id=\"username\"]'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, usernameInputXPath)))\n usernameWebElement = driver.find_element_by_xpath(usernameInputXPath)\n self.__fillData(driver, usernameWebElement, self.username)\n\n passwordInputXPath = '//*[@id=\"password\"]'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, passwordInputXPath)))\n passwordWebElement = driver.find_element_by_xpath(passwordInputXPath)\n self.__fillData(driver, passwordWebElement, self.password)\n\n accessoButtonXPath = '/html/body/div/div/div/div[1]/form/div[5]/button'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, accessoButtonXPath)))\n self.__click(driver, accessoButtonXPath)\n\n driver.get(\n \"https://kairos.unifi.it/agendaweb/index.php?view=prenotalezione&include=prenotalezione&_lang=it\")\n\n mainContainerXPath = '// *[ @ id = \"prenotazioni_container\"]'\n WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, mainContainerXPath)))\n mainContainerWebElement = driver.find_element_by_xpath(mainContainerXPath)\n WebDriverWait(mainContainerWebElement, 5).until(\n EC.visibility_of_element_located((By.CLASS_NAME, \"box-header-big\")))\n boxContainersList = mainContainerWebElement.find_elements_by_class_name(\"box-header-big\")\n\n dateFound = False\n\n for dateBox in boxContainersList:\n dateObject = getDateTimeObjectFromItalianText(dateBox.text).date()\n if dateObject == dateToBook:\n dateFound = True\n\n coloredBox = dateBox.find_element_by_xpath(\"./..\").find_element_by_xpath(\"./..\")\n coloredBoxSection1 = 
coloredBox.find_element_by_class_name(\"colored-box-section-1\")\n                    # classSectionList = coloredBoxSection1.find_elements_by_class_name(\"libretto-course-name\")\n                    WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, \"only-1-click\")))\n                    lessonBookLinksList = coloredBoxSection1.find_elements_by_class_name(\"only-1-click\")\n\n                    for link in lessonBookLinksList:\n                        WebDriverWait(driver, 5).until(EC.visibility_of(link))\n                        link.click()\n\n                        WebDriverWait(driver, 5).until(\n                            EC.visibility_of_element_located((By.XPATH, '//*[@id=\"popup_conferma_title\"]/span[2]')))\n                        bookingInfo = driver.find_element_by_xpath('//*[@id=\"popup_conferma_title\"]/span[2]')\n                        self.bookingInfoList.append(bookingInfo.text)\n\n                        closePopupButtonXpath = '//*[@id=\"popup_conferma_buttons_row\"]/button'\n                        WebDriverWait(driver, 5).until(\n                            EC.visibility_of_element_located((By.XPATH, closePopupButtonXpath)))\n                        self.__click(driver, closePopupButtonXpath)\n\n            if not dateFound:\n                raise Exception(\"Date not found\")\n\n            driver.quit()\n\n            if len(self.bookingInfoList) == 0:\n                raise Exception(\"No available lessons to book\")\n\n            for info in self.bookingInfoList:\n                self.bookingInfoDicts.append(parseBookingInfo(info))\n\n        except Exception as e:\n            if driver is not None:\n                driver.quit()\n            raise e\n\n    def __click(self, driver, XPath):\n        button = driver.find_element_by_xpath(XPath)\n        button.click()\n\n    def __fillData(self, driver, XPath, data):\n        # clear + autofill the fields (clear via key_down(Keys.CONTROL).send_keys('a'))\n        ActionChains(driver) \\\n            .move_to_element(XPath) \\\n            .click().key_down(Keys.CONTROL) \\\n            .send_keys('a') \\\n            .key_up(Keys.CONTROL) \\\n            .send_keys(data) \\\n            .perform()\n"
},
{
"alpha_fraction": 0.5752906203269958,
"alphanum_fraction": 0.5996215343475342,
"avg_line_length": 24,
"blob_id": "f7bd4054625f02c8039462e87ed1a7de43aa5cd0",
"content_id": "5c820ae6dffc770bdeeb96b9f6fb4b152f9888a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3707,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 148,
"path": "/kairos/bot/botutils.py",
"repo_name": "pablogiaccaglia/KairosBot",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nmonthsDict = {\n\n 'gennaio': 'January',\n 'febbraio': 'February',\n 'marzo': 'March',\n 'aprile': 'April',\n 'maggio': 'May',\n 'giugno': 'June',\n 'luglio': 'July',\n 'agosto': 'August',\n 'settembre': 'September',\n 'ottobre': 'October',\n 'novembre': 'November',\n 'dicembre': 'December',\n 'gen': 'Jan',\n 'feb': 'Feb',\n 'mar': 'Mar',\n 'apr': 'Apr',\n 'mag': 'May',\n 'giu': 'Jun',\n 'lug': 'Jul',\n 'ago': 'Aug',\n 'set': 'Sep',\n 'ott': 'Oct',\n 'nov': 'Nov',\n 'dic': 'Dec'\n\n}\n\ndaysDict = {\n\n 'domenica': 'sunday',\n 'lunedì': 'monday',\n 'martedì': 'tuesday',\n 'mercoledì': 'wednesday',\n 'giovedì': 'thursday',\n 'venerdì': 'friday',\n 'sabato': 'saturday',\n 'dom': 'sun',\n 'lun': 'mon',\n 'mar': 'tue',\n 'mer': 'wed',\n 'gio': 'thu',\n 'ven': 'fri',\n 'sab': 'sat'\n\n}\n\n\ndef getDateTimeObjectFromItalianText(italianDateText):\n englishDateString = translateItalianDateToEng(italianDateText)\n # remove day text, we don't need it\n cleanEnglishDateTime = englishDateString.split(' ')\n cleanEnglishDateTime[1] = cleanEnglishDateTime[1].zfill(2)\n cleanEnglishDateTime.pop(0)\n cleanEnglishDateTime = ' '.join(cleanEnglishDateTime)\n cleanEnglishDateTimeObject = datetime.strptime(cleanEnglishDateTime, '%d %B %Y')\n return cleanEnglishDateTimeObject\n\n\ndef translateItalianDateToEng(italianDateText):\n italianDateTextLCase = italianDateText.lower()\n wordsList = italianDateTextLCase.split(\" \")\n\n for i in range(len(wordsList)):\n month = monthsDict.get(wordsList[i])\n if month is not None:\n wordsList[i] = month\n pass\n day = daysDict.get(wordsList[i])\n if day is not None:\n wordsList[i] = day\n\n dateString = ' '.join(wordsList)\n\n return dateString\n\n\n\"\"\" super hardcoded crappy parsing function :( pls don't roast me\n\n following comments will illustrate a real parsing case.\n string to parse : \n \n Hai prenotato la lezione:\n BILANCIO D ESERCIZIO (Cognomi M-Z)\n Martedì 19 Ottobre 2021, 08:20 - 09:40\n D6/0.18 [D6] \n \n\"\"\"\n\n\ndef parseBookingInfo(bookingInfo):\n infoDict = {\n \"courseName\": \"courseName\",\n \"lessonHall\": \"lessonHall\",\n \"lessonDate\": \"lessonDate\",\n \"lessonTime\": \"lessonTime\"\n }\n\n \"\"\" after first replacement we remove useless part. This is what remains :\n \n BILANCIO D ESERCIZIO (Cognomi M-Z)\n Martedì 19 Ottobre 2021, 08:20 - 09:40\n D6/0.18 [D6] \n \n \"\"\"\n\n bookingInfo = bookingInfo.replace(\"Hai prenotato la lezione:\\n\", \"\")\n\n # courseName = \"BILANCIO D ESERCIZIO\"\n infoDict['courseName'] = bookingInfo.split(' (')[0]\n\n \"\"\" Progressive bookingInfo reduction to obtain needed info :\n \n Martedì 19 Ottobre 2021, 08:20 - 09:40\n D6/0.18 [D6] \n \n \"\"\"\n bookingInfo = bookingInfo.split(')')[1]\n # date conversion from str to datetime <-(date string extracted)\n infoDict['lessonDate'] = getDateTimeObjectFromItalianText(bookingInfo.split(',')[0]).date()\n\n \"\"\" Progressive bookingInfo reduction to obtain needed info :\n\n 08:20 - 09:40\n D6/0.18 [D6] \n\n \"\"\"\n bookingInfo = bookingInfo.split(',')[1]\n\n # remove leading space\n bookingInfo = bookingInfo[1:]\n\n # lessonTime is composed by the first 13 characters in bookingInfo string\n infoDict['lessonTime'] = bookingInfo[:13]\n\n \"\"\" Progressive bookingInfo reduction to obtain needed info :\n \n D6/0.18 [D6] \n\n \"\"\"\n bookingInfo = bookingInfo[14:]\n\n infoDict['lessonHall'] = bookingInfo\n\n return infoDict"
},
{
"alpha_fraction": 0.5278928279876709,
"alphanum_fraction": 0.5555974245071411,
"avg_line_length": 27.55453109741211,
"blob_id": "e0945d579fdcd55e3b02b3e102c2b78875b26922",
"content_id": "725aab2a810a3ea71909aeb2b0b2fdfa6892e971",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18591,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 651,
"path": "/kairos/gui.py",
"repo_name": "pablogiaccaglia/KairosBot",
"src_encoding": "UTF-8",
"text": "import threading\nfrom tkinter import Tk, Canvas, Entry, PhotoImage, StringVar, END, ttk, Label, CENTER, Widget\nfrom abc import ABC, abstractmethod\nfrom tkcalendar import Calendar\nfrom kairos import guiutils\nfrom kairos.bot.KairosBot import KairosBot\nfrom kairos.utils import relativeToAbsPath\nfrom kairos.guiutils import View\nfrom PIL import Image, ImageTk\nfrom functools import partial\nfrom datetime import date, timedelta\n\n\nclass GUI:\n\n def __init__(self):\n self.window = Tk()\n self.window.resizable(False, False)\n self.date = None\n self.canvas = None\n self.isBookingOk = False\n self.kairosBot = None\n self.views = {\n View.LOGIN_VIEW: LoginView(self),\n View.CALENDAR_VIEW: CalendarView(self),\n View.BOOKING_VIEW: BookingView(self),\n View.BOOKING_FAILED_VIEW: BookingFailedView(self),\n View.BOOKING_OK_VIEW: BookingOkView(self)\n }\n self.userId = None\n self.password = None\n self.currentView = None\n\n def runView(self, view: View):\n self.currentView.destroyWidgets()\n self.__buildCommonGUIStructure()\n self.currentView = self.views[view]\n self.currentView.run()\n\n def start(self):\n ico = Image.open(relativeToAbsPath('deadline.png'))\n photo = ImageTk.PhotoImage(ico)\n self.window.wm_iconphoto(False, photo)\n\n self.window.title(\"KairosBot\")\n self.window.configure(bg=\"#FFFFFF\")\n self.canvas = Canvas(\n self.window,\n bg=\"#FFFFFF\",\n height=853,\n width=464,\n bd=0,\n highlightthickness=0,\n relief=\"ridge\"\n )\n\n self.canvas.place(x=0, y=0)\n self.currentView = self.views[View.LOGIN_VIEW]\n self.runView(View.LOGIN_VIEW)\n\n def __buildCommonGUIStructure(self):\n self.window.geometry(guiutils.getWindowSizeAsString(guiutils.regularWindowWidth, guiutils.regularWindowHeight))\n self.window.configure(bg=\"#FFFFFF\")\n\n self.canvas.create_text(\n 32.0,\n 152.0,\n anchor=\"nw\",\n text=\"Prenotazione lezioni dell’intera giornata\",\n fill=\"#000000\",\n font=(\"Roboto\", 13 * -1)\n )\n\n self.canvas.place(x=0, y=0)\n\n self.canvas.create_rectangle(\n 0.0,\n 0.0,\n 864.0,\n 628.0,\n fill=\"#FFFFFF\",\n outline=\"\")\n\n self.canvas.create_text(\n 32.0,\n 32.0,\n anchor=\"nw\",\n text=\"KairosBot\",\n fill=\"#000000\",\n font=(\"Montserrat Bold\", 32 * -1)\n )\n\n self.canvas.create_rectangle(\n 0.0,\n 132.0,\n 864.0,\n 192.0,\n fill=\"#FAFAFA\",\n outline=\"\")\n\n\nclass AbstractView(ABC):\n\n def __init__(self, name: str, gui: 'GUI'):\n self.name = name\n self.gui = gui\n self.canvasItems = []\n self.texts = []\n self.buttons = []\n self.labelWidgets = []\n self.widgets = [self.texts, self.buttons, self.labelWidgets]\n\n def addWidget(self, widget: Widget):\n self.widgets.append(widget)\n\n def addCanvasElement(self, elem):\n self.canvasItems.append(elem)\n\n def addButtonElement(self, elem: Widget):\n self.buttons.append(elem)\n\n def addLabelWidget(self, elem: Widget):\n self.labelWidgets.append(elem)\n\n def destroyWidgets(self):\n for widgetsList in self.widgets:\n for widget in widgetsList:\n widget.destroy()\n\n def destroyCanvasItems(self):\n for elem in self.canvasItems:\n self.gui.canvas.delete(elem)\n\n @abstractmethod\n def run(self):\n pass\n\n\nclass LoginView(AbstractView):\n\n def __init__(self, gui: 'GUI'):\n super().__init__(\"loginView\", gui)\n self.inputEntries = []\n self.widgets.append(self.inputEntries)\n self.passwordEntry = None\n self.idEntry = None\n\n def addInputEntry(self, widget: Widget):\n self.inputEntries.append(widget)\n\n def __setDefaultInput(self):\n self.idEntry.delete(0, END)\n self.passwordEntry.delete(0, END)\n 
self.idEntry.insert(END, 'Student ID')\n self.passwordEntry.insert(END, 'password')\n\n def setUserData(self, userId, password):\n self.gui.userId = userId\n self.gui.password = password\n\n def __performInputAction(self, userIdSVar, passwordSVar):\n userId = userIdSVar.get()\n password = passwordSVar.get()\n if guiutils.validateUserInput(userId, password):\n self.setUserData(userId, password)\n self.gui.runView(View.CALENDAR_VIEW)\n else:\n self.__setDefaultInput()\n self.gui.window.update()\n\n def run(self):\n self.gui.window.geometry(guiutils.getWindowSizeAsString(guiutils.loginWindowWidth, guiutils.loginWindowHeight))\n image_image_1 = PhotoImage(\n file=relativeToAbsPath(guiutils.loginBackgroundRelPath))\n\n smaller_image = image_image_1.subsample(5, 5)\n\n imgElem1 = self.gui.canvas.create_image(\n 220.0,\n 430.0,\n image=smaller_image\n )\n\n self.addCanvasElement(imgElem1)\n\n textElem1 = self.gui.canvas.create_text(\n 220.5,\n 411.0,\n anchor=\"nw\",\n text=\"Ciao!\",\n fill=\"#FFFFFF\",\n font=(\"SFProDisplay Semibold\", 20 * -1)\n )\n\n self.addCanvasElement(textElem1)\n\n textElem2 = self.gui.canvas.create_text(\n 85.0,\n 441.0,\n anchor=\"nw\",\n text=\"Accedi per prenotare\",\n fill=\"#FFFFFF\",\n font=(\"SFProDisplay Heavy\", 34 * -1)\n )\n\n self.addCanvasElement(textElem2)\n\n id_entry_image = PhotoImage(\n file=relativeToAbsPath(guiutils.usernameEntryRelPath))\n\n imgElem2 = self.gui.canvas.create_image(\n 235.0,\n 556.0,\n image=id_entry_image\n )\n\n self.addCanvasElement(imgElem2)\n\n IDEntryStringVar = StringVar()\n\n self.idEntry = Entry(\n bd=0,\n bg=\"#FFFFFF\",\n highlightthickness=0,\n textvariable=IDEntryStringVar\n )\n\n self.idEntry.place(\n x=100.0,\n y=519.0,\n width=268.0,\n height=72.0\n )\n\n self.idEntry.bind(\"<Button-1>\", guiutils.deleteTextOnCallback)\n\n self.addInputEntry(self.idEntry)\n\n password_entry_image = PhotoImage(\n file=relativeToAbsPath(guiutils.passwordEntryRelPath))\n\n imgElem3 = self.gui.canvas.create_image(\n 235.0,\n 643.0,\n image=password_entry_image\n )\n\n self.addCanvasElement(imgElem3)\n\n passwordEntryStringVar = StringVar()\n\n self.passwordEntry = Entry(\n bd=0,\n bg=\"#FFFFFF\",\n highlightthickness=0,\n textvariable=passwordEntryStringVar,\n show=\"*\",\n )\n self.passwordEntry.place(\n x=100.0,\n y=607.0,\n width=270.0,\n height=70.0\n )\n self.passwordEntry.bind(\"<Button-1>\", guiutils.deleteTextOnCallback)\n\n self.addInputEntry(self.passwordEntry)\n\n loginButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.loginButtonRelPath))\n\n loginButton = guiutils.addButtonToWindow(\n xPos=180.0,\n yPos=704.0,\n width=95.0,\n height=41.0,\n callback=partial(self.__performInputAction, IDEntryStringVar, passwordEntryStringVar),\n buttonImage=loginButtonImage)\n\n self.addButtonElement(loginButton)\n self.__setDefaultInput()\n self.gui.window.mainloop()\n\n\nclass CalendarView(AbstractView):\n\n def __init__(self, gui: 'GUI'):\n super().__init__(\"calendarView\", gui)\n self.cal = None\n\n def destroyWidgets(self):\n self.cal.destroy()\n super().destroyWidgets()\n\n def __buildCalendar(self, mindate, maxdate):\n if self.cal is not None:\n self.cal.destroy()\n self.cal = Calendar(self.gui.window, font=\"Arial 14\", selectmode='day', locale='ita',\n mindate=mindate, maxdate=maxdate, disabledforeground='red', foreground='black',\n weekendbackground='white', disableddaybackground='gray',\n firstweekday=\"monday\", cursor=\"hand\")\n\n self.cal.grid(padx=110, pady=270)\n\n for i in range(6):\n 
self.cal._week_nbs[i].destroy() # evil trick going on here :) pls dont roast my code\n\n s = ttk.Style(self.gui.window)\n s.theme_use('classic')\n\n def __prepareBookingAction(self):\n self.gui.date = self.cal.selection_get()\n self.gui.runView(View.BOOKING_VIEW),\n\n def run(self):\n bookButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.bookButtonRelPath))\n\n bookButton = guiutils.addButtonToWindow(\n xPos=47.0,\n yPos=522.0,\n width=380.0,\n height=44.0,\n callback=self.__prepareBookingAction,\n buttonImage=bookButtonImage)\n\n self.addButtonElement(bookButton)\n\n textElem1 = self.gui.canvas.create_text(\n 32.0,\n 86.0,\n anchor=\"nw\",\n text=\"Prenotazione\",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem1)\n\n textElem2 = self.gui.canvas.create_text(\n 148.0,\n 145.0,\n anchor=\"nw\",\n text=\"Seleziona la data\",\n fill=\"#000000\",\n font=(\"Montserrat Bold\", 20 * -1)\n )\n\n self.addCanvasElement(textElem2)\n\n today = date.today()\n\n mindate = today\n maxdate = today + timedelta(days=10)\n\n self.__buildCalendar(mindate, maxdate)\n self.gui.window.mainloop()\n\n\nclass BookingFailedView(AbstractView):\n def __init__(self, gui: 'GUI'):\n super().__init__(\"bookingFailedView\", gui)\n\n def run(self):\n textElem1 = self.gui.canvas.create_text(\n 107.0,\n 150.0,\n anchor=\"nw\",\n text=\"Prenotazione non andata a buon fine \",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem1)\n\n retryButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.retryButtonRelPath))\n\n retryButton = guiutils.addButtonToWindow(\n xPos=119.0,\n yPos=430.0,\n width=236.0,\n height=44.0,\n callback=partial(self.gui.runView, View.BOOKING_VIEW),\n buttonImage=retryButtonImage)\n\n self.addButtonElement(retryButton)\n\n changeDateButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.changeDateButtonRelPath))\n\n changeDateButton = guiutils.addButtonToWindow(\n xPos=119.0,\n yPos=372.0,\n width=236.0,\n height=44.0,\n callback=partial(self.gui.runView, View.CALENDAR_VIEW),\n buttonImage=changeDateButtonImage\n )\n\n self.addButtonElement(changeDateButton)\n\n textElem2 = self.gui.canvas.create_text(\n 32.0,\n 86.0,\n anchor=\"nw\",\n text=\"Prenotazione\",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem2)\n\n closeAppButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.closeAppButtonRelPath))\n\n closeAppButton = guiutils.addButtonToWindow(\n xPos=119.0,\n yPos=314.0,\n width=236.0,\n height=44.0,\n callback=self.gui.window.destroy,\n buttonImage=closeAppButtonImage\n )\n\n self.addButtonElement(closeAppButton)\n\n self.gui.window.mainloop()\n\n\nclass BookingView(AbstractView):\n\n def __init__(self, gui: 'GUI'):\n super().__init__(\"bookingView\", gui)\n self.POLLING_DELAY = 50 # ms\n self.lock = threading.Lock() # Lock for shared resources.\n self.finished = False\n self.ind = -1\n self.currentLoadingBarXPos = None\n\n def __destroyLoadingBar(self):\n for widget in self.labelWidgets:\n widget.destroy()\n\n def __fun(self, j):\n self.__drawBlackBlock(j)\n self.gui.window.update_idletasks()\n\n def __drawBlackBlock(self, index):\n widget = Label(self.gui.window, bg=\"#1F2732\", width=1, height=1)\n widget.place(x=70 + index - 1 * 22, y=350)\n self.currentLoadingBarXPos = 70 + index - 1 * 22\n self.addLabelWidget(widget)\n\n def __check_status(self):\n with self.lock:\n if not self.finished:\n self.ind = self.ind + 1\n if self.currentLoadingBarXPos == 400:\n 
self.ind = -1\n self.__destroyLoadingBar()\n\n self.gui.window.after(1, self.__fun(self.ind))\n self.gui.window.update_idletasks()\n self.gui.window.after(self.POLLING_DELAY, self.__check_status) # Keep polling.\n\n if self.finished:\n self.ind = -1 # restore black block starting index\n if self.isBookingOk:\n self.gui.runView(View.BOOKING_OK_VIEW),\n else:\n self.gui.runView(View.BOOKING_FAILED_VIEW),\n\n def __book(self):\n try:\n self.gui.kairosBot = KairosBot(self.gui.userId, self.gui.password)\n self.gui.window.after(2000, self.gui.kairosBot.book(self.gui.date))\n self.isBookingOk = True\n except Exception as e:\n print(str(e))\n self.isBookingOk = False\n\n with self.lock:\n self.finished = True\n\n def __startLoadingBar(self):\n if self.lock.locked():\n self.lock.release()\n self.ind = -1\n\n with self.lock:\n self.finished = False\n t = threading.Thread(target=self.__book)\n t.daemon = True\n self.__check_status() # Start polling.\n t.start()\n\n def run(self):\n\n textElem1 = self.gui.canvas.create_text(\n 32.0,\n 86.0,\n anchor=\"nw\",\n text=\"Prenotazione in corso\",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem1)\n\n textElem2 = self.gui.canvas.create_text(\n 140.0,\n 250.0,\n anchor=\"nw\",\n text=\"Prenotazione in corso...\",\n fill=\"#000000\",\n font=(\"Montserrat Bold\", 20 * -1)\n )\n\n self.addCanvasElement(textElem2)\n\n s = ttk.Style(self.gui.window)\n s.theme_use('classic')\n\n self.__startLoadingBar()\n self.gui.window.update()\n self.gui.window.mainloop()\n\n\nclass BookingOkView(AbstractView):\n def __init__(self, gui: 'GUI'):\n super().__init__(\"bookingOkView\", gui)\n self.tree = None\n\n def destroyWidgets(self):\n self.tree.destroy()\n super().destroyWidgets()\n\n def __buildTreeView(self):\n # Add a self.treeview widget\n self.tree = ttk.Treeview(\n self.gui.window,\n column=(\"Corso\", \"Aula\", \"Orario\", \"Data\"),\n show='headings',\n height=5)\n self.tree.column(\"# 1\", anchor=CENTER)\n self.tree.heading(\"# 1\", text=\"Corso\")\n self.tree.column(\"# 2\", anchor=CENTER)\n self.tree.heading(\"# 2\", text=\"Aula\")\n self.tree.column(\"# 3\", anchor=CENTER)\n self.tree.heading(\"# 3\", text=\"Orario\")\n self.tree.column(\"# 4\", anchor=CENTER)\n self.tree.heading(\"# 4\", text=\"Data\")\n\n # Insert the data in treeview widget\n for dictEntry in self.gui.kairosBot.bookingInfoDicts:\n self.__fillEntry(dictEntry)\n self.tree.grid(padx=30, pady=270)\n\n s = ttk.Style(self.gui.window)\n s.theme_use('aqua')\n\n def __fillEntry(self, entry):\n self.tree.insert(\n '',\n 'end',\n text=\"1\",\n values=(entry[\"courseName\"], entry[\"lessonHall\"], entry['lessonTime'], entry['lessonDate'])\n )\n\n def run(self):\n self.gui.window.geometry(guiutils.getWindowSizeAsString(guiutils.wideWindowWidth, guiutils.wideWindowHeight))\n self.gui.canvas.config(width=864, height=628)\n\n textElem1 = self.gui.canvas.create_text(\n 342.0,\n 160.0,\n anchor=\"nw\",\n text=\"Prenotazione completata\",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem1)\n\n changeDateButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.changeDateButtonRelPath))\n\n changeDateButton = guiutils.addButtonToWindow(\n xPos=310.0,\n yPos=532.0,\n width=236.0,\n height=44.0,\n callback=partial(self.gui.runView, View.CALENDAR_VIEW),\n buttonImage=changeDateButtonImage)\n\n self.addButtonElement(changeDateButton)\n\n closeAppButtonImage = PhotoImage(\n file=relativeToAbsPath(guiutils.closeAppButtonRelPath))\n\n 
closeAppButton = guiutils.addButtonToWindow(\n xPos=310.0,\n yPos=474.0,\n width=236.0,\n height=44.0,\n callback=self.gui.window.destroy,\n buttonImage=closeAppButtonImage)\n\n self.addButtonElement(closeAppButton)\n\n textElem2 = self.gui.canvas.create_text(\n 32.0,\n 86.0,\n anchor=\"nw\",\n text=\"Prenotazione\",\n fill=\"#111111\",\n font=(\"Roboto\", 16 * -1)\n )\n\n self.addCanvasElement(textElem2)\n\n textElem3 = self.gui.canvas.create_text(\n 28.0,\n 228.0,\n anchor=\"nw\",\n text=\"Lezioni prenotate\",\n fill=\"#000000\",\n font=(\"Roboto\", 22 * -1)\n )\n\n self.addCanvasElement(textElem3)\n\n textElem4 = self.gui.canvas.create_text(\n 28.0,\n 206.0,\n anchor=\"nw\",\n text=\"Posto a lezione\",\n fill=\"#77767E\",\n font=(\"Roboto\", 13 * -1)\n )\n\n self.addCanvasElement(textElem4)\n\n self.__buildTreeView()\n self.gui.window.mainloop()\n\n\nif __name__ == '__main__':\n guis = GUI()\n guis.start()\n"
}
] | 5 |
dittmer/Zh3lScratch
|
https://github.com/dittmer/Zh3lScratch
|
7eeecb2ff724b6ee9afcc329eea9d550282e076a
|
4398e35143afdd68a1e7bfa7ee0100a8680ee85f
|
94be31d32346fee88b3dfe9f9e0e16c8db686ac9
|
refs/heads/master
| 2021-08-07T04:20:18.411708 | 2017-11-07T14:10:18 | 2017-11-07T14:10:18 | 108,318,333 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4553772211074829,
"alphanum_fraction": 0.48357582092285156,
"avg_line_length": 62.4571418762207,
"blob_id": "d42bd35e7e9170e9428c31ee2d592f76c2e05065",
"content_id": "79fd9aa4e8d9c004fc28bc3808e5767ef8b040c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6667,
"license_type": "no_license",
"max_line_length": 372,
"num_lines": 105,
"path": "/forLimit/samples.py",
"repo_name": "dittmer/Zh3lScratch",
"src_encoding": "UTF-8",
"text": "# samples\n\n#samples = {}\n\n\n# data driven\nsamples['Fake'] = { 'name': [\n\t\t\t\t'latino_DoubleEG_fake.root',\n\t\t\t\t'latino_DoubleMuon_fake.root',\n\t\t\t\t'latino_SingleMuon_fake.root',\n\t\t\t\t'latino_SingleElectron_fake.root',\n\t\t\t\t'latino_MuonEG_fake.root'\n ],\n 'weight' : 'fakeW3l', # weight/cut \n #'weight' : 'fakeW3l', # weight/cut \n 'isData': ['all'], \n 'weights' : [\n 'std_vector_trigger[8] || std_vector_trigger[6]',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && (std_vector_trigger[13] || std_vector_trigger[11])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && (std_vector_trigger[42] || std_vector_trigger[43])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && !(std_vector_trigger[42] || std_vector_trigger[43]) && (std_vector_trigger[46])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && !(std_vector_trigger[42] || std_vector_trigger[43]) && !(std_vector_trigger[46]) && (std_vector_trigger[0] || std_vector_trigger[56])'\n ], \n }\n\n\n\nsamples['ttZ'] = { 'name': ['latino_TTZToLLNuNu_M-10.root'], \n 'weight' : 'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]',\n # 'weights': ['1'] , \n 'isData': ['0'], \n }\n\n\nsamples['WZ'] = { 'name': [\n 'latino_WZTo3LNu.root'\n # 'latino_WZ.root'\n ],\n 'weight' : 'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]*std_vector_lepton_genmatched[2]*GEN_weight_SM/abs(GEN_weight_SM)',\n# 'weights': ['0.002214825'] , \n #'isData': ['0'], \n }\n\n\nsamples['ZZ'] = { 'name': [\n 'latino_ZZTo4L.root'\n ],\n 'weight' : 'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]*std_vector_lepton_genmatched[2]*GEN_weight_SM/abs(GEN_weight_SM)',\n# 'weights': ['0.002214825'] , \n #'isData': ['0'], \n }\n\n\nsamples['VVV'] = { 'name': [\n 'latino_WZZ.root', \n# 'latino_ZZZ.root',\n 'latino_WWW.root',\n 'latino_WWZ.root'\n ], \n 'weight' : 'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]*std_vector_lepton_genmatched[2]*GEN_weight_SM/abs(GEN_weight_SM)', \n #'isData': ['0'], \n }\n\n\n# HWW \n\nsamples['ggZH_hww'] = { 'name': [\n 'latino_ggZH_HToWW_M125.root',\n ], \n 'weight' : 'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]*std_vector_lepton_genmatched[2]*GEN_weight_SM/abs(GEN_weight_SM)', \n }\n\n\nsamples['ZH_hww'] = { 'name': ['latino_HZJ_HToWW_M125.root'], \n 'weight' : 
'puW*baseW*bPogSF*effTrigW3l*std_vector_lepton_idisoW[0]*std_vector_lepton_idisoW[1]*std_vector_lepton_idisoW[2]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*std_vector_lepton_recoW[2]*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]*std_vector_lepton_genmatched[2]*GEN_weight_SM/abs(GEN_weight_SM)', \n }\n\n\n###########################################\n###########################################\n###########################################\n\nsamples['DATA'] = { 'name': [\n\t\t\t\t'latino_DoubleEG_data.root',\n\t\t\t\t'latino_DoubleMuon_data.root',\n\t\t\t\t'latino_MuonEG_data.root',\n\t\t\t\t'latino_SingleElectron_data.root',\n\t\t\t\t'latino_SingleMuon_data.root'\n\n\n ] , \n 'weight' : '1',\n # 'weight' : 'std_vector_trigger[43]', #single mu PD\n # 'weight' : 'std_vector_trigger[0]', #single ele PD\n 'isData': ['all'],\n\n 'weights' : [\n #\n 'std_vector_trigger[8] || std_vector_trigger[6]',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && (std_vector_trigger[13] || std_vector_trigger[11])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && (std_vector_trigger[42] || std_vector_trigger[43])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && !(std_vector_trigger[42] || std_vector_trigger[43]) && (std_vector_trigger[46])',\n '!(std_vector_trigger[8] || std_vector_trigger[6]) && !(std_vector_trigger[13] || std_vector_trigger[11]) && !(std_vector_trigger[42] || std_vector_trigger[43]) && !(std_vector_trigger[46]) && (std_vector_trigger[0] || std_vector_trigger[56])'\n ], \n }\n\n\n\n\n"
},
{
"alpha_fraction": 0.28142857551574707,
"alphanum_fraction": 0.3177551031112671,
"avg_line_length": 39.83333206176758,
"blob_id": "f2d1ab2fb386463bc9d6b12ce1b9efc6ee355dda",
"content_id": "8c3d24e012657a69d57a058c228f645a0c91570e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4900,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 120,
"path": "/forLimit/variables.py",
"repo_name": "dittmer/Zh3lScratch",
"src_encoding": "UTF-8",
"text": "# variables\n\n#variables = {}\n \n#'fold' : # 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow\n \nvariables['events'] = { 'name': '1', \n 'range' : (1,0,2), \n 'xaxis' : 'events', \n 'fold' : 3\n }\n\nvariables['njet'] = { \t'name' : 'njet',\n \t\t\t'range' : (10,0,10),\n\t\t\t'xaxis' : 'N_{jet}',\n 'fold' : 0\n }\n\nvariables['z4lveto'] = { 'name': 'z4lveto',\n 'range' : (20,0,200),\n 'xaxis' : '|m_{lll}-m_{Z}| [GeV]',\n 'fold' : 0\n }\n\nvariables['dmjjmW'] = { 'name': 'dmjjmW',\n 'range' : (15,-100,200),\n 'xaxis' : 'm_{jj}-m_{W}',\n 'fold' : 0\n }\n\nvariables['mtw_notZ'] = { 'name': 'mtw_notZ',\n 'range' : (20,0,200),\n 'xaxis' : 'mT(notZ) [GeV]',\n 'fold' : 0\n }\n\nvariables['dphilmetjj'] = { 'name': 'dphilmetjj',\n 'range' : (10,0,3.14159),\n 'xaxis' : '#Delta#Phi_{MET-jj} ',\n 'fold' : 0\n }\n \nvariables['mTlmetjj'] = { 'name': 'mTlmetjj',\n 'range' : (13,0,260),\n 'xaxis' : 'm_{T} (l #nu jj) [GeV]',\n 'fold' : 0\n }\n\nvariables['pt1'] = { 'name': 'std_vector_lepton_pt[0]', # variable name \n 'range' : (20,0.,200), # variable range\n 'xaxis' : 'lept1_p_{T} [GeV]', # x axis name\n 'fold' : 0\n }\n \nvariables['pt2'] = { 'name': 'std_vector_lepton_pt[1]', # variable name \n 'range' : (20,0.,200), # variable range\n 'xaxis' : 'lept2_p_{T} [GeV]', # x axis name\n 'fold' : 0\n }\n\nvariables['pt3'] = { 'name': 'std_vector_lepton_pt[2]', # variable name \n 'range' : (20,0.,200), # variable range\n 'xaxis' : 'lept3_p_{T} [GeV]', # x axis name\n 'fold' : 0\n }\n\nvariables['zveto_3l'] = { 'name': 'zveto_3l', # variable name \n 'range' : (20,0,100), # variable range\n 'xaxis' : 'm_{ll} - M_{Z} [GeV]', # x axis name\n 'fold' : 0\n }\n\n\nvariables['ptz'] = { 'name': 'ptz', # variable name \n 'range' : (20,0,200), # variable range\n 'xaxis' : 'p_{T}^{Z} [GeV]', # x axis name\n 'fold' : 0\n }\n\nvariables['checkmZ'] = { 'name': 'checkmZ', # variable name \n 'range' : (30,0,150), # variable range\n 'xaxis' : 'm_{ll} [GeV]', # x axis name\n 'fold' : 0\n }\n\n\nvariables['drllmin3l'] = { 'name': 'drllmin3l', # variable name \n# 'range' : (6,0,4), # variable range\n 'range' : ([0.,0.75, 1.5, 2.0, 2.5, 4.0],), # variable range\n# # 'range' : ([0.,0.5,1.0,1.5,2.0,3.0,4.0],), # variable range\n 'xaxis' : 'min #Delta R_{ll}', # x axis name\n 'fold' : 0\n }\n\nvariables['mllmin3l'] = { 'name': 'mllmin3l', # variable name \n 'range' : (15,10,150), # variable range\n 'xaxis' : 'min m_{ll} [GeV]', # x axis name\n 'fold' : 0\n }\n\n\nvariables['mlll'] = { 'name': 'mlll', # variable name \n 'range' : (50,0.,500), # variable range\n 'xaxis' : 'm_{lll} [GeV]', # x axis name\n 'fold' : 0\n }\n\nvariables['btagj0'] = {'name': 'std_vector_jet_cmvav2[0]', # variable name \n 'range' : (48,-1.2,1.2), # variable range\n 'xaxis' : 'cmvav2[0]', # x axis name\n 'fold' : 0\n }\n\n\n\nvariables['met'] = { 'name': 'metPfType1', # variable name \n 'range' : (10,0,200), # variable range\n 'xaxis' : 'pfmet [GeV]', # x axis name\n 'fold' : 0\n }\n"
},
{
"alpha_fraction": 0.5178611278533936,
"alphanum_fraction": 0.600177526473999,
"avg_line_length": 52.01176452636719,
"blob_id": "5152446efcd0e6613a4b11604cae834808239e89",
"content_id": "bee1b39bb4ca7abd5672c9ab23d41f0f20edaa4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4507,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 85,
"path": "/forLimit/soverb.C",
"repo_name": "dittmer/Zh3lScratch",
"src_encoding": "UTF-8",
"text": "\n{\n TFile *f0 = TFile::Open(\"rootFiles_repro/plots_ZH3l.root\");\n\n float n_ZH = ((TH1F*) f0->Get(\"preselection/events/histo_ZH_hww\"))->GetBinContent(1);\n float n_ggZH = ((TH1F*) f0->Get(\"preselection/events/histo_ggZH_hww\"))->GetBinContent(1);\n float n_WZ = ((TH1F*) f0->Get(\"preselection/events/histo_WZ\"))->GetBinContent(1);\n float n_ZZ = ((TH1F*) f0->Get(\"preselection/events/histo_ZZ\"))->GetBinContent(1);\n float n_Fake = ((TH1F*) f0->Get(\"preselection/events/histo_Fake\"))->GetBinContent(1);\n float n_ttZ = ((TH1F*) f0->Get(\"preselection/events/histo_ttZ\"))->GetBinContent(1);\n float n_VVV = ((TH1F*) f0->Get(\"preselection/events/histo_VVV\"))->GetBinContent(1);\n\n float n_S = n_ZH + n_ggZH;\n float n_B = n_WZ + n_ZZ + n_Fake + n_ttZ + n_VVV;\n\n//\t\t\t 12345612345612345612345612345612345678123456123456123456123456123456\n cout << endl;\n printf(\" Zh ggZh WZ ZZ Fake totBG S/B\\n\\n\");\n printf(\"preselection:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%6.3f\\n\", \n n_ZH, n_ggZH, n_WZ, n_ZZ, n_Fake, n_B, (n_S/n_B));\n \n\n n_ZH = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_ZH_hww\"))->GetBinContent(1);\n n_ggZH = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_ggZH_hww\"))->GetBinContent(1);\n n_WZ = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_WZ\"))->GetBinContent(1);\n n_ZZ = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_ZZ\"))->GetBinContent(1);\n n_Fake = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_Fake\"))->GetBinContent(1);\n n_ttZ = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_ttZ\"))->GetBinContent(1);\n n_VVV = ((TH1F*) f0->Get(\"zh3l_13TeV/events/histo_VVV\"))->GetBinContent(1);\n\n n_S = n_ZH + n_ggZH;\n n_B = n_WZ + n_ZZ + n_Fake + n_ttZ + n_VVV;\n\n printf(\"base select :%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%6.3f\\n\", \n n_ZH, n_ggZH, n_WZ, n_ZZ, n_Fake, n_B, (n_S/n_B));\n \n\n n_ZH = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_ZH_hww\"))->GetBinContent(1);\n n_ggZH = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_ggZH_hww\"))->GetBinContent(1);\n n_WZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_WZ\"))->GetBinContent(1);\n n_ZZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_ZZ\"))->GetBinContent(1);\n n_Fake = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_Fake\"))->GetBinContent(1);\n n_ttZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_ttZ\"))->GetBinContent(1);\n n_VVV = ((TH1F*) f0->Get(\"zh3l_13TeV_step2/events/histo_VVV\"))->GetBinContent(1);\n\n n_S = n_ZH + n_ggZH;\n n_B = n_WZ + n_ZZ + n_Fake + n_ttZ + n_VVV;\n\n printf(\"+ dphilmetjj:%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%6.3f\\n\", \n n_ZH, n_ggZH, n_WZ, n_ZZ, n_Fake, n_B, (n_S/n_B));\n \n n_ZH = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_ZH_hww\"))->GetBinContent(1);\n n_ggZH = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_ggZH_hww\"))->GetBinContent(1);\n n_WZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_WZ\"))->GetBinContent(1);\n n_ZZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_ZZ\"))->GetBinContent(1);\n n_Fake = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_Fake\"))->GetBinContent(1);\n n_ttZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_ttZ\"))->GetBinContent(1);\n n_VVV = ((TH1F*) f0->Get(\"zh3l_13TeV_step3/events/histo_VVV\"))->GetBinContent(1);\n\n n_S = n_ZH + n_ggZH;\n n_B = n_WZ + n_ZZ + n_Fake + n_ttZ + n_VVV;\n\n printf(\"+ dmjjmW :%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%6.3f\\n\", \n n_ZH, n_ggZH, n_WZ, n_ZZ, n_Fake, n_B, (n_S/n_B));\n \n n_ZH = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_ZH_hww\"))->GetBinContent(1);\n n_ggZH = ((TH1F*) 
f0->Get(\"zh3l_13TeV_step4/events/histo_ggZH_hww\"))->GetBinContent(1);\n n_WZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_WZ\"))->GetBinContent(1);\n n_ZZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_ZZ\"))->GetBinContent(1);\n n_Fake = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_Fake\"))->GetBinContent(1);\n n_ttZ = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_ttZ\"))->GetBinContent(1);\n n_VVV = ((TH1F*) f0->Get(\"zh3l_13TeV_step4/events/histo_VVV\"))->GetBinContent(1);\n\n n_S = n_ZH + n_ggZH;\n n_B = n_WZ + n_ZZ + n_Fake + n_ttZ + n_VVV;\n\n printf(\"+ mTlmetjj :%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%6.3f\\n\", \n n_ZH, n_ggZH, n_WZ, n_ZZ, n_Fake, n_B, (n_S/n_B));\n \n\n // TH1F* h_ZH = (TH1F*) f0->Get(\"zh3l_13TeV_step2/mtw_notZ/histo_ZH_hww\");\n // TH1F* h_ggZH = (TH1F*) f0->Get(\"zh3l_13TeV_step2/mtw_notZ/histo_ggZH_hww\");\n // TH1F* h_WZ = (TH1F*) f0->Get(\"zh3l_13TeV_step2/mtw_notZ/histo_WZ\");\n\n cout << endl;\n}\n"
},
{
"alpha_fraction": 0.7882353067398071,
"alphanum_fraction": 0.8117647171020508,
"avg_line_length": 41.5,
"blob_id": "458c24918fcbb7c6459ad6084f8736f7c05552b6",
"content_id": "0ff1c1300471ec394356ce4bf8b9cd94f72afcfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 2,
"path": "/README.md",
"repo_name": "dittmer/Zh3lScratch",
"src_encoding": "UTF-8",
"text": "# Zh3lScratch\nRepository to contain Zh3l code, privately, before future merge to HWW\n"
},
{
"alpha_fraction": 0.520442008972168,
"alphanum_fraction": 0.5745856165885925,
"avg_line_length": 32.51852035522461,
"blob_id": "190b22869057f5a9ee143d9df7a8b1be4b9b6fb3",
"content_id": "a4e9347c5761122b4eb770441cdd4b2463b9f762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 27,
"path": "/forLimit/quickplot.C",
"repo_name": "dittmer/Zh3lScratch",
"src_encoding": "UTF-8",
"text": "{\n TFile *f0 = TFile::Open(\"rootFiles_repro/plots_ZH3l.root\");\n\n TH1F* h_ZH = (TH1F*) f0->Get(\"zh3l_13TeV/ptz/histo_ZH_hww\");\n TH1F* h_ggZH = (TH1F*) f0->Get(\"zh3l_13TeV/ptz/histo_ggZH_hww\");\n TH1F* h_WZ = (TH1F*) f0->Get(\"zh3l_13TeV/ptz/histo_WZ\");\n\n cout << \"S/WZ = \" << (h_ZH->Integral() + h_ggZH->Integral()) / h_WZ->Integral() << endl;\n cout << \"low = \" << (h_ZH->Integral(0,10) + h_ggZH->Integral(0,10)) / h_WZ->Integral(0,10) << endl;\n cout << \"high = \" << (h_ZH->Integral(10,21) + h_ggZH->Integral(10,21)) / h_WZ->Integral(10,21) << endl;\n\n h_ZH->Rebin(2);\n h_ggZH->Rebin(2);\n h_WZ->Rebin(2);\n\n h_ZH->SetLineColor(kViolet+1);\n h_ggZH->SetLineColor(kRed);\n h_WZ->SetLineColor(kGreen+2);\n\n h_ZH->Scale(1./h_ZH->Integral());\n h_ggZH->Scale(1./h_ggZH->Integral());\n h_WZ->Scale(1./h_WZ->Integral());\n\n h_ZH->Draw(\"HIST\");\n h_WZ->Draw(\"HIST SAME\");\n h_ggZH->Draw(\"HIST SAME\");\n}\n"
}
] | 5 |
investigatronica/lora
|
https://github.com/investigatronica/lora
|
e76218e18fc062d47b9e481760f7943e7a7cf790
|
c85e615fa8014efecc09263631c872d66d1eba88
|
55e8fcc3cfd084a6cec25cbcd9d756a1985bcf8a
|
refs/heads/main
| 2023-08-03T11:52:40.593221 | 2021-09-10T18:44:21 | 2021-09-10T18:44:21 | 405,168,703 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5973467826843262,
"alphanum_fraction": 0.6106128692626953,
"avg_line_length": 36.95744705200195,
"blob_id": "4c20acadf194473979669e1be7b58b8e3dfad2e8",
"content_id": "ffc17a97c39a6ec27852e13cb1843d551d3afb7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5358,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 141,
"path": "/lorabot.py",
"repo_name": "investigatronica/lora",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport locale, logging, os, subprocess, json, routeros_api\nfrom telegram import (\n ParseMode,\n Update,\n Bot,\n ReplyKeyboardMarkup,\n ReplyKeyboardRemove\n)\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n CallbackContext,\n)\n\nlocale.setlocale(locale.LC_ALL, 'es_AR.utf8')\nbot_id=os.getenv(\"LORA_BOT_ID\")\nhost_pasarela=os.getenv(\"HOST_PASARELA\")\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nautorizados=json.loads(os.environ['LORA_BOT_AUTORIZADOS'])\n\ndef start(update: Update, context: CallbackContext):\n #print(update.message.from_user.id)\n if update.message.from_user.id not in autorizados:\n context.bot.send_message(chat_id=update.message.chat_id, text=\"No estás autorizado. \\n Hasta luego!\")\n else:\n nombre=update.message.from_user.first_name +\" \"+update.message.from_user.last_name\n # kb = [[\"ip\"],[\"ssh\"],[\"sshkill\"]]\n # kb_markup = ReplyKeyboardMarkup(kb)\n context.bot.send_message(update.message.chat.id, text=\"Bienvenido \"+ nombre + \" a la pasarela Lora\", reply_markup=ReplyKeyboardRemove())\n\ndef iipp(update: Update, context: CallbackContext):\n context.bot.send_message(chat_id=update.message.chat_id, text=subprocess.check_output(['hostname', '--all-ip-addresses'])[:-2].decode())\n\ndef reboot(update: Update, context: CallbackContext):\n print(\"chau\")\n os.system(\"sudo reboot\")\n\ndef rebootgw(update: Update, context: CallbackContext):\n connection = routeros_api.RouterOsApiPool(\n os.getenv(\"GW_LORA_IP\"),\n username=os.getenv(\"GW_LORA_USR\"),\n password=os.getenv(\"GW_LORA_PASS\"),\n use_ssl=True,\n ssl_verify=False,\n ssl_verify_hostname=False,\n plaintext_login=True)\n api = connection.get_api()\n api.get_binary_resource('/').call('system/reboot')\n connection.disconnect()\n\n\ndef buscarssh(puerto,usuario):\n # puerto='22223'\n buscar= \"ssh -f -N -T -R\"+puerto+\":localhost:22 \"+usuario+\"@\"+host_pasarela\n p = subprocess.Popen(['ps', '-aux'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n for line in out.splitlines():\n # print(line)\n if buscar in line.decode():\n pid = (line.split(None, 2)[1]).decode()\n return pid\n return 0\n\ndef ssh(update: Update, context: CallbackContext):\n if(update.message.from_user.id==autorizados[0]):\n pid=buscarssh('22223','pi')\n if(int(pid)>0):\n mensaje=\"ya existe un tunel \"+pid \n else:\n p = subprocess.Popen([\"/usr/bin/sshpass\", \"-e\", \"/usr/bin/ssh\", \"-f\", \"-N\", \"-T\", \"-R22223:localhost:22\", \"pi@\"+host_pasarela], stdout=subprocess.PIPE)\n out, err = p.communicate()\n pid=buscarssh('22223', 'pi')\n if(int(pid)>0):\n mensaje=\"se creó el tunel con id: \"+pid+\"\\n\"\n else:\n # print(out)\n print(err)\n mensaje=\"NO se creó el tunel\"+\"\\n\"+err\n\n elif(update.message.from_user.id==autorizados[1]):\n pid=buscarssh('22222','milton')\n if(int(pid)>0):\n mensaje=\"ya existe un tunel \"+pid \n else:\n p = subprocess.Popen([\"/usr/bin/sshpass\", \"-e\", \"/usr/bin/ssh\", \"-f\", \"-N\", \"-T\", \"-R22222:localhost:22\", \"milton@\"+host_pasarela], stdout=subprocess.PIPE)\n out, err = p.communicate()\n pid=buscarssh('22222','milton')\n if(int(pid)>0):\n mensaje=\"se creó el tunel con id: \"+pid+\"\\n\"\n else:\n # print(out)\n print(err)\n mensaje=\"NO se creó el tunel\"+\"\\n\"+err\n\n context.bot.send_message(chat_id=update.message.chat_id, text=mensaje)\n # print(subprocess.check_output)\n\ndef sshkill(update: Update, context: CallbackContext):\n p = 
subprocess.Popen(['ps', '-aux'], stdout=subprocess.PIPE)\n    out, err = p.communicate()\n    if(update.message.from_user.id==autorizados[0]):\n        buscar= \"ssh -f -N -T -R22223:localhost:22 pi@\"+host_pasarela\n    elif(update.message.from_user.id==autorizados[1]):\n        buscar= \"ssh -f -N -T -R22222:localhost:22 milton@\"+host_pasarela\n    for line in out.splitlines():\n        if buscar in line.decode():\n            print(\"encontrado\")\n            pid = (line.split(None, 2)[1]).decode()\n            print(pid)\n            # capture stderr so the error branch below can actually report it\n            p=subprocess.Popen([\"kill\", pid], stderr=subprocess.PIPE)\n            out, err = p.communicate()\n            print(out)\n            print(err)\n            if err:\n                # err is bytes and must be decoded before concatenation\n                mensaje=\"ERROR: \"+err.decode()\n            else:\n                mensaje= \"se eliminó el pid: \"+pid\n            context.bot.send_message(chat_id=update.message.chat_id, text=mensaje)\n            return\n\ndef main() -> None:\n    # Create the Updater and pass it your bot's token.\n    updater = Updater(bot_id)\n    dispatcher = updater.dispatcher\n\n    dispatcher.add_handler(CommandHandler('start', start, Filters.user(user_id=autorizados)))\n    dispatcher.add_handler(CommandHandler('ip', iipp))\n    dispatcher.add_handler(CommandHandler('ssh', ssh))\n    dispatcher.add_handler(CommandHandler('sshkill', sshkill))\n    dispatcher.add_handler(CommandHandler('reboot', reboot))\n    dispatcher.add_handler(CommandHandler('rebootgw', rebootgw))\n\n    updater.start_polling()\n    updater.idle()\n\nif __name__ == '__main__':\n    main()\n"
}
] | 1 |
BME-MIT-IET/iet-hf2021-elia
|
https://github.com/BME-MIT-IET/iet-hf2021-elia
|
5a95b9a80ecc82b2575995889adf52177caa5c6d
|
a12ef479718b181c238672351093cfa4973a65d7
|
28e5c084071a0592e42d8e2daf836801ed15c4f1
|
refs/heads/master
| 2023-04-27T07:03:28.575308 | 2021-05-16T22:06:40 | 2021-05-16T22:06:40 | 361,462,601 | 0 | 0 | null | 2021-04-25T15:15:59 | 2021-05-16T21:50:16 | 2021-05-16T21:50:31 |
JavaScript
|
[
{
"alpha_fraction": 0.5239294767379761,
"alphanum_fraction": 0.5239294767379761,
"avg_line_length": 15.541666984558105,
"blob_id": "ec98def9b6a419c793dcf5405bb635505dd32793",
"content_id": "69b789c179fc7c04276ff567c2578c62d7e4c8d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 24,
"path": "/source/components/core/LoggingComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/**\n * Component for handling the log's.\n */\nclass LoggingComponent {\n /**\n * Logs the message.\n *\n * @param {string} message the message\n */\n async log(message) {\n console.log(message);\n }\n\n /**\n * Logs the error.\n *\n * @param {*} error the error\n */\n error(error) {\n console.error(error);\n }\n}\n\nmodule.exports = LoggingComponent;\n"
},
{
"alpha_fraction": 0.7854406237602234,
"alphanum_fraction": 0.7854406237602234,
"avg_line_length": 42.5,
"blob_id": "d479148c9e2a69a9374b7d898d922f5faed594d9",
"content_id": "ffd544608def9b3f893bf4ae1712e8f2b20efa20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1127,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 24,
"path": "/IET-HF.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# ELIA\nAz E.L.I.A. egy JavaScript-ben írt Discord bot, ami a Discord szerverek felhaszálói élményét bővíti saját funkcióival. Ilyen funkciók:\n- zene lejátszás\n - YouTube linkek, playlist-ek lejátszása\n- hangeffektek\n- szavazások lebonyolítása\n- üzenetek rögzítése egy külön csatornán\n\n## Tervek\nA projekt kezdetleges állapotban van, ezért sok lehetőségünk van javítani a projekten. Mi az alábbiakat választottuk:\n- Technológia fókusz\n - statikus kód ellenőrzés: a projekt JavaScript-ben íródott, amiben nincs statikus ellenőrzés, így ez sokat javít a kód minőségén\n - build + CI beüzemelése: a jövőre tekintve ezen funkciók megkönnyítik a fejlesztést\n - Egységtesztek készítése: \n- Termék/felhasználó fókusz\n - nem-funkcionális jellemzők vizsgálata:\n - BDD tesztek készítése: \n\n## Megvalósított funkciók\n- [statikus kód ellenőrzés](doc/static-code-analysis.md)\n- [egységtesztelés](doc/unit-testing.md)\n- [build + CI beüzemelése](doc/ci.md)\n- [nemfunkcionális tesztelés](doc/nonfunctional-testing.md)\n- [bdd tesztelés](doc/bdd-testing.md)\n"
},
{
"alpha_fraction": 0.6621212363243103,
"alphanum_fraction": 0.6621212363243103,
"avg_line_length": 33.73684310913086,
"blob_id": "ab55d912aa02cbea0bfd1330b74e326a068c6a2c",
"content_id": "7d201695217bc6c64338dc9967054125b228b083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/source/commands/voice/music/LoopSongCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\n\nclass LoopSongCommand extends Command {\n name = \"loopsong\";\n description = \"Start's or stops the current song from looping\";\n usage = \"\";\n type = CommandTypeEnum.MUSIC;\n async execute(message, _args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n )\n elia.musicComponent.musicQueue.loopCurrentSong(message);\n }\n}\n\nmodule.exports = LoopSongCommand;\n"
},
{
"alpha_fraction": 0.6410027146339417,
"alphanum_fraction": 0.6410027146339417,
"avg_line_length": 30.91428565979004,
"blob_id": "15bcc7bf5bbdeecb8f191dfda11d7b8e314f3a8b",
"content_id": "68b391c139db75b7f5afc5ee2bc964252614c661",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 35,
"path": "/source/components/CommandComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "//basic command imports\nconst DeleteMessagesCommand = require(\"../commands/text/DeleteMessagesCommand\");\nconst HelpCommand = require(\"../commands/text/HelpCommand\");\nconst MemeCommand = require(\"../commands/text/MemeCommand\");\nconst PinCommand = require(\"../commands/text/PinCommand\");\nconst PingCommand = require(\"../commands/text/PingCommand\");\nconst PollCommand = require(\"../commands/text/PollCommand\");\nconst Elia = require(\"../Elia\");\n\n/**\n * Component for ELIA which adds basic commands\n */\nclass CommandComponent {\n /**\n * Adds the basic commands to the object in the parameter.\n *\n * @param {Elia} elia an ELIA object\n */\n init(elia) {\n // import generic commands\n let commands = [\n new DeleteMessagesCommand(),\n new HelpCommand(),\n new MemeCommand(),\n new PinCommand(),\n new PingCommand(),\n new PollCommand(),\n ];\n\n commands.forEach((cmd) => elia.commandMap.set(cmd.name, cmd));\n elia.loggingComponent.log(\"Basic commands added to Elia.\");\n }\n}\n\nmodule.exports = CommandComponent;\n"
},
{
"alpha_fraction": 0.6161971688270569,
"alphanum_fraction": 0.6161971688270569,
"avg_line_length": 31.769229888916016,
"blob_id": "032cc4bc04e264cd780e2f8d593338403f44140d",
"content_id": "ab5540b1763f6f45de3246ffc77313ea15e4bff1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 26,
"path": "/source/commands/text/PinCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../Command\");\nconst CommandTypeEnum = require(\"../CommandTypeEnum\");\n\nclass PinCommand extends Command {\n name = \"pin\";\n description = \"send messages to the server's pin channel\";\n usage = \" <message>\";\n hasArguments = true;\n type = CommandTypeEnum.OTHER;\n shouldDelete = false;\n async execute(message, args, elia) {\n const channelID = elia.dataComponent.getPinChannelId(message.guild.id);\n const channel = await message.client.channels.fetch(channelID);\n if (channel) {\n let messageText = args.join(\" \");\n channel.send(`${message.author.toString()} ` + messageText);\n message.delete();\n\n elia.loggingComponent.log(\n message.author.username + \" pinned a message\"\n );\n }\n }\n}\n\nmodule.exports = PinCommand;\n"
},
{
"alpha_fraction": 0.5968412160873413,
"alphanum_fraction": 0.5985037684440613,
"avg_line_length": 30.657894134521484,
"blob_id": "19059d3f173bcd19a8096dc627141f172d335966",
"content_id": "ff7258104a706dd1d63e2cd900a2b85c7fb84f6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1203,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 38,
"path": "/source/components/SoundEffectComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const SoundEffectCommand = require(\"../commands/voice/SoundEffectCommand\");\nconst fs = require(\"fs\");\nconst Elia = require(\"../Elia\");\n\n/**\n * Component for ELIA which adds sound effect commands\n */\nclass SoundEffectComponent {\n /**\n * Adds the sound effect commands to the ELIA object in the parameter.\n *\n * @param {Elia} elia an ELIA object\n */\n init(elia) {\n //import sound effects\n elia.loggingComponent.log(\"Generating soundeffect commands:\");\n\n const soundEffects = fs\n .readdirSync(\"./resources/soundeffects\")\n .filter((file) => file.endsWith(\".mp3\"));\n\n for (const soundEffect of soundEffects) {\n const newSoundEffectCommand = new SoundEffectCommand(\n soundEffect.replace(\".mp3\", \"\").toLowerCase()\n );\n elia.commandMap.set(\n newSoundEffectCommand.name,\n newSoundEffectCommand\n );\n elia.loggingComponent.log(\n soundEffect + \" -> \" + newSoundEffectCommand.name\n );\n }\n elia.loggingComponent.log(\"Sound effect commands added to Elia.\");\n }\n}\n\nmodule.exports = SoundEffectComponent;\n"
},
{
"alpha_fraction": 0.6546875238418579,
"alphanum_fraction": 0.6546875238418579,
"avg_line_length": 32.68421173095703,
"blob_id": "593b89c510bc2bc28925377a2d922696440458f7",
"content_id": "76083048eb57bc307fb04f68651442450ae38daf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 640,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/source/commands/voice/music/GetQueueCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\n\nclass GetQueueCommand extends Command {\n name = \"getqueue\";\n description = \"Get the songs in the queue\";\n usage = \" \";\n type = CommandTypeEnum.MUSIC;\n async execute(message, _args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n )\n elia.musicComponent.musicQueue.getQueuedMusic(message);\n }\n}\n\nmodule.exports = GetQueueCommand;\n"
},
{
"alpha_fraction": 0.6872679591178894,
"alphanum_fraction": 0.6994976997375488,
"avg_line_length": 37.4789924621582,
"blob_id": "66317aefb4f900cdde261ec39cb1b00f93335b23",
"content_id": "5e624f2834659ecd229f03c9b03b0df0e5d9030b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4579,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 119,
"path": "/test/bdd/main.py",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport sys\n\nfrom discord import Embed\nfrom distest import TestCollector\nfrom distest import run_dtest_bot\n\ntest_collector = TestCollector()\n\n\ndef get_base_embed(embed_title):\n base_embed = (\n Embed(\n title=embed_title,\n color=6402394,\n type=\"rich\",\n )\n .set_footer(\n icon_url=\"https://cdn.discordapp.com/embed/avatars/1.png\",\n text=\"ELIA - TESTER\"\n )\n )\n return base_embed\n\n\n@test_collector()\nasync def test_ping(interface):\n embed = get_base_embed(\"Pong!\")\n await interface.assert_reply_embed_equals(\"+ping\", embed)\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_meme(interface):\n await interface.assert_reply_has_image(\"+meme https://i.redd.it/7ptrlc47tuc51.jpg\")\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_pin(interface):\n await interface.assert_reply_contains(\"+pin pinned\", \"pinned\")\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_play_song_then_skip(interface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n skip_embed = get_base_embed(\"You skipped a song!\")\n\n await interface.assert_reply_embed_equals(\"+play https://www.youtube.com/watch?v=dQw4w9WgXcQ\", play_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+skip\", skip_embed)\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_play_then_pause_then_resume_then_skip(interface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n pause_embed = get_base_embed(\"You paused the music.\")\n resume_embed = get_base_embed(\"You resumed playing the music.\")\n skip_embed = get_base_embed(\"You skipped a song!\")\n\n await interface.assert_reply_embed_equals(\"+play https://www.youtube.com/watch?v=dQw4w9WgXcQ\", play_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+pause\", pause_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+resume\", resume_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+skip\", skip_embed)\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_queue_two_songs_then_double_skip(interface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n queue_embed = get_base_embed(\":musical_note: Queued: ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n leave_embed = get_base_embed(\"Bye Bye :smiling_face_with_tear:\")\n\n await interface.assert_reply_embed_equals(\"+play https://www.youtube.com/watch?v=dQw4w9WgXcQ\", play_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+queue https://www.youtube.com/watch?v=dQw4w9WgXcQ\", queue_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+leave\", leave_embed)\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_play_then_replay_then_double_skip(interface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n replay_embed = get_base_embed(\"You replayed a song!\")\n leave_embed = get_base_embed(\"Bye Bye :smiling_face_with_tear:\")\n\n await interface.assert_reply_embed_equals(\"+play https://www.youtube.com/watch?v=dQw4w9WgXcQ\", play_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+replay\", replay_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+leave\", 
leave_embed)\n await asyncio.sleep(1)\n\n\n@test_collector()\nasync def test_play_then_loop_song(interface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://www.youtube.com/watch?v=dQw4w9WgXcQ***\")\n loop_song_start_embed = get_base_embed(\"You started looping the current song!\")\n loop_song_stop_embed = get_base_embed(\"You stopped looping the current song!\")\n leave_embed = get_base_embed(\"Bye Bye :smiling_face_with_tear:\")\n\n await interface.assert_reply_embed_equals(\"+play https://www.youtube.com/watch?v=dQw4w9WgXcQ\", play_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+loopsong\", loop_song_start_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+loopsong\", loop_song_stop_embed)\n await asyncio.sleep(5)\n await interface.assert_reply_embed_equals(\"+leave\", leave_embed)\n await asyncio.sleep(1)\n\n\nif __name__ == '__main__':\n run_dtest_bot(sys.argv, test_collector)\n"
},
{
"alpha_fraction": 0.6495176553726196,
"alphanum_fraction": 0.6495176553726196,
"avg_line_length": 31.736841201782227,
"blob_id": "e8153778ce051b81b7663915154482fbfc8c18b8",
"content_id": "f2168e1006abddd83d887a872075c4f986e79f62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 622,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/source/commands/voice/music/PauseCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\n\nclass PauseCommand extends Command {\n name = \"pause\";\n description = \"Pause playing the song\";\n usage = \"\";\n type = CommandTypeEnum.MUSIC;\n async execute(message, _args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n )\n elia.musicComponent.musicQueue.pauseMusic(message);\n }\n}\n\nmodule.exports = PauseCommand;\n"
},
{
"alpha_fraction": 0.7989690899848938,
"alphanum_fraction": 0.8195876479148865,
"avg_line_length": 31.5,
"blob_id": "d5b5315e02f4a07ae3f0b5e9d310b65bc08ac4c9",
"content_id": "485a3d628e3fb8373ffe528eeac202f480f20e2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 6,
"path": "/sonar-project.properties",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "sonar.organization=bme-mit-iet-org\nsonar.projectKey=BME-MIT-IET_iet-hf2021-elia\nsonar.sources=./source\nsonar.javascript.lcov.reportPaths=./coverage/lcov.info\nsonar.language=js\nsonar.tests=./test"
},
{
"alpha_fraction": 0.5407577753067017,
"alphanum_fraction": 0.553386926651001,
"avg_line_length": 36.869564056396484,
"blob_id": "d29bad9a5d717761cbcbd8f7100aafa845e805db",
"content_id": "c027b1d49ca5eca316f0695dbb35b4e68510e8fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 23,
"path": "/test/unit/utility/urlChecker.test.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/* eslint-disable no-undef */\nvar assert = require(\"assert\");\nconst validURL = require(\"../../../source/components/music/UrlChecker.js\");\n\ndescribe(\"UrlChecker\", function () {\n describe(\"#validURL()\", function () {\n it(\"these URLs are valid\", function () {\n assert.strictEqual(\n validURL(\n \"https://www.youtube.com/watch?v=NYeLG0wG--k&list=RDGMEMTmC-2iNKH_l8gQ1LHo9FeQVMNYeLG0wG--k&start_radio=1\"\n ),\n true\n );\n assert.strictEqual(validURL(\"remelemEzNem.jo\"), true);\n assert.strictEqual(validURL(\"https://vagyMegis.jo\"), true);\n });\n\n it(\"these URLs are NOT valid\", function () {\n assert.strictEqual(validURL(\"htt:n3$e|.jo\"), false);\n assert.strictEqual(validURL(\"sptth:$|__1.44.jo\"), false);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.8081395626068115,
"alphanum_fraction": 0.814784049987793,
"avg_line_length": 69.82353210449219,
"blob_id": "68c46cf47c414b9c82ffbcc2dc9002d89059239b",
"content_id": "d56ac56b5421739e6eb0ba2e79533f332223adb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1285,
"license_type": "no_license",
"max_line_length": 397,
"num_lines": 17,
"path": "/doc/nonfunctional-testing.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# Nemfunkcionális tesztelés\n\nA feladat során a *Distest* python discord BDD tesztelő keretrendszer használatával készítettem teszteket, melyek arra szolgáltak, hogy egyes kérések válasz sebességét mérhessük fel.\n\nMivel ezt egy BDD tesztkeretrendszer segítségével készítettük el, ezért valós válaszidőket fogunk kapni, melyeket a felhasználók fognak tapasztalni. Az Elia projekt elég szerte ágazóan használható, azonban a legfejlettebb része a zenelejátszással kapcsolatos. Ezért a két legfontosabb teszt a voice channel-re való felkapcsolódás és lekapcsolódás, valamint a zenelejátszás elindításának sebessége.\n\nA teszt eredménye arra utal, hogy más hasonló funkciókat megvalósító discord botokhoz képest hasonló a teljesítmény, így a kód ilyen szempontból jól optimalizált.\n\nA tesztek lefuttatásához hasonlóan kell eljárni, mint a BDD tesztek lefuttatásával:\n\n```shell\npython main.py <bot_id> <tester_token> -c <channel_id> -r all\n```\n\nAzonban a tesztek nem a `test/bdd/`, hanem a `test/performance/` mappa alatt találhatók. A keretrendszerhez készült egy *pipenv* környezet, melyet a futtatáshoz használni kell.\n\nA kapcsolódó Github issue: [#23](https://github.com/BME-MIT-IET/iet-hf2021-elia/pull/23)\n"
},
{
"alpha_fraction": 0.5157567858695984,
"alphanum_fraction": 0.529960036277771,
"avg_line_length": 34.761905670166016,
"blob_id": "4a7b429a6abb1d73029d84deaf52fa46dd31ee52",
"content_id": "4615d805b8c96ce2623175eee2674109f0613d85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2302,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 63,
"path": "/source/commands/text/PollCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../Command\");\nconst Discord = require(\"discord.js\");\nconst CommandTypeEnum = require(\"../CommandTypeEnum\");\n\nclass PollCommand extends Command {\n name = \"poll\";\n description = \"Creates a poll, up to 10 choices\";\n usage = \" 'option1' 'option2' 'option3' ... 'option10' \";\n usage = \" \";\n hasArguments = true;\n type = CommandTypeEnum.OTHER;\n emojis = [\"0️⃣\", \"1️⃣\", \"2️⃣\", \"3️⃣\", \"4️⃣\", \"5️⃣\", \"6️⃣\", \"7️⃣\", \"8️⃣\", \"9️⃣\", \"🔟\"];\n async execute(message, args, elia) {\n if (!args.length)\n return elia.messageComponent.reply(\n message,\n \"You need to send the arguments!\"\n );\n\n let command = args.join(\" \");\n // eslint-disable-next-line quotes\n let rawPollArgs = command.split('\" \"');\n\n let pollArgs = new Array();\n\n rawPollArgs.forEach((item) => {\n pollArgs.push(item.slice(1, -1));\n });\n\n if (pollArgs.length > 10)\n return elia.messageComponent.reply(message, \"Too many arguments!\");\n\n let pollMessage = new Discord.MessageEmbed()\n .setColor(0x61b15a)\n .setDescription(\"Submitted by \" + message.member.displayName)\n .setThumbnail(message.author.avatarURL())\n .setFooter(message.guild.name);\n\n if (pollArgs.length == 1) {\n pollMessage.setTitle(pollArgs[0]);\n message.channel.send(pollMessage).then((messageReaction) => {\n messageReaction.react(\"👍\");\n messageReaction.react(\"👎\");\n });\n } else {\n let options = \"\";\n for (let i = 0; i < pollArgs.length; i++) {\n options += \"\\n\\n\" + this.emojis[i + 1] + \" \" + pollArgs[i];\n }\n pollMessage.setTitle(\"Choose one!\");\n pollMessage.addField(\"Avaliable options:\", options, false);\n message.channel.send(pollMessage).then((messageReaction) => {\n for (let i = 0; i < pollArgs.length; i++) {\n messageReaction.react(this.emojis[i + 1]);\n }\n });\n }\n\n elia.loggingComponent.log(message.author.username + \" created a poll\");\n }\n}\n\nmodule.exports = PollCommand;\n"
},
{
"alpha_fraction": 0.5288915038108826,
"alphanum_fraction": 0.53125,
"avg_line_length": 28.75438690185547,
"blob_id": "a359c43990862edd8706d9011732d03b9dd0b86c",
"content_id": "60771d09f5478c8f1c606dbce99eff717cf79a7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1696,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 57,
"path": "/source/commands/voice/SoundEffectCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../Command\");\nconst CommandTypeEnum = require(\"../CommandTypeEnum\");\n\n/**\n * Command for playing sound effects\n */\nclass SoundEffectCommand extends Command {\n /**\n * @param {string} name the command's name\n * @param {?number} volume the volume of the played sound\n */\n constructor(name, volume) {\n super();\n this.name = name;\n\n this.description = \"plays \" + this.name + \"soundeffect\";\n\n if (volume != null) this.soundEffectVolume = volume;\n }\n usage = \" \";\n type = CommandTypeEnum.SOUNDEFFECT;\n /**\n * the volume of the played sound\n *\n * @type {number}\n */\n soundEffectVolume = 0.8;\n\n async execute(message, _args, elia) {\n if (elia.musicComponent.messageSenderInVoiceChannel(message)) {\n // Only try to join the sender's voice channel if they are in one themselves\n message.delete();\n\n const voiceChannel = message.member.voice.channel;\n const connection = await voiceChannel.join();\n\n connection\n .play(\"./resources/soundeffects/\" + this.name + \".mp3\", {\n seek: 0,\n volume: this.soundEffectVolume,\n })\n .on(\"finish\", () => {\n elia.loggingComponent.log(\n message.author.username + \" played: \" + this.name\n );\n voiceChannel.leave();\n });\n } else {\n elia.messageComponent.reply(\n message,\n \"You need to join a voice channel first!\"\n );\n }\n }\n}\n\nmodule.exports = SoundEffectCommand;\n"
},
{
"alpha_fraction": 0.6008861660957336,
"alphanum_fraction": 0.6019086837768555,
"avg_line_length": 38.119998931884766,
"blob_id": "cc7384d1f9a496e1eb6598230154372324a5705e",
"content_id": "4f4180e57bbbbfc068274ec2033fa7abe3bdde0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2934,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 75,
"path": "/test/unit/musicqueue/playMusicFromQueue.test.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/* eslint-disable no-undef */\nconst assert = require(\"assert\");\nconst { Message } = require(\"discord.js\");\nconst sinon = require(\"sinon\");\nconst MusicQueue = require(\"../../../source/components/music/MusicQueue\");\nconst Elia = require(\"../../../source/Elia\");\n\nvar message = Message;\nvar mockMusicQueueArray;\nvar spyShift;\nvar spyUnshift;\nvar mockActivityDisplayComponent;\nvar spySetMusicPlaying;\nvar musicQueue;\n\ndescribe(\"MusicQueue\", function () {\n describe(\"#playMusicFromQueue()\", function () {\n this.beforeEach(function () {\n mockMusicQueueArray = {\n length: Number,\n shift: function () {\n return \"currentSong\";\n },\n unshift: function () {},\n };\n spyShift = sinon.spy(mockMusicQueueArray, \"shift\");\n spyUnshift = sinon.spy(mockMusicQueueArray, \"unshift\");\n mockActivityDisplayComponent = { setMusicPlaying: function () {} };\n spySetMusicPlaying = sinon.spy(\n mockActivityDisplayComponent,\n \"setMusicPlaying\"\n );\n musicQueue = new MusicQueue(\n new Elia(null, null, null, mockActivityDisplayComponent, null)\n );\n musicQueue.lastSong = \"oldSong\";\n musicQueue.currentSong = \"newSong\";\n });\n\n it(\"queue is empty\", function () {\n mockMusicQueueArray.length = 0;\n musicQueue.musicQueueArray = mockMusicQueueArray;\n musicQueue.playMusicFromQueue(message);\n assert(spyShift.notCalled);\n assert.strictEqual(musicQueue.currentSong, \"newSong\");\n assert.strictEqual(musicQueue.lastSong, \"oldSong\");\n });\n\n it(\"queue is not empty and isLoopingSong is false\", function () {\n mockMusicQueueArray.length = 5;\n musicQueue.musicQueueArray = mockMusicQueueArray;\n musicQueue.isLoopingSong = false;\n musicQueue.playMusicFromQueue(message);\n assert(spyShift.calledOnce);\n spyShift.restore();\n assert(spySetMusicPlaying.calledOnce);\n assert.strictEqual(musicQueue.lastSong, \"newSong\");\n assert.strictEqual(musicQueue.currentSong, \"currentSong\");\n assert(spyUnshift.notCalled);\n });\n\n it(\"queue is not empty and isLoopingSong is true\", function () {\n mockMusicQueueArray.length = 5;\n musicQueue.musicQueueArray = mockMusicQueueArray;\n musicQueue.isLoopingSong = true;\n musicQueue.playMusicFromQueue(message);\n assert(spyShift.calledOnce);\n spyShift.restore();\n assert(spySetMusicPlaying.calledOnce);\n assert.strictEqual(musicQueue.lastSong, \"newSong\");\n assert.strictEqual(musicQueue.currentSong, \"currentSong\");\n assert(spyUnshift.calledOnce);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.5585585832595825,
"alphanum_fraction": 0.5585585832595825,
"avg_line_length": 22.785715103149414,
"blob_id": "c1bfaafaa252105d5f65547405d2ff80a442f77e",
"content_id": "7f3ba33e464089ac42dfa928c1177ad7d9d513de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 42,
"path": "/source/components/core/ActivityDisplayComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const { Client } = require(\"discord.js\");\nconst DataComponent = require(\"./DataComponent\");\n\n/**\n * Component which handles the Discord bot's displayed activity\n */\nclass ActivityDisplayComponent {\n /**\n * Setups the ActivityDisplayComponent\n *\n * @param {Client} bot a Discord bot client\n * @param {DataComponent} dataComponent a DataComponent for data\n */\n constructor(bot, dataComponent) {\n /**\n * The Discord Client\n *\n * @type {Client}\n */\n this.bot = bot;\n /**\n * The component for data\n *\n * @type {DataComponent}\n */\n this.dataComponent = dataComponent;\n }\n\n setMusicPlaying() {\n this.bot.user.setActivity(\"Music ♫\", {\n type: \"STREAMING\",\n });\n }\n\n setDefault() {\n this.bot.user.setActivity(this.dataComponent.getPrefix() + \"help\", {\n type: \"LISTENING\",\n });\n }\n}\n\nmodule.exports = ActivityDisplayComponent;\n"
},
{
"alpha_fraction": 0.5648558735847473,
"alphanum_fraction": 0.5709534287452698,
"avg_line_length": 35.08000183105469,
"blob_id": "6afd1fa222f9f3d111bcfcafbab78e328baf50eb",
"content_id": "878b0e34523e3d8d655724b44828ac2e70fd3b1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1804,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 50,
"path": "/source/commands/text/DeleteMessagesCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../Command\");\nconst CommandTypeEnum = require(\"../CommandTypeEnum\");\n\nclass DeleteMessagesCommand extends Command {\n name = \"delete\";\n description = \"deletes messages\";\n usage = \" *required:* <number of messages before this command>\";\n type = CommandTypeEnum.UTILITY;\n shouldDelete = false;\n async execute(message, args, elia) {\n if (message.channel.type === \"dm\")\n return elia.messageComponent.reply(\n message,\n \"You can't use this command in DM's\"\n );\n\n if (message.member) {\n if (!message.member.hasPermission(\"MANAGE_MESSAGES\")) {\n return elia.messageComponent.reply(\n message,\n \"You don't have the permissions for deleting messages!\"\n );\n }\n }\n\n // get the delete count, as an actual number.\n const deleteCount = parseInt(args[0], 10);\n\n // Ooooh nice, combined conditions. <3\n if (!deleteCount || deleteCount < 1 || deleteCount > 99)\n return elia.messageComponent.reply(\n message,\n \"Please provide a number between 1 and 99 for the number of messages to delete\"\n );\n\n // So we get our messages, and delete them. Simple enough, right?\n message.channel.bulkDelete(deleteCount + 1, true).catch((error) => {\n elia.loggingComponent.error(error);\n elia.messageComponent.reply(\n message,\n \"there was an error trying to delete messages in this channel!\"\n );\n });\n elia.loggingComponent.log(\n message.author.username + \" deleted \" + deleteCount + \" messages\"\n );\n }\n}\n\nmodule.exports = DeleteMessagesCommand;\n"
},
{
"alpha_fraction": 0.5668715238571167,
"alphanum_fraction": 0.5689200758934021,
"avg_line_length": 33.867347717285156,
"blob_id": "de59f94e19f365a0f3ec213de409a7611e37f57d",
"content_id": "e07ddfcfb392b6984c30f26633a697257a99be27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3417,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 98,
"path": "/source/commands/voice/music/PlayCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\nconst ytSearch = require(\"yt-search\");\nconst validURL = require(\"../../../components/music/UrlChecker.js\");\nconst getYouTubePlaylistId = require(\"../../../components/music/UrlPlaylist.js\");\nconst { VoiceChannel, Message } = require(\"discord.js\");\nconst Elia = require(\"../../../Elia\");\n\nclass PlayCommand extends Command {\n name = \"play\";\n description = \"Joins and plays a video from youtube\";\n usage =\n \" *required:* <Youtube link> *or search terms:* <term1> <term2> <term3> ...\";\n type = CommandTypeEnum.MUSIC;\n hasArguments = true;\n async execute(message, args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n ) {\n const voiceChannel =\n await elia.musicComponent.musicQueue.getVoiceChannel(\n message.member.voice.channel,\n message\n );\n if (validURL(args[0])) {\n this.playFromYouTube(voiceChannel, message, elia, args[0]);\n } else {\n this.searchAndPlayFromYouTube(\n voiceChannel,\n message,\n args.join(\" \"),\n elia\n );\n }\n }\n }\n\n /**\n * Play's a video or playlist from YouTube\n *\n * @param {VoiceChannel} voiceChannel the VoiceChannel to join\n * @param {Message} message the message which requested the music\n * @param {Elia} elia the elia bot\n * @param {string} url a youtube video url\n */\n playFromYouTube(voiceChannel, message, elia, url) {\n const id = getYouTubePlaylistId(url);\n if (id != null)\n elia.musicComponent.musicQueue.playYouTubePlaylist(\n message,\n voiceChannel,\n id\n );\n else\n elia.musicComponent.musicQueue.playMusic(\n message,\n voiceChannel,\n url\n );\n }\n\n /**\n * Searches a query in YouTube and then play's the first video result match, if result exits\n *\n * @param {VoiceChannel} voiceChannel the VoiceChannel to join\n * @param {Message} message the message which requested the music\n * @param {string} query the search terms in one string\n * @param {Elia} elia the elia bot\n */\n searchAndPlayFromYouTube(voiceChannel, message, query, elia) {\n const video = this.videoFinder(query);\n if (video) {\n elia.musicComponent.musicQueue.playMusic(\n message,\n voiceChannel,\n video.url,\n video.title\n );\n } else {\n elia.messageComponent.reply(message, \"No video results found.\");\n }\n }\n\n /**\n * Searches a srting on YouTube and get the fist result.\n *\n * @param {string} query the string to search on YouTube\n * @returns {?string} the first result of the query or null if no results\n */\n async videoFinder(query) {\n const videoResult = await ytSearch(query);\n return videoResult.videos.length > 1 ? videoResult.videos[0] : null;\n }\n}\n\nmodule.exports = PlayCommand;\n"
},
{
"alpha_fraction": 0.5250176191329956,
"alphanum_fraction": 0.525722324848175,
"avg_line_length": 26.823530197143555,
"blob_id": "a939679965600c9004c318aa8deb73e1487dcda3",
"content_id": "94f14483d59efed355557013241ac320209b0ffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1419,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 51,
"path": "/source/components/music/UrlPlay.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const { Message, VoiceConnection } = require(\"discord.js\");\nconst ytdl = require(\"ytdl-core\");\nconst Elia = require(\"../../Elia\");\n\n/**\n * Play's a song from an URL\n *\n * @param {Elia} elia an ELIA object\n * @param {Message} message a Discord message\n * @param {VoiceConnection} connection a Discord connection\n * @param {string} url a Youtube URL\n * @param {string} title the title of the song\n */\nmodule.exports = async function playFromURL(\n elia,\n message,\n connection,\n url,\n title\n) {\n const stream = ytdl(url, { filter: \"audioonly\" });\n\n connection\n .play(stream, {\n seek: 0,\n volume: parseFloat(elia.dataComponent.getMusicVolume()),\n })\n .on(\"finish\", () => {\n elia.musicComponent.musicQueue.continuePlayingMusic();\n });\n\n elia.musicComponent.musicQueue.cacheYouTubeTitle(url);\n\n if (message != null) {\n if (title == null)\n elia.messageComponent.reply(\n message,\n \":musical_note: Now Playing ***\" + url + \"***\"\n );\n else\n elia.messageComponent.reply(\n message,\n \":musical_note: Now Playing ***\" +\n title +\n \"*** at ***\" +\n url +\n \"***\"\n );\n elia.loggingComponent.log(message.author.username + \" played: \" + url);\n }\n};\n"
},
{
"alpha_fraction": 0.6349206566810608,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 30.5,
"blob_id": "0ae2f23161c2ae8630ee59adf725e9eb29515227",
"content_id": "dc6dfa26329c045cc51aa25bc24c2099bcd243af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 2,
"path": "/README.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# iet-hf2021-elia\nA dokumentáció [itt](./IET-HF.md) érhető el.\n"
},
{
"alpha_fraction": 0.5214998722076416,
"alphanum_fraction": 0.5221397280693054,
"avg_line_length": 29.763778686523438,
"blob_id": "334cd1676ba5ee4c1ef9732567c5547e47c9640e",
"content_id": "36fc20eba6e7a144f532971f6ab2db87e7325b62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 7814,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 254,
"path": "/source/components/core/MessageComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const { Client, Message, MessageEmbed } = require(\"discord.js\");\nconst Command = require(\"../../commands/Command\");\nconst CommandTypeEnum = require(\"../../commands/CommandTypeEnum\");\nconst Elia = require(\"../../Elia\");\nconst DataComponent = require(\"./DataComponent\");\nconst LoggingComponent = require(\"./LoggingComponent\");\n\n/**\n * Component that handles ELIA-s messages. Sends Discord embedded messages.\n */\nclass MessageComponent {\n constructor(bot, dataComponent, loggingComponent) {\n /**\n * The Discord Client\n *\n * @type {Client}\n */\n this.bot = bot;\n /**\n * The component for data\n *\n * @type {DataComponent}\n */\n this.dataComponent = dataComponent;\n /**\n * The component for logging\n *\n * @type {LoggingComponent}\n */\n this.loggingComponent = loggingComponent;\n }\n\n /**\n * Replies to the message.\n *\n * @param {Message} message the Discord message to reply to\n * @param {string} answer the answer in string\n */\n reply(message, answer) {\n let replyMsg = this.buildBaseEmbed().setTitle(answer);\n this.addFooterToEmbed(message, replyMsg);\n\n message.reply(replyMsg).then((msg) => {\n this.deleteMsgTimeout(msg);\n });\n this.deleteMsgNow(message);\n }\n\n /**\n * Deletes a message after a given time.\n *\n * @param {Message} message the Discord message to delete\n */\n deleteMsgTimeout(message) {\n if (\n message &&\n !message.deleted &&\n message.deletable &&\n message.channel.type !== \"dm\"\n )\n message\n .delete({\n timeout: this.dataComponent.getMessageDisplayTime(),\n })\n .catch((error) => {\n this.loggingComponent.error(error);\n });\n }\n\n /**\n * Deletes a message instantly.\n *\n * @param {Message} message the Discord message to delete\n */\n deleteMsgNow(message) {\n if (\n message &&\n !message.deleted &&\n message.deletable &&\n message.channel.type !== \"dm\"\n )\n message.delete().catch((error) => {\n this.loggingComponent.error(error);\n });\n }\n\n /**\n * Replies to the user that no arguments was provided, but\n * it was necessary, with the proper command ussage.\n *\n * @param {Message} message the Discord message which has the command\n * @param {Command} command the used Command\n */\n replyDidntProvideCommandArgs(message, command) {\n let embedMessage = this.buildBaseEmbed();\n this.addFooterToEmbed(message, embedMessage);\n\n embedMessage.setTitle(\"You didn't provide any arguments!\");\n\n if (command.usage) {\n embedMessage.addField(\n \"The proper usage would be:\",\n `\\`\\`\\`${this.dataComponent.getPrefix()}${command.name} ${\n command.usage\n }\\`\\`\\``,\n true\n );\n }\n\n message.channel\n .send(embedMessage)\n .then((msg) => this.deleteMsgTimeout(msg));\n this.deleteMsgNow(message);\n }\n\n /**\n * Return a base embed\n *\n * @returns {MessageEmbed} a base embedded message\n */\n buildBaseEmbed() {\n return new MessageEmbed().setColor(0x61b15a);\n }\n\n /**\n * Add's a simple footer to the embed message\n *\n * @param {MessageEmbed} message the message to be edited\n * @param {MessageEmbed} embedMessage the edited embed message\n */\n addFooterToEmbed(message, embedMessage) {\n if (message.channel.type !== \"dm\")\n embedMessage.setFooter(\n `${message.member.displayName}`,\n message.author.displayAvatarURL()\n );\n else\n embedMessage.setFooter(\n `${message.author.username}`,\n message.author.displayAvatarURL()\n );\n }\n\n /**\n * Reply's all commands to the user\n *\n * @param {Message} message the Discord message which requested all commands\n * @param {Elia} elia the Elia object which got the 
request\n */\n helpSendAllCommands(message, elia) {\n let embedMessage = this.buildBaseEmbed();\n this.addFooterToEmbed(message, embedMessage);\n embedMessage.setTitle(\"Here's a list of all my commands:\");\n embedMessage.setThumbnail(this.bot.user.displayAvatarURL());\n\n let musicCommandsList = [];\n let soundEffectCommandsList = [];\n let utilityCommandsList = [];\n let otherCommandsList = [];\n\n elia.commandMap.forEach((command) => {\n switch (command.type) {\n case CommandTypeEnum.MUSIC:\n musicCommandsList.push(command.name);\n break;\n case CommandTypeEnum.SOUNDEFFECT:\n soundEffectCommandsList.push(command.name);\n break;\n case CommandTypeEnum.UTILITY:\n utilityCommandsList.push(command.name);\n break;\n case CommandTypeEnum.OTHER:\n otherCommandsList.push(command.name);\n break;\n }\n });\n\n embedMessage.addFields(\n {\n name: \"Music Commands\",\n value: musicCommandsList.join(\", \"),\n },\n {\n name: \"SoundEffect Commands\",\n value: soundEffectCommandsList.join(\", \"),\n },\n\n {\n name: \"Utility Commands\",\n value: utilityCommandsList.join(\", \"),\n },\n {\n name: \"Other Commands\",\n value: otherCommandsList.join(\", \"),\n },\n {\n name: \"Use the command below to get info on a specific command!\",\n value: `\\`\\`\\`${this.dataComponent.getPrefix()}help [command name]\\`\\`\\``,\n }\n );\n\n message.author\n .send(embedMessage)\n .then((msg) => {\n this.reply(message, \"I've sent you a DM with all my commands!\");\n this.deleteMsgNow(msg);\n this.deleteMsgTimeout(embedMessage);\n })\n .catch((error) => {\n this.loggingComponent.error(\n `Could not send help DM to ${message.author.tag}.\\n`,\n error\n );\n message.reply(\n \"it seems like I can't DM you! Do you have DMs disabled?\"\n );\n });\n }\n\n /**\n * Reply's a command use to the user\n *\n * @param {Message} message the Discord message which requested help for a command\n * @param {Command} command the command to display the usage\n */\n helpCommandUsage(message, command) {\n let embedMessage = this.buildBaseEmbed();\n this.addFooterToEmbed(message, embedMessage);\n\n embedMessage.setTitle(\"Here's the help for: \" + command.name);\n\n embedMessage.setThumbnail(this.bot.user.displayAvatarURL());\n\n embedMessage.addFields(\n {\n name: \"Description\",\n value: command.description,\n },\n {\n name: \"Usage:\",\n value: `\\`\\`\\`${this.dataComponent.getPrefix()}${\n command.name\n } ${command.usage}\\`\\`\\``,\n }\n );\n\n message.channel\n .send(embedMessage)\n .then((msg) => this.deleteMsgTimeout(msg));\n this.deleteMsgNow(message);\n }\n}\n\nmodule.exports = MessageComponent;\n"
},
{
"alpha_fraction": 0.4080846905708313,
"alphanum_fraction": 0.46102020144462585,
"avg_line_length": 27.86111068725586,
"blob_id": "c3167dfe5e7d4f610245f4d8a7e30983d299f883",
"content_id": "59bb51a9f3c31b3a3ada4a43ec9682a207b0d70e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JSON",
"length_bytes": 1039,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 36,
"path": "/package.json",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "{\n \"name\": \"elia\",\n \"version\": \"1.0.0\",\n \"description\": \"Experimental Listening Information Android\",\n \"main\": \"elia-bot.js\",\n \"scripts\": {\n \"start\": \"node elia-bot.js\",\n \"nyc\": \"nyc --reporter lcov --reporter text --report-dir coverage mocha --recursive test/\",\n \"test\": \"mocha --recursive test/unit/\"\n },\n \"author\": \"xShipi <[email protected]>\",\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/xShipi/ELIA.git\"\n },\n \"license\": \"ISC\",\n \"dependencies\": {\n \"@discordjs/opus\": \"^0.3.3\",\n \"bufferutil\": \"^4.0.3\",\n \"discord.js\": \"^12.5.3\",\n \"libsodium-wrappers\": \"^0.7.9\",\n \"ms\": \"^2.1.3\",\n \"utf-8-validate\": \"^5.0.4\",\n \"yt-search\": \"2.8.0\",\n \"ytdl-core\": \"^4.8.0\",\n \"ytpl\": \"2.1.1\"\n },\n \"devDependencies\": {\n \"mocha\": \"^8.4.0\",\n \"nyc\": \"^15.1.0\",\n \"babel-eslint\": \"^10.1.0\",\n \"eslint\": \"^7.26.0\",\n \"eslint-plugin-jsdoc\": \"^34.6.2\",\n \"sinon\": \"^10.0.0\"\n }\n}\n"
},
{
"alpha_fraction": 0.5613079071044922,
"alphanum_fraction": 0.5613079071044922,
"avg_line_length": 20.275362014770508,
"blob_id": "23e543604bcd50e99507a3b124da1fc01f9d2742",
"content_id": "188cff00e8813c5ab1fa047ee0eb74446b87407f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 69,
"path": "/source/commands/Command.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const { Message } = require(\"discord.js\");\nconst Elia = require(\"../Elia\");\nconst CommandTypeEnum = require(\"./CommandTypeEnum\");\n\nclass Command {\n /**\n * The name of the command, this commes after the prefix\n *\n * @type {string}\n */\n name = \"\";\n\n /**\n * The description of the command\n *\n * @type {string}\n */\n description = \"\";\n\n /**\n * The usage of the command\n *\n * @type {string}\n */\n usage = \"\";\n\n /**\n * The type of the command\n *\n * @type {CommandTypeEnum}\n */\n type = CommandTypeEnum.DEFAULT;\n\n /**\n * Determines if the command can or cannot be used in DM's, default is true\n *\n * @type {boolean}\n */\n guildOnly = true;\n\n /**\n * Determines if the command needs aditional arguments, default is false\n *\n * @type {boolean}\n */\n hasArguments = false;\n\n /**\n * Determines if the the message.delete() funcition should be called after the execute function.\n * Default is yes.\n *\n * @type {boolean}\n */\n shouldDelete = true;\n\n /**\n * Execute the command\n *\n * @param {Message} message the Discord message object\n * @param {string[]} args the arguments for the commands\n * @param {Elia} elia the Elia object\n */\n // eslint-disable-next-line no-unused-vars\n async execute(message, args, elia) {\n // default command, this does nothing.\n }\n}\n\nmodule.exports = Command;\n"
},
{
"alpha_fraction": 0.7595325708389282,
"alphanum_fraction": 0.7595325708389282,
"avg_line_length": 30.269229888916016,
"blob_id": "4c59106dbd4a7730bd068f26417004f8cbda4174",
"content_id": "dfa60616a9a968047f33040375bcdb8c8702e761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1626,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 52,
"path": "/elia-bot.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Discord = require(\"discord.js\");\nconst Elia = require(\"./source/Elia\");\nconst DataComponent = require(\"./source/components/core/DataComponent\");\nconst ActivityDisplayComponent = require(\"./source/components/core/ActivityDisplayComponent\");\nconst LoggingComponent = require(\"./source/components/core/LoggingComponent\");\nconst MessageComponent = require(\"./source/components/core/MessageComponent\");\nconst CommandComponent = require(\"./source/components/CommandComponent\");\nconst MusicComponent = require(\"./source/components/music/MusicComponent\");\nconst SoundEffectComponent = require(\"./source/components/SoundEffectComponent\");\n\nlet bot = new Discord.Client();\n\n// The DataComponent for ELIA\nlet dataComponent = new DataComponent();\n// The LoggingComponent for ELIA\nlet loggingComponent = new LoggingComponent();\n// The ActivityDisplayComponent for ELIA\nlet activityDisplayComponent = new ActivityDisplayComponent(bot, dataComponent);\n// The MessageComponent for ELIA\nlet messageComponent = new MessageComponent(\n bot,\n dataComponent,\n loggingComponent\n);\n\nlet EliaBot = new Elia(\n bot,\n dataComponent,\n loggingComponent,\n activityDisplayComponent,\n messageComponent\n);\n\n// Add function component's\nEliaBot.addComponent(new CommandComponent());\nEliaBot.addComponent(new MusicComponent());\nEliaBot.addComponent(new SoundEffectComponent());\n\nEliaBot.getAvaliableCommands();\n\n// on start\nbot.on(\"ready\", () => {\n EliaBot.onReady();\n});\n\n// setup message handling\nbot.on(\"message\", (message) => {\n EliaBot.onMessage(message);\n});\n\n// bot login\nbot.login(EliaBot.getToken()).then(() => null);\n"
},
{
"alpha_fraction": 0.7839721441268921,
"alphanum_fraction": 0.7944250702857971,
"avg_line_length": 34.9375,
"blob_id": "59b0406cadded04601a44cffe8fa3b02d1296dea",
"content_id": "563b7b4903ceef7e386ed18e0aee58fffb92157d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 16,
"path": "/doc/unit-testing.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# Unit tesztelés\n\nA feladat során a Mocha.js és a Sinon.js JavaScript teszt keretrendszereket használtuk fel.\n\nA tesztek írása során használtuk ezen keretrendszerek mock, spy, stub és assert funkcióit.\nAz egységtesztek főként a forráskód zenelejátszásávaal kapcsolatos részeit tesztelik, valamint a bot URL címeket ellenőrző függvényét.\n\nA tesztek lefutattása a következő paranccsal lehetséges:\n\n``` npm run unit-test ```\n\nA tesztek futásának eredménye:\n\n\n\nA kapcsolódó Github issue: [#1](https://github.com/BME-MIT-IET/iet-hf2021-elia/issues/1)"
},
{
"alpha_fraction": 0.5775602459907532,
"alphanum_fraction": 0.5775602459907532,
"avg_line_length": 23.822429656982422,
"blob_id": "8b41a024344d3fd1d6b77a8d1ed6e241f8ffeb9a",
"content_id": "b75b0f082aee5d957ef8465db05ad752006ef8b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2656,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 107,
"path": "/source/components/core/DataComponent.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const DataSource = require(\"../../../config.json\");\n/**\n * Component for handling the data in the config.json file.\n */\nclass DataComponent {\n /**\n * Get's the Discord token\n *\n * @returns {string} the token\n */\n getToken() {\n return DataSource.token;\n }\n\n /**\n * Get's the message display time\n *\n * @returns {number} the display time in millisecs\n */\n getMessageDisplayTime() {\n return parseInt(DataSource.messageDisplayTime);\n }\n\n /**\n * Get's the bot's mode\n *\n * @returns {boolean} true if the bot is in development mode, else false\n */\n getDevMode() {\n return DataSource.devMode;\n }\n\n /**\n * Get's the bot's radio mode\n *\n * @returns {boolean} true if the bot is in radio mode, else false\n */\n getRadioMode() {\n return DataSource.radioMode;\n }\n\n /**\n * Get's the radio channel for a specific server\n *\n * @param {number} serverId the server's id\n * @returns {?string} the radio channel's id if exist's else null\n */\n getRadioChannel(serverId) {\n return DataSource.servers.find((e) => e.id == serverId).radioChannelID;\n }\n\n /**\n * Get's the bot's preifx\n *\n * @returns {string} the prefix\n */\n getPrefix() {\n return DataSource.prefix;\n }\n\n /**\n * Get's the default music volume for a specific server\n *\n * @param {number} serverId the server's id\n * @returns {string} the server volume\n */\n getMusicVolume(serverId) {\n if (!serverId) return DataSource.defaultMusicVolume;\n else\n return DataSource.servers.find((e) => e.id == serverId).musicVolume;\n }\n\n /**\n * Get's the bot spam channel id for a specific server\n *\n * @param {number} serverId the server's id\n * @returns {string} the channel's id\n */\n getBotSpamChannelId(serverId) {\n return DataSource.servers.find((e) => e.id == serverId)\n .botSpamChannelID;\n }\n\n /**\n * Get's the meme channel id for a specific server\n *\n * @param {number} serverId the server's id\n * @returns {string} the channel's id\n */\n getMemeChannelId(serverId) {\n return DataSource.servers.find((e) => e.id == serverId)\n .memeTextChannelID;\n }\n\n /**\n * Get's the pin channel id for a specific server\n *\n * @param {number} serverId the server's id\n * @returns {string} the channel's id\n */\n getPinChannelId(serverId) {\n return DataSource.servers.find((e) => e.id == serverId)\n .pinTextChannelID;\n }\n}\n\nmodule.exports = DataComponent;\n"
},
{
"alpha_fraction": 0.5768101811408997,
"alphanum_fraction": 0.5768101811408997,
"avg_line_length": 33.06666564941406,
"blob_id": "97a15f781a5eef6c85208202ae66a9e125a04da0",
"content_id": "26378d8dfb57d20b890d01c3ff4adb0bd2117d54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2044,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 60,
"path": "/test/unit/musicqueue/pauseMusic.test.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/* eslint-disable no-undef */\nconst assert = require(\"assert\");\nconst sinon = require(\"sinon\");\nconst { VoiceChannel } = require(\"discord.js\");\nconst Elia = require(\"../../../source/Elia.js\");\nconst MusicQueue = require(\"../../../source/components/music/MusicQueue\");\n\nvar voiceChannel = VoiceChannel;\nvar mockMessageComponent;\nvar spyReply;\nvar mockDispatcher;\nvar voiceConnection;\nvar spyPause;\nvar mockLoggingComponent;\nvar spyLog;\nvar mockUser;\nvar mockMessage;\nvar musicQueue;\n\ndescribe(\"MusicQueue\", function () {\n describe(\"#pauseMusic()\", function () {\n beforeEach(function () {\n mockMessageComponent = { reply: function () {} };\n spyReply = sinon.spy(mockMessageComponent, \"reply\");\n mockDispatcher = { pause: function () {} };\n voiceConnection = { dispatcher: mockDispatcher };\n spyPause = sinon.spy(mockDispatcher, \"pause\");\n mockLoggingComponent = { log: function () {} };\n spyLog = sinon.spy(mockLoggingComponent, \"log\");\n mockUser = { username: String };\n mockMessage = { author: mockUser };\n musicQueue = new MusicQueue(\n new Elia(\n null,\n null,\n mockLoggingComponent,\n null,\n mockMessageComponent\n )\n );\n });\n\n it(\"musicQueue.isPaused was true\", function () {\n musicQueue.isPaused = true;\n musicQueue.pauseMusic(mockMessage);\n assert(spyReply.calledOnce);\n });\n\n it(\"musicQueue.isPaused was false\", function () {\n musicQueue.isPaused = false;\n musicQueue.voiceChannel = voiceChannel;\n musicQueue.connection = voiceConnection;\n musicQueue.pauseMusic(mockMessage);\n assert(spyPause.calledOnce);\n assert(spyReply.calledOnce);\n assert(spyLog.calledOnce);\n assert.strictEqual(musicQueue.isPaused, true);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.8026461005210876,
"alphanum_fraction": 0.8103638291358948,
"avg_line_length": 99.88888549804688,
"blob_id": "0c5cc5090f8756bca763bcba70ac459866e03436",
"content_id": "4026cbe7a71a94ae2853d970892362f20a7913af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 964,
"license_type": "no_license",
"max_line_length": 436,
"num_lines": 9,
"path": "/doc/static-code-analysis.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# Statikus kód ellenőrzés\n\nA feladatat során lefuttattam a SonarCloud-ot és a kimenetek alapján kezdetem el javítani a kód minőséségén.\n\nA code smellekből nem vol sok, ezért mindet javíttottam. SonarCloud jó irányokat mutat, bár ha komolyabb refaktorálásról van szó, akkor nem sokat segít, viszont a refakorálás után újból lefutattható, addig ameddig nem vesz észre code smell-t.\n\n1 security hibát jelzett a SonarCloud, ezt nem javítottam ki, erre nem volt szükség ebben az esetben: A math.random() nem elég erős algoritmus kriptográfiailag. Ebben az esetben ez megengedhető, hiszen nem biztonság kritikus alkalmazásról van szó, és a randomizálás egy tömb elemeit kavarja össze, ezen tömb viszont publikusan lekérhető egy parancsal mindig. Ezen két ok miatt döntöttem úgy hogy ebben az esetben ezt nem kell javítani.\n\nA kapcsolódó Github issue: [#6](https://github.com/BME-MIT-IET/iet-hf2021-elia/issues/6)"
},
{
"alpha_fraction": 0.6465798020362854,
"alphanum_fraction": 0.6465798020362854,
"avg_line_length": 31.3157901763916,
"blob_id": "2695feeb5d0980f7134ab5838a4774686cf6a289",
"content_id": "49bb450712a6352d356380e6b63283675289b6bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 614,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/source/commands/voice/music/SkipSongCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\n\nclass SkipSongCommand extends Command {\n name = \"skip\";\n description = \"Skip a song\";\n usage = \"\";\n type = CommandTypeEnum.MUSIC;\n async execute(message, _args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n )\n elia.musicComponent.musicQueue.skipSong(message);\n }\n}\n\nmodule.exports = SkipSongCommand;\n"
},
{
"alpha_fraction": 0.6192893385887146,
"alphanum_fraction": 0.624365508556366,
"avg_line_length": 37.30555725097656,
"blob_id": "e2f36b9d05e6e1ac675408ef48a7be4f6322e470",
"content_id": "c0ba2e5a64d9802b6d5875fc9f5ad1200e0f3b94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1379,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 36,
"path": "/test/unit/musicqueue/playMusic.test.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/* eslint-disable no-undef */\nvar assert = require(\"assert\");\nconst sinon = require(\"sinon\");\nconst { Message } = require(\"discord.js\");\nconst Elia = require(\"../../../source/Elia.js\");\nconst MusicQueue = require(\"../../../source/components/music/MusicQueue\");\n\nvar musicQueue;\nvar message;\nvar mockVoiceChannel;\nvar spyCacheYoutubeTitle;\nvar spyJoin;\n\ndescribe(\"MusicQueue\", function () {\n describe(\"#playMusic()\", function () {\n before(function () {\n musicQueue = new MusicQueue(Elia);\n message = new Message();\n mockVoiceChannel = { id: String, join: function () {} };\n spyCacheYoutubeTitle = sinon.spy(musicQueue, \"cacheYouTubeTitle\");\n spyJoin = sinon.spy(mockVoiceChannel, \"join\");\n });\n\n it(\"add values and called once voiceChannel.join() function and call once cacheYouTubeTitle function\", function () {\n musicQueue.playMusic(\n message,\n mockVoiceChannel,\n \"https://www.youtube.com/watch?v=NYeLG0wG--k&list=RDGMEMTmC-2iNKH_l8gQ1LHo9FeQVMNYeLG0wG--k&start_radio=1\"\n );\n assert.strictEqual(musicQueue.isPlayingMusic, true);\n assert.strictEqual(musicQueue.voiceChannel, mockVoiceChannel);\n assert(spyJoin.calledOnce);\n assert(spyCacheYoutubeTitle.calledOnce);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.5607187151908875,
"alphanum_fraction": 0.5607187151908875,
"avg_line_length": 31.93877601623535,
"blob_id": "8fbef2f69b07968ced957dbb82e9238d0dfce819",
"content_id": "1d821aa6a26b1817a1efeb4e3e096ada211d2f79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1614,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 49,
"path": "/test/unit/musicqueue/skipSong.test.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "/* eslint-disable no-undef */\nconst assert = require(\"assert\");\nconst sinon = require(\"sinon\");\nconst MusicQueue = require(\"../../../source/components/music/MusicQueue\");\nconst Elia = require(\"../../../source/Elia\");\n\nvar mockLoggingComponent;\nvar spyLog;\nvar mockMessageComponent;\nvar spyReply;\nvar mockUser;\nvar mockMessage;\nvar musicQueue;\nvar spyContinuePlayingMusic;\n\ndescribe(\"MusicQueue\", function () {\n describe(\"#skipSong()\", function () {\n before(function () {\n mockLoggingComponent = { log: function () {} };\n spyLog = sinon.spy(mockLoggingComponent, \"log\");\n mockMessageComponent = { reply: function () {} };\n spyReply = sinon.spy(mockMessageComponent, \"reply\");\n mockUser = { username: String };\n mockMessage = { author: mockUser };\n musicQueue = new MusicQueue(\n new Elia(\n null,\n null,\n mockLoggingComponent,\n null,\n mockMessageComponent\n )\n );\n spyContinuePlayingMusic = sinon.spy(\n musicQueue,\n \"continuePlayingMusic\"\n );\n });\n\n it(\"reply, log then continouePlayingMusic\", function () {\n musicQueue.skipSong(mockMessage);\n assert(spyLog.calledAfter(spyReply));\n assert(spyContinuePlayingMusic.calledAfter(spyLog));\n assert(spyLog.calledOnce);\n assert(spyReply.calledOnce);\n assert(spyContinuePlayingMusic.calledOnce);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.6862687468528748,
"alphanum_fraction": 0.6953837275505066,
"avg_line_length": 38.09195327758789,
"blob_id": "0b10e2d1639e109343e778210bceae5060decaff",
"content_id": "d7fe4ed395225f675eec92dc2bf32da68c4608f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3401,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 87,
"path": "/test/performance/main.py",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport sys\nfrom datetime import datetime, timedelta\nimport discord\nfrom discord.member import VoiceState\nfrom distest import TestCollector\nfrom distest import run_dtest_bot\nfrom discord import Embed, Member, Status\nfrom distest.TestInterface import TestInterface\n\ntest_collector = TestCollector()\n\nperformance_output: str = \"\"\n\ndef get_base_embed(embed_title):\n base_embed = (\n Embed(\n title=embed_title,\n color=6402394,\n type=\"rich\",\n )\n .set_footer(\n icon_url=\"https://cdn.discordapp.com/embed/avatars/1.png\",\n text=\"ELIA - TESTER\"\n )\n )\n return base_embed\n\ndef add_time_to_output(message: str, time_taken: timedelta):\n global performance_output\n performance_output += \"{}\\n\\tTime taken:\\t\\t\\t{}\\n\\n\".format(message, time_taken.total_seconds())\n\n@test_collector()\nasync def ping_time(interface: TestInterface):\n start_time: datetime = datetime.now()\n for i in range(0, 10):\n await interface.assert_reply_embed_regex(\"+ping\", {\"title\": \"Pong!\"})\n add_time_to_output(\"Sent 10 ping requests\", datetime.now() - start_time)\n\n@test_collector()\nasync def meme_time(interface: TestInterface):\n start_time: datetime = datetime.now()\n for i in range(0, 10):\n await interface.assert_reply_has_image(\"+meme https://i.redd.it/7ptrlc47tuc51.jpg\")\n global meme_time_taken\n add_time_to_output(\"Sent 10 memes\", datetime.now() - start_time)\n\n@test_collector()\nasync def pin_time(interface: TestInterface):\n start_time: datetime = datetime.now()\n for i in range(0, 10):\n await interface.assert_reply_contains(\"+pin pinned\", \"pinned\")\n global pin_time_taken\n add_time_to_output(\"Sent 10 pin requests\", datetime.now() - start_time)\n\n@test_collector()\nasync def voice_connect_disconnect_time(interface: TestInterface):\n await interface.send_message(\"+play https://youtube.com/watch?v=jHkxauiiEWs\")\n connect_start_time: datetime = datetime.now()\n await interface.wait_for_event(\"voice_state_update\", lambda m, before, after: before.channel == None and after.channel != None)\n global voice_connect_time_taken\n voice_connect_time_taken = datetime.now() - connect_start_time\n add_time_to_output(\"Sent a play request\", datetime.now() - connect_start_time)\n\n await asyncio.sleep(3)\n \n await interface.send_message(\"+leave\")\n disconnect_start_time: datetime = datetime.now()\n await interface.wait_for_event(\"voice_state_update\", lambda m, before, after: before.channel != None and after.channel == None)\n global voice_disconnect_time_taken\n add_time_to_output(\"Sent a leave request\", datetime.now() - disconnect_start_time)\n\n@test_collector()\nasync def play_start_time(interface: TestInterface):\n play_embed = get_base_embed(\":musical_note: Now Playing ***https://youtube.com/watch?v=jHkxauiiEWs***\")\n\n start_time: datetime = datetime.now()\n await interface.assert_reply_embed_equals(\"+play https://youtube.com/watch?v=jHkxauiiEWs\", play_embed)\n add_time_to_output(\"Requested a song, starting time\", datetime.now() - start_time)\n await interface.send_message(\"+leave\")\n\n@test_collector()\nasync def write_performance_metrics(interface: TestInterface):\n await interface.send_message(\"```{}```\".format(performance_output))\n\nif __name__ == \"__main__\":\n run_dtest_bot(sys.argv, test_collector, timeout=10)\n"
},
{
"alpha_fraction": 0.6626139879226685,
"alphanum_fraction": 0.6626139879226685,
"avg_line_length": 33.6315803527832,
"blob_id": "14adb5290302a0b698c1af839035077ed1a2b77a",
"content_id": "b145e07d086a2a1def3d7ff83d5edeae4c316ac0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 658,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/source/commands/voice/music/ShuffleQueueCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\n\nclass ShuffleQueueCommand extends Command {\n name = \"shuffle\";\n description = \"Shuffles the music queue in a random way\";\n usage = \"\";\n type = CommandTypeEnum.MUSIC;\n async execute(message, _args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n )\n elia.musicComponent.musicQueue.shuffleMusic(message);\n }\n}\n\nmodule.exports = ShuffleQueueCommand;\n"
},
{
"alpha_fraction": 0.8083003759384155,
"alphanum_fraction": 0.8142292499542236,
"avg_line_length": 62.3125,
"blob_id": "076d37c32eb0f7376124f3f467cd144c4606408a",
"content_id": "ebaf1b70c7a38dc59e0fd780c19a17206db5e71e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1070,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 16,
"path": "/doc/bdd-testing.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# Bdd tesztelés\n\nA feladat során a Distest keretrendszerrel valósítottuk meg a teszteket. Ez egy Python-ban írt keretrendszer, amely kifejezetten a Discord bot-ok tesztelésére lett létrehozva.\n\nA tesztelés kezdetén Corde-val kezdtünk el dolgozni, viszont ebben a Javascript alapú keretrendszerben nem lehetett várakozni a kiadott parancsok között.\nTanulmányozva ennek a forráskódját, rájöttünk, hogy rosszul van implementálva a tesztek futtatása, ezért váltottunk Distest-re. A várakozás azért szükséges a tesztekhez, hogy voice channel-hez tartozó funkcionalitást tesztelni lehessen.\n\nA bot legtöbb funkciója a zenelejátszáshoz kapcsolódik, ezért a tesztek során erre helyeztük a hangsúlyt.\n\nA tesztek futtatásához szükséges egy Discord szerver és két bot hozzáadása a szerverhez. Ezután az összes teszt lefuttatása a következő paranccsal lehetséges:\n\n```\npython main.py <bot_id> <tester_token> -c <channel_id> -r all\n```\n\nA kapcsolódó Github issue: [#5](https://github.com/BME-MIT-IET/iet-hf2021-elia/issues/5)"
},
{
"alpha_fraction": 0.5023364424705505,
"alphanum_fraction": 0.5064252614974976,
"avg_line_length": 34.66666793823242,
"blob_id": "ea8a8700fe10eedc41a58dd6c0743611961f57fb",
"content_id": "bfe5d123d9ef28cb585a308d6f9538c783b4ae8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1712,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 48,
"path": "/source/commands/voice/music/QueueSongCommand.js",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "const Command = require(\"../../Command\");\nconst CommandTypeEnum = require(\"../../CommandTypeEnum\");\nconst ytSearch = require(\"yt-search\");\nconst validURL = require(\"../../../components/music/UrlChecker.js\");\n\nclass QueueSongCommand extends Command {\n name = \"queue\";\n description =\n \"Queue a video from youtube, if no music plays starts playing it.\";\n usage =\n \" *required:* <Youtube link> *or search terms:* <term1> <term2> <term3> ...\";\n hasArguments = true;\n type = CommandTypeEnum.MUSIC;\n async execute(message, args, elia) {\n if (\n elia.dataComponent.getRadioMode() ||\n (elia.musicComponent.messageSenderInVoiceChannel(message) &&\n elia.musicComponent.messageSenderHasRightPermissions(message))\n ) {\n if (validURL(args[0])) {\n elia.musicComponent.musicQueue.queueMusic(message, args[0]);\n } else {\n const videoFinder = async (query) => {\n const videoResult = await ytSearch(query);\n return videoResult.videos.length > 1\n ? videoResult.videos[0]\n : null;\n };\n\n const video = await videoFinder(args.join(\" \"));\n\n if (video) {\n elia.musicComponent.musicQueue.queueMusic(\n message,\n video.url\n );\n } else {\n elia.messageComponent.reply(\n message,\n \"No video results found.\"\n );\n }\n }\n }\n }\n}\n\nmodule.exports = QueueSongCommand;\n"
},
{
"alpha_fraction": 0.6885531544685364,
"alphanum_fraction": 0.8140615820884705,
"avg_line_length": 70.75,
"blob_id": "d284438abbd4745f66a64f9b16c9498614790b29",
"content_id": "913d1f4cb13a1866c9b5ba50fd8f5b051b71e962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1759,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 24,
"path": "/doc/ci.md",
"repo_name": "BME-MIT-IET/iet-hf2021-elia",
"src_encoding": "UTF-8",
"text": "# Build keretrendszer + CI beüzemelése\n\nA feladat célja a CI továbbfejlesztése volt az automatikus SonarCloud-on túl.\n\nA SonarCloud-ot automatikus futás helyett csak GitHub Actions futtatja le.\n\nEzen felül az Actions Node v12, v14, v15 verziókkal builedeli a projektet.\n\nA tesztek futtatása során a code coverage-et is ellenőrzi, ami a pull requestben is megjelenik.\n\nVégül ESLint (babel parser-t használva) segítségével a kód stílust is ellenőrzi. Ebben az ellenőrzésbe a JSDoc kommentek is beletartoznak. \n\nSajnos a feladat elején, a lokális CI branch-et valamért az origin/master-nek vette, és így pár az ehhez a feladathoz kapcsolodó commit oda került, nem látható a pull requestben.\n\nA feladathoz tartozó hiányzó commitok:\n- [Add main workflow](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/85ee9873bd13162bf2becf82edd0aa831aebe6ea)\n- [Added package-lock.json for CI](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/3f475ad5d0002045089bfba7ac44325f26c61441)\n- [Removed Node v10 from CI](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/82603065d6672c60b45ffef8ea9b60645c233d6e)\n- [Added properties file for SonarCloud](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/eb8068c1c1d76030df1376f36afe13ed6de117c3)\n- [Formatted main workflow](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/4e123a1261a75e3a4e2ea96ec0fb9a1c265f8df0)\n- [Updated package.json for testing](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/e8f39fb47f09e50538d4a1e142719943faa0795c)\n- [Separated workflow, added Linting](https://github.com/BME-MIT-IET/iet-hf2021-elia/commit/cdd92671a5d2d6153fa8bcdd6cdf9b6150792123)\n\nA kapcsolódó Github issue: [#17](https://github.com/BME-MIT-IET/iet-hf2021-elia/issues/17)"
}
] | 36 |
Adil7777/Cats_and_Dogs_identification
|
https://github.com/Adil7777/Cats_and_Dogs_identification
|
7d28105d6e5e5a2e645a3f31ee9fd9bee6782b6c
|
66611b43c0c19cf6fa5843e5d02913609521ca3a
|
88bc7abccfd3c6d300fdc248942396894435b580
|
refs/heads/master
| 2022-11-27T20:30:00.050343 | 2020-08-11T07:58:10 | 2020-08-11T07:58:10 | 286,682,020 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6418826580047607,
"alphanum_fraction": 0.6657571792602539,
"avg_line_length": 30.869565963745117,
"blob_id": "36f589f9396f7ba1bbd726131096794566279f9c",
"content_id": "7c5688d8e1a48c9378c38ec7ed246dd27c54a494",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1466,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 46,
"path": "/main.py",
"repo_name": "Adil7777/Cats_and_Dogs_identification",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nfrom tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout\nimport matplotlib.pyplot as plt\nfrom google.colab import files\n\n\ntrain, _ = tfds.load('cats_vs_dogs', split=['train[:100%]'], with_info=True, as_supervised=True)\n\n\nSIZE = (224, 224)\ndef resize_image(img, label):\n img = tf.cast(img, tf.float32)\n img = tf.image.resize(img, SIZE)\n img /= 255.0\n return img, label\n \ntrain_resized = train[0].map(resize_image)\ntrain_batches = train_resized.shuffle(1000).batch(16)\n\nbase_layers = tf.keras.applications.MobileNetV2(input_shape=(SIZE[0], SIZE[1], 3), include_top=False)\n\nmodel = tf.keras.Sequential([\n base_layers,\n GlobalAveragePooling2D(),\n Dropout(0.2),\n Dense(1)\n])\nmodel.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy'])\n\nmodel.fit(train_batches, epochs=1)\n\nimages = []\n\nfor i in images:\n img = load_img(i)\n img_array = img_to_array(img)\n img_resized, _ = resize_image(img_array, _)\n img_expended = np.expand_dims(img_resized, axis=0)\n prediction = model.predict(img_expended)\n pred_label = 'CAT' if prediction < 0.5 else 'DOG'\n plt.figure()\n plt.imshow(img)\n plt.title('{} {}'.format(pred_label, prediction))\n"
}
] | 1 |
awi29/i-voting
|
https://github.com/awi29/i-voting
|
033e6fe5512039d11be3466fdf3d39bdc9095e9d
|
3e0e6b04fb23ea20a29e9992b3af953cfa3ed3c7
|
f92f46ad4fabb6bd7f0007c77624b0449a3f8617
|
refs/heads/master
| 2021-01-01T04:07:03.591971 | 2017-04-02T18:35:46 | 2017-04-02T18:35:46 | 56,266,716 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6732636094093323,
"alphanum_fraction": 0.6803227066993713,
"avg_line_length": 33.33766174316406,
"blob_id": "78e65e19f909448ad2be86fc686dd808607418df",
"content_id": "a7f5d8cd1032fd62d6a50b3e7cae1526eae23dbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7933,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 231,
"path": "/castvote/views.py",
"repo_name": "awi29/i-voting",
"src_encoding": "UTF-8",
"text": "import re\nfrom django.shortcuts import render\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.context_processors import csrf\nfrom accounts.models import Student\nfrom accounts.models import Candidate\nfrom django.contrib import messages\n\n\nencryptedkey = ['104','421','251','340','874','982','124','719','364','284','412']\n\n\n# @login_required\n# def voting(request):\n# \tif request.user.is_authenticated():\n# \t\tusername = request.user.username\n# \t\tstudent = Student.objects.get(username=username)\n# \t\tyear = student.year\n# \t\tstudents = Student.objecs.filter(iscandidate=True, year=year).order_by('firstname')\n# \t# POSTS = ['general','cultural','technical','sports','environmental','mess','maintenance']\n# \t\tposts = ['General Secretary','Cultural Secretary'\n# \t\t\t\t 'Technical Secretary', 'Sports Secretary'\n# \t\t\t\t 'Environmental Secretary', 'Mess Secretary',\n# \t\t\t\t 'Maintenance Secretary', 'Literary Secretary'\n# \t\t\t]\n\n# \t\tcontext = {\n# \t\t\t'general_sec' : [],\n# \t\t\t'cultural_sec': [],\n# \t\t\t'technical_sec': [],\n# \t\t\t'sports_sec' : [],\n# \t\t\t'environmental_sec' : [],\n# \t\t\t'mess_sec' : [],\n# \t\t\t'maintenance_sec' : [],\n# \t\t\t'unchosen' : []\n# \t\t}\n\n# \t\tfor student in students:\n# \t\t\tcandidate = Candidate.objects.get(student=student)\n# \t\t\tpost = candidate.postname\n# \t\t\ti = 0\n# \t\t\twhile i < 8:\n# \t\t\t\tif post == posts[i]:\n# \t\t\t\t\tbreak\n\n# \t\t\tif i==0 :\n# \t\t\t\tcontext['general_sec'].append(candidate)\n# \t\t\telif i==1 :\n# \t\t\t\tcontext['cultural_sec'].append(candidate)\n# \t\t\telif i==2 :\n# \t\t\t\tcontext['technical_sec'].append(candidate)\n# \t\t\telif i==3 :\n# \t\t\t\tcontext['sports_sec'].append(candidate)\n# \t\t\telif i==4 :\n# \t\t\t\tcontext['environmental_sec'].append(candidate)\n# \t\t\telif i==5 :\n# \t\t\t\tcontext['mess_sec'].append(candidate)\n# \t\t\telif i==6 :\n# \t\t\t\tcontext['maintenance_sec'].append(candidate)\n# \t\t\telse :\n# \t\t\t\tcontext['unchosen'].append(candidate)\n\n\n# \t\treturn render(request,'castvote/votingform.html',context)\n\n\n\n@login_required\ndef voting(request):\n\tcontext = {}\n\tif request.user.is_authenticated():\n\t\tusername = request.user.username\n\t\tstudent = Student.objects.get(username=username)\n\t\tyear = student.year\n\t\t\n\t\tcontext['encryptedkey'] = encryptedkey\n\t\tcontext['general_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='General Secretary').order_by('student__username')\n\t\tcontext['cultural_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Cultural Secretary').order_by('student__username')\n\t\tcontext['technical_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Technical Secretary').order_by('student__username')\n\t\tcontext['sports_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Sports Secretary').order_by('student__username')\n\t\tcontext['environmental_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True, postname='Environmental Secretary').order_by('student__username')\n\t\tcontext['mess_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Mess 
Secretary').order_by('student__username')\n\t\tcontext['maintenance_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Maintenance Secretary').order_by('student__username')\n\t\tcontext['literary_sec'] = Candidate.objects.filter(student__year=year,student__iscandidate=True,postname='Literary Secretary').order_by('student__username')\t\n\n\n\t\t# for index,candidate in enumerate(context['general_sec']):\n\t\t# \tcandidate1 = [encryptedkey[index],candidate]\n\n\n\t\t# for candidate in context['general_sec']:\n\t\t# \tprint candidate[0],candidate[1]\n\n\t\treturn render(request,'castvote/castvote.html',context)\n\n\n\n\ndef storevote(request):\n\terrors = []\n\tcontext = {}\n\tif request.user.is_authenticated():\n\t\tusername = request.user.username\n\t\ttry:\n\t\t\tstudent = Student.objects.get(username=username)\n\t\texcept Student.DoesNotExist:\n\t\t\treturn HttpResponse(\"<h3>No student with username - \" ,username, \"exists in database</h3>\")\n\n\t\tif request.method == \"POST\":\n\t\t\ttry:\n\t\t\t\tgeneral \t= request.POST.get('general','')\n\t\t\t\tcultural \t= request.POST.get('cultural','')\n\t\t\t\ttechnical \t= request.POST.get('technical','')\n\t\t\t\tsports \t\t= request.POST.get('sports','')\n\t\t\t\tenvironmental = request.POST.get('environmental','')\n\t\t\t\tmess \t\t= request.POST.get('mess','')\n\t\t\t\tmaintenance = request.POST.get('maintenance','')\n\t\t\t\tliterary \t= request.POST.get('literary','')\n\n\t\t\t\t# print \"\\n\\n\"\n\t\t\t\t# print \"general = \", general\n\t\t\t\t# print \"cultural = \", cultural\n\t\t\t\t# print \"technical = \", technical\n\t\t\t\t# print \"sports = \",sports\n\t\t\t\t# print \"environmental = \", environmental\n\t\t\t\t# print \"mess = \",mess\n\t\t\t\t# print \"maintenance = \", maintenance\n\t\t\t\t# print \"literary = \",literary\n\t\t\t\t# print \"\\n\\n\"\n\n\t\t\t\tif is_number(general):\n\t\t\t\t\tif general not in encryptedkey:\n\t\t\t\t\t\tcontext['error_general'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_general'] = '*Please Select Candidate for general secretary'\n\n\t\t\t\tif is_number(cultural):\n\t\t\t\t\tif cultural not in encryptedkey:\n\t\t\t\t\t\tcontext['error_cultural'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_cultural'] = '*Please Select Candidate for cultural secretary'\n\n\n\t\t\t\tif is_number(technical):\n\t\t\t\t\tif technical not in encryptedkey:\n\t\t\t\t\t\tcontext['error_technical'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_technical'] = '*Please Select Candidate for technical secretary'\n\n\t\t\t\tif is_number(sports):\n\t\t\t\t\tif sports not in encryptedkey:\n\t\t\t\t\t\tcontext['error_sports'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_sports'] = '*Please Select Candidate for sports secretary'\n\n\n\t\t\t\tif is_number(environmental):\n\t\t\t\t\tif environmental not in encryptedkey:\n\t\t\t\t\t\tcontext['error_environmental'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_environmental'] = '*Please Select Candidate for environmental secretary'\n\n\n\t\t\t\tif is_number(mess):\n\t\t\t\t\tif mess not in encryptedkey:\n\t\t\t\t\t\tcontext['error_mess'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_mess'] = '*Please Select Candidate for mess secretary'\n\n\t\t\t\t\n\t\t\t\tif is_number(maintenance):\n\t\t\t\t\tif maintenance not in 
encryptedkey:\n\t\t\t\t\t\tcontext['error_maintenance'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_maintenance'] = '*Please Select Candidate for maintenance secretary'\n\n\n\t\t\t\tif is_number(literary):\n\t\t\t\t\tif literary not in encryptedkey:\n\t\t\t\t\t\tcontext['error_literary'] = '*Dont Change value in the form'\n\t\t\t\telse:\n\t\t\t\t\tcontext['error_literary'] = '*Please Select Candidate for literary secretary'\n\n\n\t\t\t\treturn HttpResponse(\"Vote Received\")\n\t\t\texcept ValueError:\n\t\t\t\treturn HttpResponse(\"<h3>Please Select candidate for every post</h3>\") \n\n\t\telse :\n\t\t\treturn HttpResponse(\"<h3> Method of submission should be <strong>POST</strong></h3>\")\n\n\telse :\n\t\treturn HttpResponse(\"<h3>You are not authenticated to vote</h3>\")\n\n\n\n\ndef is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\n\n\ndef printga():\n\tprint \"\\n\\n\"\n\tprint \"general = \", general\n\tprint \"cultural = \", cultural\n\tprint \"technical = \", technical\n\tprint \"sports = \",sports\n\tprint \"environmental = \", environmental\n\tprint \"mess = \",mess\n\tprint \"maintenance = \", maintenance\n\tprint \"literary = \",literary\n\tprint \"\\n\\n\"\n\n\t# general \t= int(request.POST.get('general',''))\n\t# cultural \t= int(request.POST.get('cultural',''))\n\t# technical \t= int(request.POST.get('technical',''))\n\t# sports \t\t= int(request.POST.get('sports',''))\n\t# environmental = int(request.POST.get('environmental',''))\n\t# mess \t\t= int(request.POST.get('mess',''))\n\t# maintenance = int(request.POST.get('maintenance',''))\n\t# literary \t= int(request.POST.get('literary',''))\n\n"
},
{
"alpha_fraction": 0.686170220375061,
"alphanum_fraction": 0.686170220375061,
"avg_line_length": 22.5,
"blob_id": "aac4cdebf0661b2e214e2dff4501c6137f8f8834",
"content_id": "8aaeacb74aadc7654a41bdf35fd87c926ab2c99f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/castvote/urls.py",
"repo_name": "awi29/i-voting",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\napp_name = 'castvote'\n\nurlpatterns = [\n\turl(r'^$','castvote.views.voting',name='voting'),\n\turl(r'^done/$','castvote.views.storevote', name='storevote')\n]\n"
},
{
"alpha_fraction": 0.8314606547355652,
"alphanum_fraction": 0.8314606547355652,
"avg_line_length": 88,
"blob_id": "eda5f355faae317ebf95342daaed13e6f84dec7b",
"content_id": "83aab49f45195e817e2315dc324975d127411fef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 178,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 2,
"path": "/README.md",
"repo_name": "awi29/i-voting",
"src_encoding": "UTF-8",
"text": "Developed an application which allows user to cast their ballots from any internet connected computer anywhere in the world.\nThey can verify their ballot after counting is done.\n"
}
] | 3 |
edcarlosneves/regristra-notas
|
https://github.com/edcarlosneves/regristra-notas
|
c4eccbbd4e25b40ec989f8880a591a850833b75b
|
791eeee155e625dd350da6f053e3b64817304a9a
|
548090e29b88f7f47984d371b0dda39e509b5c66
|
refs/heads/master
| 2020-04-04T11:43:42.381297 | 2018-11-02T17:58:49 | 2018-11-02T17:58:49 | 155,902,067 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6782945990562439,
"alphanum_fraction": 0.682170569896698,
"avg_line_length": 18.846153259277344,
"blob_id": "45d1e29f565966c6b798ee19e0bf97b86ff3e2b0",
"content_id": "8908f7d61d72b3c3e8a4764e3900a5f3841c8b55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/alunos/views.py",
"repo_name": "edcarlosneves/regristra-notas",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .models import Aluno\n\ndef alunos(request):\n alunos = Aluno.objects.all()[:5]\n\n context = {\n 'alunos': alunos\n }\n\n return render(request, 'index.html', context)\n"
},
{
"alpha_fraction": 0.65887850522995,
"alphanum_fraction": 0.6822429895401001,
"avg_line_length": 25.625,
"blob_id": "49247b817d26d8bd3fd700bd882bd742be2c3c79",
"content_id": "3a36829ca44a04a4671db04f9fed02dde7fd5064",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 8,
"path": "/alunos/models.py",
"repo_name": "edcarlosneves/regristra-notas",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Aluno(models.Model):\n nome = models.CharField(max_length=255)\n nota = models.DecimalField(max_digits=3, decimal_places=1)\n\n def __str__(self):\n return self.nome\n\n"
},
{
"alpha_fraction": 0.6818181872367859,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 17.5,
"blob_id": "9a88b6428370de108b50070b051f1b4707d4828b",
"content_id": "34494536c17dcbec91ff20681ba9fc01e3f8e5af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 6,
"path": "/alunos/urls.py",
"repo_name": "edcarlosneves/regristra-notas",
"src_encoding": "UTF-8",
"text": "from django. urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.alunos, name='alunos')\n]"
}
] | 3 |
sitongye/web_scraping
|
https://github.com/sitongye/web_scraping
|
6a1788c5d4f2d9bf23b59f3fa8fb888f8bf8c550
|
6fe9b987dbffdab6edb8158d0d633ac7654f2f44
|
374c5c3184ff1fa29822e6c8daad995e83c6fd10
|
refs/heads/master
| 2023-02-08T13:07:29.864677 | 2020-12-29T00:36:43 | 2020-12-29T00:36:43 | 324,443,409 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5207026600837708,
"alphanum_fraction": 0.5257214307785034,
"avg_line_length": 37.878047943115234,
"blob_id": "3c8463808de29f901933b779901d5f6b49c1f6f7",
"content_id": "0046ff94f435567027a5226cc2dc9b02b4c02ed3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1594,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 41,
"path": "/RainerMariaRilke.py",
"repo_name": "sitongye/web_scraping",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport requests\nimport re\nimport pandas as pd\n\nif __name__ == \"__main__\":\n response = requests.get(\"http://www.rainer-maria-rilke.de/\")\n homepage_soup = BeautifulSoup(response.content, \"html.parser\")\n gedichte = homepage_soup.find_all(\"a\",href=True)\n dataframe = pd.DataFrame(columns=[\"ID\", \"Title\", \"Content\", \"Time\", \"Place\"])\n for gedicht in gedichte:\n if re.match(r\"\\d+\",gedicht[\"href\"]) is not None:\n\n poem_url = gedicht[\"href\"]\n print(poem_url)\n ID = re.findall(r\"\\d+\\w*\\d+\",poem_url)[0]\n print(ID)\n poem_response = requests.get(\"http://www.rainer-maria-rilke.de/\"+poem_url)\n poem_soup = BeautifulSoup(poem_response.content, \"html.parser\")\n try:\n headline = poem_soup.find(\"h1\").get_text()\n print(\"poet headline: \",headline)\n except:\n continue\n poem_content = poem_soup.find_all(\"p\")[1].get_text()\n poet_time_place =poem_soup.find_all(\"p\")[-1].get_text().strip()\n #print(poet_time_place)\n time = poet_time_place.split(\",\")[1].strip()\n print(\"time: \", time)\n place = poet_time_place.split(\",\")[-1].strip()\n print(\"place: \",place)\n dataframe = dataframe.append(\n {\"ID\": ID,\n \"Title\": headline,\n \"Content\": poem_content,\n \"Time\": time,\n \"Place\":place},\n ignore_index=True\n )\n\ndataframe[1:].to_csv(\"all_poem.csv\")\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7729411721229553,
"avg_line_length": 37.6363639831543,
"blob_id": "50bd914d95e5d8a6c20cb0c949f7a166c091c103",
"content_id": "fbb5c1631a74b24face28de920fc9df84cee1733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 850,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 22,
"path": "/selenium_ws.py",
"repo_name": "sitongye/web_scraping",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom bs4 import BeautifulSoup\n\n\ndriver = webdriver.Firefox(executable_path='./geckodriver')\n#driver.get('https://www.snipes.com/')\n\ndriver.get('https://www.snipes.com/')\nWebDriverWait(driver,timeout=1000).until(lambda a: a.find_elements_by_class_name('slide'))\nslide = driver.find_elements_by_class_name('slide')[0]\nbrands = slide.find_elements_by_tag_name(\"a\")\nprint(len(brands))\nfor brand in brands:\n brand_html = brand.get_attribute(\"href\")\n #print(brand_html)\n brand_soup = BeautifulSoup(brand_html, parser=\"html.parser\")\n print(brand_soup.prettify())\n"
},
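The lambda passed to `WebDriverWait` above works, but Selenium ships ready-made expected conditions for exactly this; a sketch of the equivalent explicit wait, assuming the same `slide` class name:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox(executable_path='./geckodriver')
driver.get('https://www.snipes.com/')

# blocks until at least one element with class "slide" is present,
# or raises TimeoutException after 30 seconds
slides = WebDriverWait(driver, 30).until(
    EC.presence_of_all_elements_located((By.CLASS_NAME, 'slide'))
)
print(len(slides))
```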
{
"alpha_fraction": 0.6461307406425476,
"alphanum_fraction": 0.6579013466835022,
"avg_line_length": 52.878379821777344,
"blob_id": "2385000a4f47268b90d46e836fbaf6d9860ee403",
"content_id": "e89dd33bb4706fa7cc077e534f1433fa37fd234c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3993,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 74,
"path": "/beautsp.py",
"repo_name": "sitongye/web_scraping",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport pandas as pd\n\nheaders = requests.utils.default_headers()\nheaders.update({\"User-Agent\": \"Mozilla/5.0 (X11; CrOS x86_64 13421.89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36\"})\ndriver = webdriver.Firefox(executable_path='./geckodriver')\nr = requests.get(\"https://www.snipes.com/c/shoes/sneaker\")\ntry:\n snipe_soup= BeautifulSoup(r.content, \"html.parser\")\nexcept:\n print(\"unable to parse\")\n#print(snipe_soup.prettify())\ntext = snipe_soup.find_all(\"div\", {\"class\": \"b-product-sort-bar l-row\"})[0].find(\"div\", {\"class\": \"b-grid-quantity l-col-4 l-col-md-3 js-grid-quantity\"}).get_text()\nall_amount = re.compile(r\"\\d+\").findall(text)[0]\nprint(all_amount)\ndriver.get(\"https://www.snipes.com/c/shoes/sneaker?sz=\"+all_amount)\ntry:\n showall_soup = BeautifulSoup(driver.page_source, \"html.parser\")\nexcept:\n print(\"parsing error\")\n\nall_sneakers = showall_soup.find_all(\"div\",{\"class\": \"b-product-grid-tile js-tile-container\"})\n# except:\n# print(\"fail to connect\")\n# pass\ndef get_href(shoe_tag):\n return shoe_tag.find_all(\"span\", {\"class\": \"b-product-tile-link js-product-tile-link\"})[0][\"href\"]\nprint(len(all_sneakers))\n\ndef get_productdetail(shoepage_soup):\n product_detail = {}\n product_detail[\"shoe_title\"] = re.sub(r\"\\n\", \" \", shoepage_soup.find_all(\"div\", {\"class\":\"js-target\"})[0].get_text()).strip()\n product_detail[\"shoe_price\"] = re.sub(r\"\\n\", \"\", shoepage_soup.find(\"span\",{\"class\": \"b-product-tile-price-item\"}).get_text()).strip()\n product_detail[\"shoe_image\"] = shoepage_soup.find(\"img\", {\"class\": \"b-dynamic_image_content\"})[\"data-src\"]\n orderable_class = \"js-pdp-attribute-tile b-size-value js-size-value b-swatch-circle b-swatch-value b-swatch-value--selectable b-swatch-value--orderable\"\n in_store_only_class = \"js-pdp-attribute-tile b-size-value js-size-value b-swatch-circle b-swatch-value b-swatch-value--selectable b-swatch-value--in-store-only\"\n sold_out_class = \"js-pdp-attribute-tile b-size-value js-size-value b-swatch-circle b-swatch-value b-swatch-value--selectable b-swatch-value--sold-out\"\n product_detail[\"in_stock_sizes\"] = [re.sub(r\"\\n\",\"\",x.get_text()) for x in shoepage_soup.find_all(\"span\",{\"class\": orderable_class})]\n product_detail[\"in_store_only_sizes\"] = [re.sub(r\"\\n\", \"\", x.get_text()) for x in\n shoepage_soup.find_all(\"span\", {\"class\": in_store_only_class})]\n product_detail[\"sold_out_sizes\"] = [re.sub(r\"\\n\", \"\", x.get_text()) for x in\n shoepage_soup.find_all(\"span\", {\"class\": sold_out_class})]\n return product_detail\n\n\n\nfor i in range(len(all_sneakers)):\n dataframe = pd.DataFrame(columns=[\"product\", \"price\", \"shoe_image\", \"orderable_size\", \"sold_out_size\"])\n shoe_link = \"https://www.snipes.com\"+get_href(all_sneakers[i])\n shoepage = BeautifulSoup(requests.get(shoe_link).content, \"html.parser\")\n try:\n product_dict = get_productdetail(shoepage)\n print(product_dict)\n dataframe = dataframe.append({\"product\": product_dict[\"shoe_title\"],\n \"price\": product_dict[\"shoe_price\"],\n \"shoe_image\": product_dict[\"shoe_image\"],\n \"orderable_size\": product_dict[\"in_stock_sizes\"],\n \"sold_out_size\": 
product_dict[\"sold_out_sizes\"]}, ignore_index=True)\n except:\n print(shoe_link)\n continue\ndataframe.to_csv(\"allshoes.csv\")\n\n\n\n#<span data-attr-value=\"39\"\n# class=\"js-pdp-attribute-tile b-size-value js-size-value b-swatch-circle b-swatch-value b-swatch-value--selectable b-swatch-value--in-store-only b-swatch-value--selected\">39</span>\n\n\n\n\n\n\n"
},
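`get_productdetail` above matches size swatches by their full, exact `class` attribute string, which breaks as soon as the site reorders or adds a class. BeautifulSoup's `class_` keyword matches a tag if any single class equals the value, so filtering on the one distinguishing class is more robust; a sketch using class names taken from the record above:

```python
from bs4 import BeautifulSoup

html = """
<span class="b-swatch-value b-swatch-value--orderable">42</span>
<span class="b-swatch-value b-swatch-value--sold-out">43</span>
"""
soup = BeautifulSoup(html, "html.parser")

# class_ matches if ANY of the tag's classes equals the given value,
# so the order and number of the other classes no longer matter
in_stock = [s.get_text() for s in soup.find_all("span", class_="b-swatch-value--orderable")]
sold_out = [s.get_text() for s in soup.find_all("span", class_="b-swatch-value--sold-out")]
print(in_stock, sold_out)   # ['42'] ['43']
```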
{
"alpha_fraction": 0.5695295333862305,
"alphanum_fraction": 0.5894116163253784,
"avg_line_length": 41.81683349609375,
"blob_id": "4f169985f39de44b116abb6f09cb2b4e53807986",
"content_id": "87ef078bd5f27571f2a1948a050a6bb99cd8485f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8651,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 202,
"path": "/Scraper.py",
"repo_name": "sitongye/web_scraping",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver import Proxy\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport pandas as pd\nimport random\nimport lxml\nimport asyncio\nfrom proxybroker import Broker\nfrom fake_useragent import UserAgent\n\nua = UserAgent()\nua.update()\n\nclass Scraper:\n def __init__(self, url, rotate_proxy=False, agent=ua.random):\n print(agent)\n self.url = url\n self.site = \"\"\n if rotate_proxy is True:\n self.proxy_pool = self._populate_proxy(7)\n else:\n self.proxy_pool = []\n # [\"154.12.18.{}:65007\".format(str(i)) for i in range(21,46)]\n print(self.proxy_pool)\n self.rotate_proxy = rotate_proxy\n self.driver = None\n self.agent = {\"User-Agent\": agent}\n self.headers = requests.utils.default_headers()\n self.headers.update(self.agent)\n\n def _populate_proxy(self, num=5):\n proxy_pool = []\n\n async def show(proxies):\n while True:\n proxy = await proxies.get()\n if proxy is None: break\n proxy_pool.append(proxy)\n print('Found proxy: %s' % proxy)\n\n proxies = asyncio.Queue()\n broker = Broker(proxies)\n tasks = asyncio.gather(\n broker.find(types=[\"HTTPS\"], limit=10),\n show(proxies))\n loop = asyncio.get_event_loop()\n loop.run_until_complete(tasks)\n return [str(t.host) + \":\" + str(t.port) for t in proxy_pool]\n\n def _get_request(self, url=\"default\", rotate_proxy=False):\n if self.rotate_proxy is True:\n rotate_proxy = True\n if url == \"default\":\n url = self.url\n if rotate_proxy is True:\n loop = 0\n while loop < 6:\n if len(self.proxy_pool) != 0:\n proxy = self.proxy_pool.pop()\n print(proxy)\n try:\n response = requests.get(url, proxies={\"http\": proxy, \"https\": proxy})\n if response.status_code == 200:\n print(\"response sucessful!\")\n return response\n except:\n continue\n else:\n self.proxy_pool = self._populate_proxy(6)\n loop += 1\n print(\"last batch doesnt work\")\n return \"all failed\"\n else:\n response = requests.get(url, headers=self.headers)\n if response.status_code == 200:\n print(\"successful request!\")\n return response\n else:\n return \"fail to connect\"\n\n def _get_soup(self, request, parser=\"html.parser\"):\n soup = BeautifulSoup(request.content, parser)\n return soup\n\n\nclass foot_locker(Scraper):\n\n def check_stock(self, soup):\n content_url = \\\n soup.find(\"div\", {\"class\": \"fl-load-animation\", \"data-ajaxcontent\": \"fl-productDetailsSizeSelection\"})[\n \"data-ajaxcontent-url\"]\n print(content_url)\n new_response = self._get_request(url=content_url)\n new_soup = self._get_soup(new_response)\n sizes = new_soup.find_all(\"button\", {\"class\": r'\\\"fl-product-size--item\\\"',\"type\":re.compile(r\".+EU.+\")})\n for size in sizes:\n print(re.sub(r\"\\\\n\", \"\", size.get_text()))\n return sizes\n\n\n def open_browser(self, with_proxy=False):\n profile = webdriver.FirefoxProfile()\n if with_proxy is True:\n profile.set_preference(\"network.proxy.type\", 1)\n profile.set_preference(\"network.proxy.socks\", \"127.0.0.1\")\n profile.set_preference(\"network.proxy.socks_port\", 9050)\n\n profile.update_preferences()\n\n self.driver = webdriver.Firefox(profile, executable_path='./geckodriver')\n self.driver.get(self.url)\n WebDriverWait(self.driver, 20)\n\n def 
add_to_cart(self):\n        select_size_button = self.driver.find_elements_by_xpath(r'//*[@id=\"fitanalytics_sizecontainer\"]/section[1]/div/button[10]')[0]\n        self.driver.execute_script(\"arguments[0].click();\", select_size_button)\n        wait = WebDriverWait(self.driver, 10)\n        # wait.until() requires a condition; assumption: wait for the\n        # add-to-cart form (its id comes from the xpath just below)\n        element = wait.until(EC.presence_of_element_located((By.ID, \"add-to-cart-form\")))\n        add_to_cart_button = self.driver.find_elements_by_xpath(r'//*[@id=\"add-to-cart-form\"]/div/div[3]/div/div[7]/button')[0]\n        self.driver.execute_script(\"arguments[0].click();\", add_to_cart_button)\n        WebDriverWait(self.driver, 30)\n\n    def check_out(self):\n        #WebDriverWait(self.driver, 30)\n        cart_amount = self.driver.find_element_by_class_name(\"fl-header--mini-cart--items-count\").text\n        print(cart_amount)\n        if int(cart_amount) == 1:\n            upright_cart = self.driver.find_elements_by_xpath(r'//*[@id=\"flcomponentheaderfull\"]/div/div[1]/span[1]/div/div[1]')[0]\n            self.driver.execute_script(\"arguments[0].click();\", upright_cart)\n            jetzt_kaufen = self.driver.find_elements_by_xpath(r'//*[@id=\"flcomponentheaderfull\"]/div/div[1]/span[1]/div/div[2]/div/div[2]/div[7]/div[1]/a')[0]\n            if jetzt_kaufen.is_displayed() is True:\n                self.driver.execute_script(\"arguments[0].click();\", jetzt_kaufen)\n            # NOTE: this builds an expected condition but never waits on it\n            EC.presence_of_element_located((By.ID, \"myDynamicElement\"))\n            FORTFAHREN = self.driver.find_elements_by_xpath(r'/html/body/main/div/div[2]/div[1]/div[2]/div/div/div/a')[0]\n            #FORTFAHREN = WebDriverWait(self.driver, 30).until(EC.element_to_be_clickable((By.XPATH, r'/html/body/main/div/div[2]/div[1]/div[2]/div/div/div/a')))\n            if FORTFAHREN.is_displayed() is True:\n                self.driver.execute_script(\"arguments[0].click();\", FORTFAHREN)\n            element = WebDriverWait(self.driver, 30).until(\n                EC.presence_of_element_located((By.XPATH, r\"/html/body/main/div/div[2]/div/ol/li[1]\"))\n            )\n            # fill in billing info\n            Vorname = self.driver.find_element_by_id(\"billing_FirstNamecheckout-billing-address-form\")\n            Vorname.send_keys('Sitong')\n            Nachname = self.driver.find_element_by_id(\"billing_LastName\")\n            Nachname.send_keys('Ye')\n            Adresse = self.driver.find_element_by_id(\"predict_checkout-billing-address-form\")\n            Adresse.send_keys('Kastellstr. 6, 53227, Bonn')\n            Tel = self.driver.find_element_by_id(\"billing_PhoneHomecheckout-billing-address-form\")\n            Tel.send_keys('015174211571')\n            Day = self.driver.find_element_by_id(\"Day\")\n            Day.send_keys('04')\n            Month = self.driver.find_element_by_id(\"Month\")\n            Month.send_keys('11')\n            Year = self.driver.find_element_by_id(\"Year\")\n            Year.send_keys('1994')\n            email = self.driver.find_element_by_id(\"fl-login-input-email-billing\")\n            email.send_keys('[email protected]')\n            passwort = self.driver.find_element_by_id(\"fl-input-password-checkout-address-panel\")\n            passwort.send_keys(\"Nike941104!\")\n            WebDriverWait(self.driver, 20)\n            # self.browser is never assigned and find_element_by_class does not\n            # exist; use the driver and a CSS selector for the multi-class label\n            self.driver.find_element_by_css_selector(\".fl-checkbox--label.fl-checkbox--label__top-aligned\").click()\n            WebDriverWait(self.driver, 3)\n            self.driver.find_element_by_css_selector(\".fl-checkbox--label\").click()\n\n            JETZTKAUFEN = self.driver.find_element_by_xpath(\"/html/body/main/div/div[2]/form/div/div[5]/button[2]\")\n            JETZTKAUFEN.click()\n        else:\n            return False\n        WebDriverWait(self.driver, 5)\n\n\n\ndef test(check_stock_test=True,\n         populate_proxy_test=True,\n         add_to_cart_test=False\n         ):\n    new_shoe = foot_locker(\"https://www.footlocker.de/de/p/nike-air-force-1-07-x-3m-men-shoes-118661?v=314108366304\",\n                           rotate_proxy=False,\n                           agent=\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.14; rv:85.0) Gecko/20100101 Firefox/85.0\")\n    response = new_shoe._get_request()\n    print(response)\n    soup = new_shoe._get_soup(response)\n    if check_stock_test is True:\n        new_shoe.check_stock(soup)\n        new_shoe.open_browser()\n        new_shoe.add_to_cart()\n        if input(\"continue?\")==\"y\":\n            new_shoe.check_out()\n    else:\n        new_shoe.add_to_cart()\n        WebDriverWait(new_shoe.driver, 100)\n\n\nif __name__ == \"__main__\":\n    test()\n"
}
] | 4 |
murilocamargos/comp-evolutiva-ga
|
https://github.com/murilocamargos/comp-evolutiva-ga
|
8252e805232f4f007fce314ff184d1b00bdaacc4
|
6159bc6a7621c198f62b506ca89efe218bc24b26
|
32f13ed5ffc70d6377673b611a1616f34542e142
|
refs/heads/master
| 2021-01-20T15:27:19.521093 | 2020-10-08T12:51:22 | 2020-10-08T12:51:22 | 82,815,949 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4690370261669159,
"alphanum_fraction": 0.48088890314102173,
"avg_line_length": 35.69565200805664,
"blob_id": "065e3a8a08fdef921bd767e0dd83ab86bf9a7fb4",
"content_id": "33f940021e5ea31ad3f3146a158cbb9d46643e12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3386,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 92,
"path": "/evcomp/pop.py",
"repo_name": "murilocamargos/comp-evolutiva-ga",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\n\nclass Pop(object):\n def __init__(self, pop, fitness):\n self.pop = pop\n self.fitness = fitness\n self.size = len(pop)\n if self.size > 0:\n self.dim = len(pop[0])\n\n def eval(self, npop = 'self'):\n pop = self.pop if type(npop) != np.ndarray else npop\n return np.apply_along_axis(self.fitness, 1, pop)\n\n def selection(self, stype):\n S = np.zeros((self.size, self.dim))\n\n if stype == 'roulette':\n f = self.eval()\n a = np.cumsum(f/(len(f)*np.mean(f)))\n for i in range(len(f)):\n ind = self.pop[((a > np.random.rand()) == True).tostring().find(b'\\x01')]\n S[i,:] = ind\n return Pop(S, self.fitness)\n\n if stype == 'tournament':\n k = min(2, self.size)\n a = np.arange(self.size)\n for i in range(self.size):\n np.random.shuffle(a)\n sol = self.pop[a[:k],:]\n win = sol[np.argsort(self.eval(sol))[-1]]\n S[i,:] = win\n return Pop(S, self.fitness)\n \n raise Exception('O operador de seleção `' + stype + '` ainda não foi implementado!')\n\n def crossover(self, ctype, rate):\n Q = np.zeros((1, self.dim))\n for i in np.arange(0, self.size, 2):\n if i+1 <= self.size and np.random.rand() <= rate:\n father1, father2 = np.round(np.random.rand(2) * (self.size - 1))\n father1 = self.pop[int(father1)]\n father2 = self.pop[int(father2)]\n\n if ctype == '1cp':\n cp = int(np.round(np.random.rand() * (self.dim - 1)))\n s1 = np.append(father1[cp:], father2[:cp])\n s2 = np.append(father1[cp:], father1[:cp])\n sons = [s1, s2]\n\n elif ctype == 'uniform':\n ff = np.append(father1, father2)\n pD = self.dim\n idx = np.round(np.random.rand(pD)) * pD + np.arange(pD)\n sons = [ff[idx.astype(int)]]\n\n else:\n raise Exception('O operador de cruzamento `' + ctype + '` ainda não foi implementado!')\n\n Q = np.vstack((Q, sons))\n\n return Pop(Q[1:,:], self.fitness)\n\n def mutation(self, mtype, rate):\n for i in range(self.size):\n if mtype == '1bit':\n bit = np.random.randint(0,36)\n if np.random.rand() <= rate:\n self.pop[i][bit] = int(not self.pop[i][bit])\n\n elif mtype == 'uniform':\n idx = np.where(np.random.rand(1, self.dim) <= rate)[1]\n self.pop[i][idx] = abs(self.pop[i] - 1)[idx]\n else:\n raise Exception('O operador de mutação `' + mtype + '` ainda não foi implementado!')\n\n return Pop(self.pop, self.fitness)\n\n def substitution(self, stype):\n if stype == 'elitism':\n fit = self.eval()\n idx = np.argsort(fit)[-self.size:]\n elif stype == 'random':\n idx = np.arange(self.size)\n np.random.shuffle(idx)\n idx = idx[:self.size]\n else:\n raise Exception('O método de substituição `' + stype + '` ainda não foi implementado!')\n\n return Pop(self.pop[idx], self.fitness)"
},
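The roulette branch in `selection` above finds the first index whose cumulative probability exceeds a uniform draw by converting the boolean mask to bytes and searching for `\x01`. `np.searchsorted` expresses the same idea directly; a small self-contained check:

```python
import numpy as np

f = np.array([1.0, 3.0, 6.0])    # fitness values
# cumulative selection probabilities -> [0.1, 0.4, 1.0];
# same as f/(len(f)*np.mean(f)) in the record above
a = np.cumsum(f / f.sum())

r = 0.35
byte_trick = ((a > r) == True).tobytes().find(b'\x01')
direct = int(np.searchsorted(a, r, side='right'))

# both pick index 1: the first individual whose cumulative share exceeds r
assert byte_trick == direct == 1
```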
{
"alpha_fraction": 0.38118812441825867,
"alphanum_fraction": 0.5111386179924011,
"avg_line_length": 30.115385055541992,
"blob_id": "f976a54bfd350d05ce9f447c1e48ffa6c575f46c",
"content_id": "0b17241c1170260816b44972ae27d49c04704417",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 808,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 26,
"path": "/run.py",
"repo_name": "murilocamargos/comp-evolutiva-ga",
"src_encoding": "UTF-8",
"text": "from evcomp.ga import GA\n\ndef objectiveFunction(bits):\n b = [0] + list(bits)\n return 9 + b[2]*b[5] - b[23]*b[14]\\\n + b[24]*b[4] - b[21]*b[10] + b[36]*b[15] - b[11]*b[26]\\\n + b[16]*b[17] + b[3]*b[33] + b[28]*b[19] + b[12]*b[34]\\\n - b[31]*b[32] - b[22]*b[25] + b[35]*b[27] - b[29]*b[7]\\\n + b[8]*b[13] - b[6]*b[9] + b[18]*b[20] - b[1]*b[30]\\\n + b[23]*b[4] + b[21]*b[15] + b[26]*b[16] + b[31]*b[12]\\\n + b[25]*b[19] + b[7]*b[8] + b[9]*b[18] + b[1]*b[33]\n\ne = GA(popSize = 30,\n popDim = 36,\n representation = 'binary',\n fitnessEval = objectiveFunction,\n crossRate = 0.8,\n crossType = 'uniform',\n selectionType = 'roulette',\n mutationRate = 0.025,\n mutationType = 'uniform',\n maxEpochs = 100,\n substitutionType = 'elitism',\n plotFitness = True)\n\ne.optimize()"
},
{
"alpha_fraction": 0.5570887327194214,
"alphanum_fraction": 0.5622294545173645,
"avg_line_length": 37.51041793823242,
"blob_id": "c19a02b7256961b9bdc3b8fede26d89acf0b9821",
"content_id": "91e4fcc470dd6fda01348d5441234226f6e9e39c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3699,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 96,
"path": "/evcomp/ga.py",
"repo_name": "murilocamargos/comp-evolutiva-ga",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom .pop import Pop\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport copy\nfrom pathlib import Path\nimport os\n\nclass GA(object):\n def __init__(self, popDim, crossRate, mutationRate, fitnessEval, popSize = 30, \\\n representation = 'binary', crossType = 'uniform', selectionType = 'roulette',\\\n mutationType = 'uniform', maxEpochs = 100, substitutionType = 'elitism',\n resultsPath = None, plotFitness = False):\n self.config = {\n 'popDim': popDim,\n 'crossRate': crossRate,\n 'mutationRate': mutationRate,\n 'fitnessEval': fitnessEval,\n 'popSize': popSize,\n 'representation': representation,\n 'crossType': crossType,\n 'selectionType': selectionType,\n 'mutationType': mutationType,\n 'maxEpochs': maxEpochs,\n 'substitutionType': substitutionType,\n 'resultsPath': resultsPath,\n 'plotFitness': plotFitness\n }\n \n if resultsPath is not None:\n path = Path(resultsPath)\n if path.is_dir():\n raise Exception(\"You must provide a file path.\")\n \n if not os.access(path.parent, os.W_OK):\n raise Exception(f\"You do not have writting permissions to `{path.parent}`.\")\n\n self.config['resultsPath'] = path\n\n\n def checkConfig(self, indexList):\n for i in indexList:\n if not i in self.config:\n raise Exception('Você precisa definir a configuração `' + i + '`')\n\n def randomPop(self):\n self.checkConfig(['representation', 'popDim', 'popSize', 'fitnessEval'])\n c = self.config\n\n if c['representation'] == 'binary':\n return Pop(np.round(np.random.rand(c['popSize'], c['popDim'])), c['fitnessEval'])\n\n def optimize(self):\n self.checkConfig(['maxEpochs', 'substitutionType', 'selectionType', 'crossType', 'crossRate', 'mutationType', 'mutationRate'])\n c = self.config\n\n p = self.randomPop()\n\n results = {'Epoch': [i for i in range(c['maxEpochs'])],\\\n 'PopFitness': [0 for _ in range(c['maxEpochs'])],\\\n 'BstFitness': [0 for _ in range(c['maxEpochs'])],\\\n 'NumDistn': [0 for _ in range(c['maxEpochs'])]}\n\n for e in range(c['maxEpochs']):\n pp = copy.deepcopy(p)\n a = pp.selection(c['selectionType'])\n b = a.crossover(c['crossType'], c['crossRate'])\n d = b.mutation(c['mutationType'], c['mutationRate'])\n # Join sets\n p.pop = np.vstack((p.pop, d.pop))\n p = p.substitution(c['substitutionType'])\n\n ft = p.eval()\n results['PopFitness'][e] = np.mean(ft)\n results['BstFitness'][e] = np.max(ft)\n #http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array\n results['NumDistn'][e] = np.max(np.unique(p.pop, axis=0).shape[0])\n \n resDf = pd.DataFrame(results)\n\n if c['resultsPath'] is not None:\n np.save(c['resultsPath'], (p, resDf))\n\n if c['plotFitness']:\n sns.set_style('darkgrid')\n _, ax = plt.subplots()\n sns.lineplot(x='Epoch', y='PopFitness', data=resDf, label='Average fitness', ax=ax)\n sns.lineplot(x='Epoch', y='BstFitness', data=resDf, label='Best fitness', ax=ax)\n sns.lineplot(x='Epoch', y='NumDistn', data=resDf, label='Num. of distinct solutions', ax=ax)\n ax.set_ylabel('PopFitness, BstFitness, NumDistn')\n plt.show()\n\n return p, resDf"
},
{
"alpha_fraction": 0.7631769180297852,
"alphanum_fraction": 0.7631769180297852,
"avg_line_length": 65,
"blob_id": "d62773f86fb433e55aac20211700a18a8ef6fbec",
"content_id": "63e1b10d4d1a34357db84fa34995ebe191763cef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1385,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 21,
"path": "/README.md",
"repo_name": "murilocamargos/comp-evolutiva-ga",
"src_encoding": "UTF-8",
"text": "# Evolutionary Computation - GA\nUndergaduation project on evolutionary computation algorithms for the Evolutionary Computation course in Unimontes. A general genetic algorithm (GA) is implemented in Python. The following operators are implemented:\n\n* **Selection**\n - Roulette Wheel - [https://en.wikipedia.org/wiki/Fitness_proportionate_selection](https://en.wikipedia.org/wiki/Fitness_proportionate_selection)\n - Binary Tournament - [https://www.geeksforgeeks.org/tournament-selection-ga/](https://www.geeksforgeeks.org/tournament-selection-ga/)\n* **Crossover**\n - Single-point - [https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)](https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)#Single-point_crossover)\n - Uniform - [https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)](https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)#Uniform_crossover)\n* **Mutation**\n - Single-bit uniform - [https://en.wikipedia.org/wiki/Mutation_(genetic_algorithm)](https://en.wikipedia.org/wiki/Mutation_(genetic_algorithm))\n - Uniform - [https://en.wikipedia.org/wiki/Mutation_(genetic_algorithm)](https://en.wikipedia.org/wiki/Mutation_(genetic_algorithm))\n* **Substitution**\n - Elitism - [https://en.wikipedia.org/wiki/Genetic_algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm#Elitism)\n - Random\n\n# Run\nTo run the project, use\n```\npython run.py\n```"
}
] | 4 |
liuhj2015/ai-chatbot-framework
|
https://github.com/liuhj2015/ai-chatbot-framework
|
1feb1c523b89a96ace2ab64430d6c699afb606de
|
0489c9586278211dd2a3cc41fe722d6ce0638fd9
|
949db0a939c4e658e02ea8097caeaf625a509e9e
|
refs/heads/master
| 2021-09-03T09:55:11.075758 | 2018-01-08T06:09:22 | 2018-01-08T06:09:22 | 108,958,215 | 0 | 0 | null | 2017-10-31T07:07:19 | 2017-11-20T13:33:37 | 2018-01-08T06:10:30 |
C
|
[
{
"alpha_fraction": 0.7462121248245239,
"alphanum_fraction": 0.7471590638160706,
"avg_line_length": 27.15999984741211,
"blob_id": "87cd241c8dcb96ea94a5ecbc750acbe1f4e709c1",
"content_id": "9a35bbd11dce6d14b998dbdedb3611d69b50db87",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2112,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 75,
"path": "/app/core/nlp.py",
"repo_name": "liuhj2015/ai-chatbot-framework",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nfrom nltk.tag.perceptron import PerceptronTagger\nfrom nltk import word_tokenize\nimport jieba\nimport jieba.posseg\nfrom app.commons.logger import logger\nfrom app import app\n\n# Load and initialize Perceptron tagger\ntagger = PerceptronTagger()\n\n\nChinese = app.config[\"CHINESE\"]\n\ndef posTaggerEnglish(sentence):\n tokenizedSentence = word_tokenize(sentence)\n posTaggedSentence = tagger.tag(tokenizedSentence)\n return posTaggedSentence\n\n\ndef posTagger(sentence):\n\n if Chinese:\n \n #return posTaggerEnglish(sentence)\n return posTaggerChinese(sentence)\n else:\n return posTaggerEnglish(sentence)\n\ndef posTaggerChinese(sentence):\n tokenizedSentence = jieba.posseg.cut(sentence)\n posTaggedSentence = [(token,postag) for token, postag in tokenizedSentence]\n return posTaggedSentence\n \n\ndef posTagAndLabelEnglish(sentence):\n taggedSentence = posTagger(sentence)\n taggedSentenceJson = []\n for token, postag in taggedSentence:\n taggedSentenceJson.append([token, postag, \"O\"])\n return taggedSentenceJson\n\ndef posTagAndLabel(sentence):\n \n if Chinese:\n return posTagAndLabelChinese(sentence)\n else:\n return posTagAndLabelEnglish(sentence)\n\ndef posTagAndLabelChinese(sentence):\n taggedSentence = posTaggerChinese(sentence)\n taggedSentenceJson = []\n for token,postag in taggedSentence:\n taggedSentenceJson.append([token, postag, \"0\"])\n return taggedSentenceJson\n\ndef sentenceTokenize(sentences):\n if Chinese:\n return sentenceTokenizeChinese(sentences)\n else:\n return sentenceTokenizeEnglish(sentences)\n\ndef sentenceTokenizeEnglish(sentences):\n tokenizedSentences = word_tokenize(sentences)\n tokenizedSentencesPlainText = \"\"\n for t in tokenizedSentences:\n tokenizedSentencesPlainText += \" \" + t\n return tokenizedSentencesPlainText\n\ndef sentenceTokenizeChinese(sentences):\n tokenizedSentences = jieba.cut(sentences)\n tokenizedSentencesPlainText = \"\"\n for t in tokenizedSentences:\n tokenizedSentencesPlainText += \" \" + t\n return tokenizedSentencesPlainText\n"
},
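`posTagAndLabel` above returns `[token, POS, "O"]` triples, i.e. sentences pre-labelled as all-outside for later IOB entity annotation. A sketch of what the Chinese path produces, using the sample sentence from this repo's test file (exact tokens and tags depend on jieba's dictionary, so treat the printed output as illustrative):

```python
import jieba.posseg

def pos_tag_and_label(sentence):
    # mirrors posTagAndLabelChinese: every token starts with the "O" label
    return [[token, postag, "O"] for token, postag in jieba.posseg.cut(sentence)]

triples = pos_tag_and_label("我要定机票")
print(triples)
# e.g. [['我', 'r', 'O'], ['要', 'v', 'O'], ['定', 'v', 'O'], ['机票', 'n', 'O']]
```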
{
"alpha_fraction": 0.6491646766662598,
"alphanum_fraction": 0.6658711433410645,
"avg_line_length": 14.185185432434082,
"blob_id": "119f87c09ac780711535d4ad8620bfdda156f25d",
"content_id": "efab93a5ac5de3c1376348db6bbb551dc53c37f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 27,
"path": "/app/core/textsimilarity.py",
"repo_name": "liuhj2015/ai-chatbot-framework",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\n\nimport jieba\nimport jieba.posseg as psseg\n\nclass TextSimilarity():\n\t\n\tdef posTager(self,text):\n\t\ttext = [(token,pos) for token,pos in psseg.cut(text)]\n\t\treturn text\n\n\n\tdef compute(self,text1,text2):\n\t\tposText1 = set(self.posTager(text1))\n\t\tposText2 = set(self.posTager(text2))\n\t\t\n\n\n\n\n\n\nif __name__ == '__main__':\n\t \n\t textSimilarity = TextSimilarity()\n\n\t textSimilarity.compute(\"我要定机票\",\"我要定车票\")\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6525821685791016,
"alphanum_fraction": 0.65727698802948,
"avg_line_length": 18.272727966308594,
"blob_id": "57affa127f3385472c8745531361da383df2a498",
"content_id": "8136b9b9095ca687966f2a5710cccb45cb0046f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 11,
"path": "/app/core/texttest.py",
"repo_name": "liuhj2015/ai-chatbot-framework",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\n\nfrom textsimilarity import TextSimilarity\n\n\nif __name__ == '__main__':\n \n textSimilarity = TextSimilarity()\n print textSimilarity.__dict__\n\n textSimilarity.compute(\"我要定机票\",\"我要定车票\")\n\n"
}
] | 3 |
brakeman/NLP-Models-Tensorflow
|
https://github.com/brakeman/NLP-Models-Tensorflow
|
eed060fb2684f704fc46614545f4d3327ac2d1ac
|
0f42c2ce7294a68e57ded31349c1b8c73cae8d5f
|
55ab9b8f795b7dddf79c22b0679d76ea027e051b
|
refs/heads/master
| 2020-04-13T08:07:05.263986 | 2018-12-24T08:49:31 | 2018-12-24T08:49:31 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.39883720874786377,
"alphanum_fraction": 0.5813953280448914,
"avg_line_length": 39.9523811340332,
"blob_id": "e495c3b7c283da3ff7c96a9ad5523edcc30ab2c3",
"content_id": "e2ae048417f8ee8db01654b69556e7ad4e983d6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1088,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 21,
"path": "/speech-to-text/7.wavenet/README.md",
"repo_name": "brakeman/NLP-Models-Tensorflow",
"src_encoding": "UTF-8",
"text": "## How-to\n\n1. First, you need to run [download.ipynb](download.ipynb)\n2. Make sure `data` folder is same directory with [wavenet.py](wavenet.py)\n3. Run [wavenet.py](wavenet.py),\n```bash\npython3 wavenet.py\n```\n\nTraining output,\n```text\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 40.63it/s, cost=11.1]\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 40.62it/s, cost=11.9]\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 40.66it/s, cost=11.6]\nminibatch loop: 100%|██████████████████| 88/88 [00:02<00:00, 40.62it/s, cost=11]\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 40.59it/s, cost=11.4]\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 41.09it/s, cost=11.3]\nminibatch loop: 100%|████████████████| 88/88 [00:02<00:00, 41.05it/s, cost=13.1]\nreal: say the word pole\npredicted: say the word toate\n```\n"
},
{
"alpha_fraction": 0.5865764021873474,
"alphanum_fraction": 0.592780590057373,
"avg_line_length": 28.53333282470703,
"blob_id": "65681ff3be80e8a99418c3437787cca10a7f90b6",
"content_id": "4ef8beafc219719622510097afc8049e5c8eb6b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1773,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 60,
"path": "/Summarization/5.skip-thought/smashword.py",
"repo_name": "brakeman/NLP-Models-Tensorflow",
"src_encoding": "UTF-8",
"text": "\"\"\"Book scraping script for smashwords.com.\n\nUsage: python smashwords.py [scrape_link] [output_dir (defaults to data/books)]\n\"\"\"\n\nimport os\nimport re\nimport sys\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef browse(url):\n \"\"\"Retrieve the server response contents of the given URL.\"\"\"\n # A cookie is required to allow books with adult content to be served.\n return requests.get(url, cookies={\"adultOff\": \"no\"}).text\n\n\ndef to_filename(s):\n \"\"\"Convert the given string to a valid filename.\"\"\"\n s = str(s).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.]', '', s)\n\n\nif __name__ == '__main__':\n\n write_dir = 'books'\n if len(sys.argv) > 2:\n write_dir = sys.argv[2]\n\n count = 0\n num_downloaded = 0\n while True:\n res = browse((sys.argv[1] + '/{}').format(count))\n soup = BeautifulSoup(res, 'html.parser')\n for div in soup.find_all('div', {'class': 'library-book'}):\n \n # Detect language\n language_html = div.find('div', {'class': 'subnote'})\n language_html = language_html.find_all('span', {'class': 'text-nowrap'})\n language_html = ''.join(map(lambda tag: tag.get_text(), language_html))\n \n if 'english' in language_html.lower():\n \n # Get title and download link\n link_html = div.find('a', {'class': 'library-title'})\n title = link_html.get_text()\n link = link_html.get('href').split('/')\n link[-2] = 'download'\n link.append('6') # text file format\n \n download = browse('/'.join(link))\n if not download.startswith('<!DOCTYPE html>'):\n num_downloaded += 1\n print(num_downloaded, title, sep='\\t')\n with open(os.path.join(write_dir, to_filename(title)), 'w') as f:\n f.write(download)\n \n count += 20\n\n"
},
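`to_filename` above sanitizes scraped book titles before writing them to disk; restating the helper from the record for a quick check of what it keeps and drops (spaces become underscores, then everything outside word characters, dots and hyphens is removed):

```python
import re

def to_filename(s):
    s = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', s)

assert to_filename("My Book: Part 1!") == "My_Book_Part_1"
assert to_filename("  a/b\\c.txt ") == "abc.txt"
```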
{
"alpha_fraction": 0.7230769395828247,
"alphanum_fraction": 0.7569230794906616,
"avg_line_length": 31.5,
"blob_id": "8f39068bf5dc8d3fa1fdc9ac937cf6826a28b7f6",
"content_id": "8c4b1f34f0a68dbccbd935e8d490b930dfefccea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 325,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 10,
"path": "/Summarization/5.skip-thought/README.md",
"repo_name": "brakeman/NLP-Models-Tensorflow",
"src_encoding": "UTF-8",
"text": "## How-to\n\n1. Run the crawler to get some data,\n```bash\npython smashwords.py https://www.smashwords.com/books/category/1/newest/0/free/medium\n```\n\n2. Download google vectors from [google drive](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing) and extract in here.\n\n3. Run `5.skip-thought.ipynb`\n"
}
] | 3 |
eugenehp/jingtrang
|
https://github.com/eugenehp/jingtrang
|
a622c55839d14466d100af46a435f215fda15c49
|
245df07d40773fc09fa51def13149efe5aa53e4f
|
eefecdcf6625795d8aa3c5a84fc2b33024e19c54
|
refs/heads/master
| 2023-03-21T11:45:31.694039 | 2019-04-23T16:39:13 | 2019-04-23T16:39:13 | 345,873,242 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6340579986572266,
"alphanum_fraction": 0.6394927501678467,
"avg_line_length": 23,
"blob_id": "534be69c4e689477f51b888941bb0a70dead3c28",
"content_id": "dd39b7bcbf77ab3b969c4f5755d72adb6561b322",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 23,
"path": "/jingtrang/__init__.py",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\nimport sys\n\nimport pkg_resources\n\nHERE = os.path.split(__file__)[0]\nJING_JAR = os.path.join(HERE, \"jing.jar\")\nTRANG_JAR = os.path.join(HERE, \"trang.jar\")\n\n\ndef jing():\n cmd = [\"java\", \"-jar\", JING_JAR]\n cmd = cmd + sys.argv[1:]\n res = subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin)\n sys.exit(res)\n\n\ndef trang():\n cmd = [\"java\", \"-jar\", TRANG_JAR]\n cmd = cmd + sys.argv[1:]\n res = subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin)\n sys.exit(res)\n"
},
{
"alpha_fraction": 0.6523972749710083,
"alphanum_fraction": 0.670376718044281,
"avg_line_length": 29.736841201782227,
"blob_id": "ad28788e406dc865f47ce61bfd41a3e73732c87d",
"content_id": "d61b11e847f728cf0e61062c9a9f7db30331537a",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 1168,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 38,
"path": "/pyproject.toml",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"jingtrang\"\nversion = \"0.1.2\"\ndescription = \"Wrapping jing and trang RELAX NG tools into Python script\"\nauthors = [\"Jan Vlcinsky <[email protected]>\"]\nhomepage = \"http://bitbucket.org/tamtamresearch/jingtrang\"\nrepository = \"http://bitbucket.org/tamtamresearch/jingtrang\"\nreadme = \"README.rst\"\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Java\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Software Development :: Quality Assurance\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Utilities\",\n]\n[tool.poetry.scripts]\npyjing = \"jingtrang:jing\"\npytrang = \"jingtrang:trang\"\n\n[tool.poetry.dependencies]\npython = \"~2.7 || ^3.4\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^4.4\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n"
},
{
"alpha_fraction": 0.6844964623451233,
"alphanum_fraction": 0.6861680150032043,
"avg_line_length": 23.171716690063477,
"blob_id": "50ce7a8c5c675948338d093510c7b05188447157",
"content_id": "79045531c936fead982568606c85276efcfde4ca",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2393,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 99,
"path": "/tests/test_it.py",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "import subprocess\n\nimport py\nimport pytest\n\n\[email protected](\n params=[\"tests/dataset-rdstmc\", \"tests/dataset-wiki\", \"tests/dataset-rntutor\"]\n)\ndef datasetdir(request):\n return py.path.local(request.param)\n\n\[email protected]\ndef messages(datasetdir):\n msgdir = datasetdir.join(\"messages\")\n return msgdir.listdir(fil=\"*.xml\")\n\n\[email protected]\ndef rncdir(datasetdir):\n return datasetdir.join(\"schemas\")\n\n\[email protected]\ndef rootrnc(rncdir):\n return rncdir.join(\"root.rnc\")\n\n\[email protected]\ndef rncschemas(rootrnc):\n return rootrnc.dirpath().listdir(\"*.rnc\")\n\n\ndef test_validate_by_rnc_onemsg(rootrnc, messages):\n cmd = [\"pyjing\", \"-c\"]\n cmd.append(rootrnc.strpath)\n cmd.append(messages[0].strpath)\n subprocess.check_call(cmd)\n\n\ndef test_validate_by_rnc_allmsgs(rootrnc, messages):\n cmd = [\"pyjing\", \"-c\"]\n cmd.append(rootrnc.strpath)\n cmd.extend(map(str, messages))\n subprocess.check_call(cmd)\n\n\ndef test_rnc2rng(rootrnc, tmpdir, rncschemas):\n cmd = [\"pytrang\"]\n rngname = rootrnc.new(dirname=tmpdir, ext=\".rng\")\n cmd.append(rootrnc.strpath)\n cmd.append(rngname.strpath)\n subprocess.check_call(cmd)\n rngnames = tmpdir.listdir(fil=\"*.rng\")\n assert len(rngnames) == len(rncschemas)\n for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):\n assert rnc.purebasename == rng.purebasename\n\n\n\"\"\"RNG section ========================\n\"\"\"\n\n\[email protected]\ndef rngschemas(rootrnc, tmpdir, rncschemas):\n cmd = [\"pytrang\"]\n rngname = rootrnc.new(dirname=tmpdir, ext=\".rng\")\n cmd.append(rootrnc.strpath)\n cmd.append(rngname.strpath)\n subprocess.check_call(cmd)\n rngnames = tmpdir.listdir(fil=\"*.rng\")\n assert len(rngnames) == len(rncschemas)\n for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):\n assert rnc.purebasename == rng.purebasename\n\n return rngnames\n\n\[email protected]\ndef rootrng(rngschemas):\n rootschema = rngschemas[0].new(basename=\"root.rng\")\n assert rootschema in rngschemas\n rootschema.ensure()\n return rootschema\n\n\ndef test_validate_by_rng_onemsg(rootrng, messages):\n cmd = [\"pyjing\"]\n cmd.append(rootrng.strpath)\n cmd.append(messages[0].strpath)\n subprocess.check_call(cmd)\n\n\ndef test_validate_by_rng_allmsgs(rootrng, messages):\n cmd = [\"pyjing\"]\n cmd.append(rootrng.strpath)\n cmd.extend(map(str, messages))\n subprocess.check_call(cmd)\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7109573483467102,
"avg_line_length": 38.054054260253906,
"blob_id": "ab39ad202197799de4131fe8bd914ca666df4f37",
"content_id": "68c1d8dd1456479895733295a3fe63cde9e03293",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4335,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 111,
"path": "/README.rst",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "=====================================================================\njingtrang (compact and XML) RELAX NG schema validator and transformer\n=====================================================================\n\n`jingtrang` is wrapper of Java based command line tools for working with RELAX NG schemas.\n\nMotivation\n==========\n\nRELAX NG and especially compact form of describing XML structures is very efficient.\n\nUnfortunately, with Python, support is rather limited:\n\n- lxml allows validation of XML based schemas only\n- lxml is based on libxml2 library, which has some minor limitations with regards to RELAX NG validation (in some cases you find error messages like \"TODO\").\n- package rnc2rng is promising conversion from XML to compact form, but is not really usable (it simply does not work).\n\nIn general, it is even not very easy finding command line validator for compact RELAX NG syntax.\n\nRNV is very promising, but version on Sourceforge is rather old and version on GitHub does not have up todate installation instructions.\n\nOther RELAX NG related tools mostly ignore compact syntax.\n\nThe only exception to this is jingtrang project hosted on googlecode.\n\nProblem with this tool is, that it takes few steps more then is really convenient to have it easily installed for daily use from console.\n\nAs our team is working on Linux as well as on MS Windows, I was looking for cross platform command line solution.\n\nAs jingtrang commands (jing and trang) seem to be functioning very well, I have decided to write this jing and trang wrapper.\n\nDelivering (py)jing and (py)trang command line tool\n===================================================\n\nOriginal command line tools are named jing (validator) and trang (transforming schemas).\n\nTo prevent naming conflict, prefix `py` is used.\n\nCommand line interface is exactly the same, as if using it with java interpreter, only introductory \"java -jar <jarfile.jar>\" part is not necessary to call.\n\nHere are described only most popular use cases, for more options, consult original jingtrang documentation (download from googlecode or elsewhere and see included html doc).\n\nInstallation\n============\n\nPrerequisites are:\n\n- Python (2.7 or 3.4 and higher)\n- Java\n- pip\n\nInstall it by::\n\n $ pip install jingtrang\n\nAfter that, two new scripts are installed:\n\n- pyjing - RELAX NG validator\n- pytrang - utility for transforming between XML/compact syntax/XSD/few more formats\n\n.. 
note:: There is no need to install `jing.jar` and `trang.jar` files as they are already included in `jintrang` Python package.\n\n\npyjing - RELAX NG validator (XML as well as compact syntax)\n-----------------------------------------------------------\n\n`pyjing` serves for validating XML documents against XML as well as compact syntax RELAX NG schemas:\n\n $ pyjing\n Jing version 20091111\n usage: java com.thaiopensource.relaxng.util.Driver [-i] [-c] [-s] [-t] [-C catalogFile] [-e encoding] RNGFile XMLFile...\n RELAX NG is a schema language for XML\n See http://relaxng.org/ for more information.\n\nTo validate XML using XML syntax RELAX NG schema::\n\n $ pyjing schema.rng file.xml\n\nTo validate using compact syntax schema, use `-c` switch::\n\n $ pyjing -c schema.rnc file.xml\n\nValidation of multiple XML files at once is possible::\n\n $ pyjing schema.rnc samples/*.xml\n\npytrang - Schema format convertor\n---------------------------------\n\n`pytrang` is \"schema language translator\" supporting not only RELAX NG XML and\ncompact syntax, but also DTD, XSD. It even allows generating initial schema\nbased on sample XML document.\n\nTry to run it::\n\n $ pytrang\n fatal: at least two arguments are required\n Trang version 20091111\n usage: java com.thaiopensource.relaxng.translate.Driver [-C catalogFileOrUri] [-I rng|rnc|dtd|xml] [-O rng|rnc|dtd|xsd] [-i input-param] [-o output-param] inputFileOrUri ... outputFile\n\n`pytrang` is able auto-detect format from file extension, so you can mostly directly convert without specifying explicitly, what input and output formats are to be used.\n\nConverting compact syntax to XML one can be done by::\n\n $ pytrang root.rnc root.rng\n\n.. note:: If you use `include` in your schema, all included schemas will be converted too.\n\nTo generate initial RELAX NG schema in compact format from sample XML file, try::\n\n $ pytrang sample.xml initial.rnc\n"
},
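Since `pyjing` and `pytrang` are plain console scripts that exec java and propagate the exit code, validation can also be driven from Python; a minimal sketch via subprocess, assuming Python 3.7+, the package installed, and `schema.rnc` / `file.xml` present:

```python
import subprocess

# pyjing exits non-zero when validation fails, so check the return code
result = subprocess.run(["pyjing", "-c", "schema.rnc", "file.xml"],
                        capture_output=True, text=True)
if result.returncode == 0:
    print("document is valid")
else:
    print("validation errors:")
    print(result.stdout or result.stderr)
```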
{
"alpha_fraction": 0.6358024477958679,
"alphanum_fraction": 0.6975308656692505,
"avg_line_length": 15.199999809265137,
"blob_id": "e1b2502bbd488e81ff2c46b79859d982277c5dfa",
"content_id": "7f363292d991b2c6b8df76789c6e5aa119f5c849",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 162,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 10,
"path": "/tox.ini",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "[tox]\nisolated_build = true\nenvlist = py27,py34,py35,py36,py37\n\n[testenv]\nwhitelist_externals =\n poetry\ncommands =\n poetry install -v\n pytest -sv tests/\n"
},
{
"alpha_fraction": 0.46846845746040344,
"alphanum_fraction": 0.6081081032752991,
"avg_line_length": 16.076923370361328,
"blob_id": "20847dc71ba9112687a4c729602b95cbb1467bff",
"content_id": "a2a7a7176ba6a68b4d5d074e8fc4ba5c81ae7d16",
"detected_licenses": [
"BSD-3-Clause",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 444,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 26,
"path": "/NEWS.rst",
"repo_name": "eugenehp/jingtrang",
"src_encoding": "UTF-8",
"text": "News\n====\n\n0.1.2 - 2019-04-23\n-------------------\n\n- rewritten from pbr to poetry\n- fixed README.rst\n- fixed tests for py27, py34, py35, py36\n\n\n0.1.1 - 2015-07-27\n------------------\n\n- tox.ini tests for py27 and py36\n- added LICENSE.rst\n- setup.py rewritten to pbr\n\n0.1dev - 2014-08-23\n-------------------\n\n- initial version\n- includes py.test and tox test suite\n- included jar files for:\n 1. Jing version 20091111\n 2. Trang version 20091111\n"
}
] | 6 |
Suyongzhi1997/ArticleSpider
|
https://github.com/Suyongzhi1997/ArticleSpider
|
a616d53e0bd4825800afa6b7e7d501756e314593
|
cd47398a9c90dc85dd663dc8f8e085769928518e
|
8bb5b0a4a49ced59fbe9cd39d95d520fff621c58
|
refs/heads/master
| 2022-11-06T09:59:05.533216 | 2020-06-23T11:09:34 | 2020-06-23T11:09:34 | 273,649,314 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6744966506958008,
"alphanum_fraction": 0.6767337918281555,
"avg_line_length": 23.83333396911621,
"blob_id": "f4ef73ca26f8b2fe29c68cc9ed784c171bd0a2c3",
"content_id": "8b95e95ffd9d9b9c94c5ecf1e960f9d2b257bc48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 36,
"path": "/ArticleSpider/items.py",
"repo_name": "Suyongzhi1997/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport scrapy\nimport datetime\nfrom scrapy.loader.processors import MapCompose, TakeFirst\nfrom scrapy.loader import ItemLoader\n\n\nclass ArticlespiderItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\n\ndef date_convert(value):\n create_time = value.split(' ')[0]\n try:\n create_time = datetime.datetime.strptime(create_time, '%Y-%m-%d').date()\n except Exception as e:\n create_time = datetime.datetime.now().date()\n return create_time\n\n\nclass ArticleItemLoader(ItemLoader):\n default_output_processor = TakeFirst()\n\n\nclass ArticleItem(scrapy.Item):\n title = scrapy.Field()\n url = scrapy.Field()\n url_object_id = scrapy.Field()\n create_time = scrapy.Field(\n input_processor=MapCompose(date_convert)\n )\n content = scrapy.Field()\n # front_image_path = scrapy.Field()\n"
},
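`date_convert` above tolerates malformed dates by falling back to today's date; restated from the record for a quick check:

```python
import datetime

def date_convert(value):
    create_time = value.split(' ')[0]
    try:
        create_time = datetime.datetime.strptime(create_time, '%Y-%m-%d').date()
    except Exception:
        create_time = datetime.datetime.now().date()
    return create_time

assert date_convert('2020-06-23 11:09') == datetime.date(2020, 6, 23)
# anything unparseable falls back to the current date
assert date_convert('not a date') == datetime.datetime.now().date()
```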
{
"alpha_fraction": 0.8780487775802612,
"alphanum_fraction": 0.8780487775802612,
"avg_line_length": 19.5,
"blob_id": "0ec47981f676bbe860b3614eb334463437d5e237",
"content_id": "bbe1ee50fc7747b210807b150071dc2be06dfd9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Suyongzhi1997/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# ArticleSpider\nScrapy+django搭建分布式爬虫搜索引擎\n"
},
{
"alpha_fraction": 0.5891164541244507,
"alphanum_fraction": 0.5924793481826782,
"avg_line_length": 27.946903228759766,
"blob_id": "7ae80a1b75722bd894d31bad4bc7d20dad5e8cda",
"content_id": "1c34f1b16d92ed916437de7e647a6e5b830486c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3351,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 113,
"path": "/ArticleSpider/pipelines.py",
"repo_name": "Suyongzhi1997/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport codecs\nimport json\nimport MySQLdb\nimport MySQLdb.cursors\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.exporters import JsonItemExporter\nfrom twisted.enterprise import adbapi\n\n\nclass ArticlespiderPipeline:\n def process_item(self, item, spider):\n return item\n\n\nclass JsonWithEncodingPipeline(object):\n \"\"\"\n 自定义json文件导出\n \"\"\"\n\n def __init__(self):\n self.file = codecs.open('article.json', 'w', encoding='utf-8')\n\n def process_item(self, item, spider):\n lines = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.file.write(lines)\n return item\n\n def spider_closed(self, spider):\n self.file.close()\n\n\nclass MysqlPipeline(object):\n def __init__(self):\n self.conn = MySQLdb.connect('localhost', 'root', '123456', 'article_spider', charset='utf8', use_unicode=True)\n self.cur = self.conn.cursor()\n\n def process_item(self, item, spider):\n insert_sql = \"\"\"\n insert into articles(title, create_time, content, url, url_object_id)\n values (%s, %s, %s, %s, %s)\n \"\"\"\n self.cur.execute(insert_sql,\n (item['title'], item['create_time'], item['content'], item['url'], item['url_object_id']))\n self.conn.commit()\n\n\nclass MysqlTwistedPipline(object):\n def __init__(self, dbpool):\n self.dbpool = dbpool\n\n @classmethod\n def from_settings(cls, settings):\n dbparms = dict(\n host=settings['MYSQL_HOST'],\n db=settings['MYSQL_DBNAME'],\n user=settings['MYSQL_USER'],\n password=settings['MYSQL_PASSWORD'],\n charset='utf8',\n cursorclass=MySQLdb.cursors.DictCursor,\n use_unicode=True\n )\n\n dbpool = adbapi.ConnectionPool('MySQLdb', **dbparms)\n return cls(dbpool)\n\n def process_item(self, item, spider):\n \"\"\"\n 使用twisted将mysql插入变成异步执行\n \"\"\"\n query = self.dbpool.runInteraction(self.do_insert, item)\n query.addErrback(self.handle_error) # 处理异常\n\n def handle_error(self, failure):\n \"\"\"\n 处理异步插入的异常\n \"\"\"\n print(failure)\n\n def do_insert(self, cur, item):\n insert_sql = \"\"\"\n insert into articles(title, create_time, content, url, url_object_id)\n values (%s, %s, %s, %s, %s)\n \"\"\"\n cur.execute(insert_sql,\n (item['title'], item['create_time'], item['content'], item['url'], item['url_object_id']))\n\n\nclass JsonExportPipeline(object):\n \"\"\"\n 调用scrapy提供的json export导出json文件\n \"\"\"\n\n def __init__(self):\n self.file = open('articleexport.json', 'wb')\n self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)\n self.exporter.start_exporting()\n\n def close_spider(self, spider):\n self.exporter.finish_exporting()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n\n\nclass ArticleImagePipline(ImagesPipeline):\n def item_completed(self, results, item, info):\n for ok, value in results:\n image_file_path = value['path']\n item['front_image_path'] = image_file_path\n return item\n"
},
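None of these pipelines run unless they are registered. `MysqlTwistedPipline.from_settings` reads `MYSQL_HOST`, `MYSQL_DBNAME`, `MYSQL_USER` and `MYSQL_PASSWORD`, so the project's settings.py needs entries along these lines (a sketch; the values mirror the ones hardcoded in `MysqlPipeline` above):

```python
# settings.py (sketch)
ITEM_PIPELINES = {
    'ArticleSpider.pipelines.MysqlTwistedPipline': 300,
}

# read by MysqlTwistedPipline.from_settings
MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'article_spider'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123456'
```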
{
"alpha_fraction": 0.6187015771865845,
"alphanum_fraction": 0.623062014579773,
"avg_line_length": 42.914894104003906,
"blob_id": "a9a18ac801b48b783ae2139c8e5d1059f88703a0",
"content_id": "32802ca5dfd343dc94973b4e9b2f71eb1b131218",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2096,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 47,
"path": "/ArticleSpider/spiders/jobole.py",
"repo_name": "Suyongzhi1997/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport datetime\nfrom scrapy import Request\nfrom scrapy.loader import ItemLoader\n\nfrom ArticleSpider.items import ArticleItem, ArticleItemLoader\n\nfrom ArticleSpider.utils.common import get_md5\n\n\nclass JoboleSpider(scrapy.Spider):\n name = 'jobole'\n allowed_domains = ['blog.jobbole.com']\n start_urls = ['http://blog.jobbole.com/kaifadou/snews-getajax.php?next=%d' % i for i in range(1, 2)]\n\n def parse(self, response):\n article_urls = response.xpath('//a/@href').extract()\n for article_url in article_urls:\n article_url = 'http://blog.jobbole.com' + article_url\n yield Request(url=article_url, callback=self.detail_parse)\n\n def detail_parse(self, response):\n # item = ArticleItem()\n # item['url'] = response.url\n # item['url_object_id'] = get_md5(response.url)\n # item['title'] = response.xpath('//div[@class=\"main_left\"]//h2/text()').extract_first() # 新闻标题\n # create_time = response.xpath('//div[@class=\"meta\"]/span/text()').extract_first() # 发布时间\n # create_time = create_time.split(' ')[0]\n # try:\n # create_time = datetime.datetime.strptime(create_time, '%Y-%m-%d').date()\n # except Exception as e:\n # create_time = datetime.datetime.now().date()\n # item['create_time'] = create_time\n # item['content'] = response.xpath('//div[@class=\"wen_article\"]').extract_first() # 文章正文\n\n # 通过item loader 加载item\n item_loader = ArticleItemLoader(item=ArticleItem(), response=response)\n item_loader.add_xpath('title', '//div[@class=\"main_left\"]//h2/text()')\n item_loader.add_xpath('content', '//div[@class=\"wen_article\"]')\n item_loader.add_value('url', response.url)\n item_loader.add_value('url_object_id', get_md5(response.url))\n item_loader.add_xpath('create_time', '//div[@class=\"meta\"]/span/text()')\n article_loader = item_loader.load_item()\n yield article_loader\n # return item_loader.load_item()\n # yield item\n"
}
] | 4 |
jscanlon77/python-calc-engine
|
https://github.com/jscanlon77/python-calc-engine
|
cc0ebbfe44938ac57bd06e0e49125dc159d96810
|
adc3283040f9d59af4ebbb75fc3c0d5eaf6c1b91
|
c849dc29a8cb10ace61f2c4245d575c41b32af53
|
refs/heads/master
| 2022-11-05T13:29:02.025479 | 2020-07-03T10:11:02 | 2020-07-03T10:11:02 | 276,868,743 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7138643264770508,
"alphanum_fraction": 0.7177974581718445,
"avg_line_length": 27.661972045898438,
"blob_id": "13048cfa8e9fb76d0dada1a1d87008179a4a2c9e",
"content_id": "02f5d66b60db795f9f1ee7ee3e3d94575209eeea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2034,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 71,
"path": "/npv.py",
"repo_name": "jscanlon77/python-calc-engine",
"src_encoding": "UTF-8",
"text": "import pyodbc\nimport pandas as pd\nimport numpy as np\nimport itertools\nfrom itertools import islice\nfrom sqlalchemy import create_engine, event\nfrom urllib.parse import quote_plus\n\n\ndef createConnection():\n conn = pyodbc.connect('Driver={SQL Server};'\n 'Server=LOCALHOST;'\n 'Database=InvestorAnalytics_TEST;'\n 'Trusted_Connection=yes;')\n return conn\n\n\nconn = createConnection()\n\n\n# This brings back all the positions in order\n# now pop it in a grouped data frame so that we can calculate the npv\npositions_df = pd.read_sql_query('EXEC GetPositions',conn)\n\n\n\n#now group the data frame\ngrouped_positions_df = positions_df.groupby(['Equity_ID', 'Institution_ID'])\n\n# now loop over each group and then get all the positions so that we can calculate npv\n# provide a discount rate as well\n\ndiscount = 0.281\n\n\nequityIdList = []\ninstitutionIdList = []\nnpvList = []\n\nfor name, group in grouped_positions_df:\n #Now calculate the cashflows and write out the equity, institution and date, cashflow value\n # and add to the dictionary\n\n npv = np.npv(discount, group['Position'])\n equityIdList.append(name[0])\n institutionIdList.append(name[1])\n npvList.append(npv)\n\n # now create the array to calculate xirr\n\ndf = pd.DataFrame({'Equity_ID': equityIdList, 'Institution_ID': institutionIdList, 'NPV': npvList}) \ndel equityIdList, institutionIdList, npvList\n\n\n\n#Now write back to the sql in a fast way\nalchemyConn = \"DRIVER={ODBC Driver 17 for SQL Server};SERVER=LOCALHOST;DATABASE=InvestorAnalytics_TEST;Trusted_Connection=yes;\"\nquoted = quote_plus(alchemyConn)\nnew_con = 'mssql+pyodbc:///?odbc_connect={}'.format(quoted)\nengine = create_engine(new_con)\n\n\[email protected]_for(engine, 'before_cursor_execute')\ndef receive_before_cursor_execute(conn, cursor, statement, params, context, executemany):\n if executemany:\n cursor.fast_executemany = True\n\n\n\ndf.to_sql('CalculatedNPV', engine, index=False, schema=\"dbo\", if_exists = \"replace\")\nconn.close()"
}
] | 1 |
leelening/MulVAL-to-pddl
|
https://github.com/leelening/MulVAL-to-pddl
|
a36db064e6b4ca535a124d5a49d04568e4fd1b68
|
a60bfaea90ca0ea8ba925340b8bb3063f91b4c0d
|
47cde64ddd8f194d7e91b38d17f0f4f16e34c738
|
refs/heads/master
| 2022-12-03T11:11:32.724448 | 2020-08-11T15:51:47 | 2020-08-11T15:51:47 | 280,418,908 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5680146813392639,
"alphanum_fraction": 0.5726103186607361,
"avg_line_length": 25.536584854125977,
"blob_id": "7280c01334dccce7d8fc0bb4e9c57c957027724c",
"content_id": "eb7c82816fda2ebf499bdbde9913c1fa90cfa49b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1088,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 41,
"path": "/create_prob.py",
"repo_name": "leelening/MulVAL-to-pddl",
"src_encoding": "UTF-8",
"text": "from common import *\nimport pandas\nimport sys\n\nif __name__ == '__main__':\n lines = list()\n lines.append('(define(problem pb1)\\n')\n lines.append(' (:domain attack_graph)\\n')\n lines.append('(:objects s)\\n\\n')\n\n vertices_file = sys.argv[1]\n\n vertices = pandas.read_csv(vertices_file, header=None)\n classified_vertices = {'AND': set(), 'OR': set(), 'LEAF': set()}\n\n # classify the nodes\n for v in vertices.values:\n type = v[2]\n classified_vertices[type].add(tuple(v))\n\n temp = str()\n for v in classified_vertices['LEAF']:\n temp += '(' + format_string(v[1]) + ' s)\\n'\n\n lines.append('(:init\\n')\n lines.append(temp)\n lines.append('\\n)\\n\\n')\n\n # in order to have all the transitions, we need all OR nodes to be true\n lines.append('(:goal (and\\n')\n temp = str()\n for v in classified_vertices['OR']:\n temp += '(' + format_string(v[1]) + ' s)\\n'\n lines.append(temp)\n lines.append(')\\n')\n lines.append(')\\n\\n')\n\n lines.append(')\\n')\n\n with open(\"problem.pddl\", 'w+') as f:\n f.writelines(lines)\n"
},
{
"alpha_fraction": 0.5807985067367554,
"alphanum_fraction": 0.5826995968818665,
"avg_line_length": 29.05714225769043,
"blob_id": "93468bfecb825920e45cfd705395efa2ef2ee6e7",
"content_id": "3b09ccf0634c667471d85f4b6fab1f7fb9aadcb6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1052,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 35,
"path": "/common.py",
"repo_name": "leelening/MulVAL-to-pddl",
"src_encoding": "UTF-8",
"text": "def find_predicate(vertices, index):\n string = vertices.values[index - 1, 1]\n\n return format_string(string)\n\n\ndef format_string(string):\n string = string.replace(' ', '_')\n string = string.replace('(', '_')\n string = string.replace(')', '_')\n string = string.replace('-', '_')\n string = string.replace('\\'', '_')\n string = string.replace(',', '_')\n string = string.replace('.', '_')\n string = string.replace('__', '_')\n string = string.replace('__', '_')\n string = string.rstrip('_')\n string = string.lower()\n return string\n\n\ndef predecessor(index, arcs, vertices):\n predecessors = str()\n for (source, target) in arcs:\n if source == index:\n predecessors += ' (' + format_string(find_predicate(vertices, target)) + ' ?x)\\n'\n return predecessors\n\n\ndef successor(index, arcs, vertices):\n successors = str()\n for (source, target) in arcs:\n if target == index:\n successors += ' (' + format_string(find_predicate(vertices, source)) + ' ?x)\\n'\n return successors\n"
},
{
"alpha_fraction": 0.5780932903289795,
"alphanum_fraction": 0.5846856236457825,
"avg_line_length": 31.327869415283203,
"blob_id": "ee2303ff547aca9b50e570a8487e44caf3987714",
"content_id": "d4b8938b90b0010df2e64e5b77dc4683fd65299c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1972,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 61,
"path": "/create_domain.py",
"repo_name": "leelening/MulVAL-to-pddl",
"src_encoding": "UTF-8",
"text": "import pandas\nfrom common import *\nimport collections\nimport sys\n\n\nif __name__ == '__main__':\n\n lines = list()\n lines.append('(define(domain attack_graph)\\n')\n lines.append(' (:requirements :strips)\\n\\n')\n\n vertices_file = sys.argv[1]\n arcs_file = sys.argv[2]\n\n vertices = pandas.read_csv(vertices_file, header=None)\n arcs = pandas.read_csv(arcs_file, header=None)\n arcs = arcs.iloc[:, 0:2].values.tolist()\n arcs = list(tuple(x) for x in arcs)\n\n classified_vertices = {'AND': set(), 'OR': set(), 'LEAF': set()}\n\n # classify the nodes\n for v in vertices.values:\n type = v[2]\n classified_vertices[type].add(tuple(v))\n\n predicates = set('(' + format_string(v[1]) + ' ?x)\\n' for v in vertices.values if v[2] == 'OR' or v[2] == 'LEAF')\n\n lines.append('(:predicates\\n')\n lines.append(' '.join(predicates) + '\\n')\n lines.append(')\\n\\n')\n\n # collection all actions\n actions = dict()\n for v in classified_vertices['AND']:\n actions[v] = dict()\n index = v[0]\n actions[v]['pre'] = predecessor(index, arcs, vertices)\n actions[v]['post'] = successor(index, arcs, vertices)\n\n repeated_actions = [item for item, count in collections.Counter(actions.keys()).items() if count > 1]\n single_actions = [item for item, count in collections.Counter(actions.keys()).items() if count == 1]\n\n for a in single_actions:\n lines.append('(:action ' + format_string(str((a[1] +'_' + str(a[0])))) + '\\n')\n lines.append(':parameters (?' + 'x' + ')\\n')\n lines.append(':precondition (and\\n')\n lines.append(actions[a]['pre'] + ')\\n')\n lines.append(':effect (and\\n')\n lines.append(actions[a]['post'] + ')\\n')\n lines.append(')\\n\\n')\n\n lines.append(')\\n')\n\n with open(\"domain.pddl\", 'w+') as f:\n f.writelines(lines)\n\n lines = list()\n lines.append('(define(domain attack-graph)\\n')\n lines.append(' (: requirements:strips)\\n\\n')\n"
},
{
"alpha_fraction": 0.7141577005386353,
"alphanum_fraction": 0.731630802154541,
"avg_line_length": 32.26865768432617,
"blob_id": "357bc04ecf2694883f10dced4cb49b4fa57f58f4",
"content_id": "9290e4aee8f903e97ce99c32edff17e79f6c542d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2234,
"license_type": "permissive",
"max_line_length": 203,
"num_lines": 67,
"path": "/README.md",
"repo_name": "leelening/MulVAL-to-pddl",
"src_encoding": "UTF-8",
"text": "# MULVALTOPDDL\n\nThis is a small tool I develop to convert a logic attack graph to PDDL files describing the attacking procedure.\n\n## Getting Started\n\nThese instructions will get you a copy of the project up and running on your local machine for development and testing purposes. See deployment for notes on how to deploy the project on a live system.\n\n### Prerequisites\n\n#### MulVAL\n* Visit http://people.cs.ksu.edu/~xou/mulval/ to download the latest MulVAL.\n\n#### Python 3.x\n\n* Visit https://www.python.org/downloads/ to download the latest suitable Python 3.x version according to your OS (we used Python 3.5 on Ubuntu 16.04)\n\n#### PDDL\nVisit http://editor.planning.domains/ to solve the problem. Once you have **domain.pddl** and **problem.pddl**\n\n### Installing\n\nThe following Python packages are required:\n\n1. pandas\n \nYou can run the following command to install the package.\n```\npip install pandas\n```\n\nYou can download the tool using the following command. \n\n```\ngit clone https://github.com/leelening/MulVALTOPDDL.git\n```\n\n## Running the example\n\nWe provide a running example in the package. You can find AttackGraph.dot, ARCS.CSV, and VERTICES.CSV in the package. These files are generated by the [MulVAL](http://people.cs.ksu.edu/~xou/mulval/).\n\nYou can create a domain file **domain.pddl** by running the following command.\n```\npython create_domain.py ./example/VERTICES.CSV ./example/ARCS.CSV\n```\n\nThen you can create a problem file **problem.pddl** by running the following command.\n```\npython create_prob.py ./example/VERTICES.CSV\n```\n\nOnce you get these two files: **domain.pddl** and **problem.pddl**. You can go to [PDDL Editor](http://editor.planning.domains/) to solve your planning problem. This online editor has many good features.\n\n## Authors\n\n* **Lening Li** - *Initial work* - [leelening](https://github.com/leelening)\n\n## License\n\nThis project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details\n\n## Acknowledgments\n\n* [MulVAL](http://people.cs.ksu.edu/~xou/mulval/)\n* [PDDL Editor](http://editor.planning.domains/)\n* The tool is inspired by:\n 1. B, T. G., Puzis, R., & Shapira, B. (2017). Scalable Attack Path Finding. 234–249. https://doi.org/10.1007/978-3-319-60080-2\n\n\n\n"
}
] | 4 |
bighandsam/Thinkful-psycopg2
|
https://github.com/bighandsam/Thinkful-psycopg2
|
3ef02847003d4a8fcc1065a171da5f099dd902f5
|
8a3d5a23362517effcf25e60c0e5c79ca59fed56
|
f11a6aa91b89feab380083b154d94633e603c230
|
refs/heads/master
| 2020-01-03T07:13:23.398862 | 2014-09-30T01:05:48 | 2014-09-30T01:05:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6935662031173706,
"alphanum_fraction": 0.6957771182060242,
"avg_line_length": 55.54999923706055,
"blob_id": "5ef72742a616a21b3cfc8a8f33f669e2334aabac",
"content_id": "4aa8d5183071288db3be635b15a0efc09ca09b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4523,
"license_type": "no_license",
"max_line_length": 299,
"num_lines": 80,
"path": "/test.py",
"repo_name": "bighandsam/Thinkful-psycopg2",
"src_encoding": "UTF-8",
"text": "import csv\nimport psycopg2\n\ndef main(csv_file):\n\t'''Assumption: for every entry in the CSV file, if a breed is provided, then a species is mandatory'''\n\n\twith open(csv_file) as f_obj:\n\n\t\t# Establishes connection to the database and initialize a cursor\n\t\tconn = psycopg2.connect(\"dbname='pets' user='Administrator' host='localhost' password='sam'\")\n\t\tcur = conn.cursor()\n\n\t\t# skipinitialspace: when True, whitespace immediately following the delimiter is ignored. The default is False.\n\t\treader = csv.DictReader(f_obj, skipinitialspace = True)\n\t\t\n\t\t# Keeps track of which pet is being processed\n\t\tcounter = 1\n\n\t\tfor line in reader:\n\n\t\t\t# Inserts pet (only its name) into the \"pet\" table and returns its corresponding ID\n\t\t\tcur.execute(\"INSERT INTO pet(name) VALUES (%(Name)s) RETURNING id\", line)\n\t\t\tcurrent_pet_id = cur.fetchone()\n\n\t\t\t# Informs the user which pet is being processed\n\t\t\tprint \"(\" + str(counter) + \")\" + \"Processing pet: {}\".format(line[\"Name\"].capitalize())\n\n\t\t\t# If an \"age\" entry is supplied in the CSV pet line, then modify the pet record to include its age\n\t\t\tif line[\"age\"]:\n\t\t\t\tcur.execute(\"UPDATE pet SET age=%(age)s WHERE id=%(id)s\", {'age': line['age'], 'id': current_pet_id})\n\n\t\t\t# If a \"species\" entry is supplied in the CSV pet line\n\t\t\tif line[\"species name\"]:\n\t\t\t\t# Queries the \"species\" table for the given species and selects EITHER (1) a SPECIES ID or (2) nothing/Null\n\t\t\t\tcur.execute(\"SELECT id FROM species WHERE UPPER(name)=UPPER(%(species name)s)\", line)\n\t\t\t\t# If the species doesn't exist, then insert a new row into the \"species\" table\n\t\t\t\tif not cur.fetchone():\n\t\t\t\t\tcur.execute(\"INSERT INTO species(name) VALUES(INITCAP(%(species name)s))\", line) # INITCAP() will capitalize the word before inserting it into the table\n\t\t\t\t\tprint \"*Added {} into the SPECIES table\".format(line[\"species name\"].capitalize()) # This line will capitalize the species name and display it to the user\n\n\t\t\t# If a \"breed\" entry is supplied in the CSV pet line\n\t\t\tif line[\"breed name\"]:\n\t\t\t\t# Queries the \"breed\" table for the given breed/species and selects EITHER (1) a BREED ID or (2) nothing/Null\n\t\t\t\tcur.execute(\"SELECT id FROM breed WHERE UPPER(name)=UPPER(%(breed name)s) AND species_id=(SELECT id FROM species WHERE UPPER(name)=UPPER(%(species name)s))\", line)\n\t\t\t\t# If the breed/species combination doesn't exist, then insert a new row into the \"breed\" table\n\t\t\t\tif not cur.fetchone():\n\t\t\t\t\tcur.execute(\"INSERT INTO breed(name, species_id) VALUES(INITCAP(%(breed name)s), (SELECT id FROM species WHERE UPPER(name)=UPPER(%(species name)s)))\", line)\n\t\t\t\t\tprint \"*Added {} (of species '{}') into the BREED table\".format(line[\"breed name\"].capitalize(), line[\"species name\"].capitalize())\n\t\t\t\t# Modifies the pet record to include its BREED ID\n\t\t\t\tcur.execute(\"UPDATE pet SET breed_id=(SELECT id FROM breed WHERE UPPER(name)=UPPER(%(breed name)s) AND species_id=(SELECT id FROM species WHERE UPPER(name)=UPPER(%(species name)s))) WHERE id=%(id)s\", {'breed name': line['breed name'], 'species name': line['species name'], 'id': current_pet_id})\n\n\t\t\t# If a \"shelter\" entry is supplied in the CSV pet line\n\t\t\tif line[\"shelter name\"]:\n\t\t\t\t# Queries the \"shelter\" table for the given shelter and selects EITHER (1) a SHELTER ID of (2) nothing/Null\n\t\t\t\tcur.execute(\"SELECT id FROM 
shelter WHERE UPPER(name)=UPPER(%(shelter name)s)\", line)\n\t\t\t\t# If the shelter doesn't exist, then insert a new row into the \"shelter\" table\n\t\t\t\tif not cur.fetchone():\n\t\t\t\t\tcur.execute(\"INSERT INTO shelter(name) VALUES(INITCAP(%(shelter name)s))\", line)\n\t\t\t\t\tprint \"*Added {} into the SHELTER table\".format(line[\"shelter name\"].capitalize())\n\t\t\t\t# Modifies the pet record to include its SPECIES ID\n\t\t\t\tcur.execute(\"UPDATE pet SET shelter_id=(select id from shelter where UPPER(name)=UPPER(%(shelter name)s)) WHERE id=%(id)s\", {'shelter name': line['shelter name'], 'id': current_pet_id})\n\n\t\t\t# If an \"adopted\" entry is supplied in the CSV pet line, then modify the pet record to include its adopted status\n\t\t\tif line[\"adopted\"]:\n\t\t\t\tcur.execute(\"UPDATE pet SET adopted=%(adopted)s WHERE id=%(id)s\", {'adopted': line['adopted'], 'id': current_pet_id})\n\n\t\t\t# Commits all the SQL queries\n\t\t\tconn.commit()\n\n\t\t\t# Informs the user that the processing for the current pet has finished\n\t\t\tprint \"(\" + str(counter) + \")\" + \"Finished pet processing\"\n\t\t\tcounter += 1\n\n\t\t# Closes open connection to the database\n\t\tcur.close()\n\t\tconn.close()\n\nif __name__ == \"__main__\":\n\tcsv_file = \"csvfile.csv\" # Substitute your own CSV file\n\tmain(csv_file)"
}
] | 1 |
ilxijwd/kursachbot
|
https://github.com/ilxijwd/kursachbot
|
45636807a659340f9ab8e73e46c4896d56dda8a4
|
b2bd5641164458aeefae905c279dcbfaf90f66dc
|
e0e7844121e859feaef1abdfa3875c505b8f6fc8
|
refs/heads/main
| 2023-04-21T17:44:23.034912 | 2021-04-22T08:07:47 | 2021-04-22T08:07:47 | 359,431,960 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5175257921218872,
"alphanum_fraction": 0.5216494798660278,
"avg_line_length": 40.57143020629883,
"blob_id": "d78bf11caef3fd565bea39e423a6bdb2d3f5dcec",
"content_id": "3c16858158de532c9608c94073564b5611fe9bac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1536,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 35,
"path": "/src/scheduler/tasks/meeting_notify.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import schedule\nfrom datetime import datetime\n\nfrom src.db import session, MeetingWithHead\nfrom src.bot import tb\n\n\ndef meeting_notify():\n today = datetime.today()\n meetings_with_head = session.query(MeetingWithHead).all()\n for meeting_with_head in meetings_with_head:\n faculty = meeting_with_head.head.faculty\n practice_term_start_date = faculty.practice_term.start_date.strptime(\"%Y-%m-%d\")\n days_left_to_practice_start = (practice_term_start_date - today).days\n\n if 0 < days_left_to_practice_start <= 7:\n for group in faculty.groups:\n for student in group.students:\n if student.person.telegram_id:\n tb.send_message(\n student.person.telegram_id,\n f\"Зустріч з керівником практики запланована на:\\n\\n\"\n f\"*Дата*\\n\"\n f\"{meeting_with_head.meeting.date}\\n\"\n f\"*Час початку*\\n\"\n f\"{meeting_with_head.meeting.start_time}\\n\"\n f\"*Час завершення*\\n\"\n f\"{meeting_with_head.meeting.end_time}\\n\"\n f\"*Місце проведення*\\n\"\n f\"{meeting_with_head.meeting.place}\",\n parse_mode='Markdown',\n )\n\n\nschedule.every().day.at(\"08:00\").do(meeting_notify)\n"
},
{
"alpha_fraction": 0.8069307208061218,
"alphanum_fraction": 0.8069307208061218,
"avg_line_length": 66.33333587646484,
"blob_id": "5969b748f8aa27f1f72088d5c91da6a73238cdcd",
"content_id": "3688bac2babc855dabcd654a54f53e7ea5f1879f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 202,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 3,
"path": "/src/bot/listeners/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from .commands import start, head, debt, inform, places, schedule\nfrom .message_handlers import get_user_fullname, get_place_number, get_schedule_file\nfrom .callback_query_handlers import user_identity\n"
},
{
"alpha_fraction": 0.7084639668464661,
"alphanum_fraction": 0.7084639668464661,
"avg_line_length": 44.57143020629883,
"blob_id": "a5d41daeb17d3a490b05cc0266dbcd3d64b38aca",
"content_id": "15afbdebd68328d868486b6221d4a6ae281b60d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 7,
"path": "/src/bot/inline_keyboards/identity.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n\n\nidentity_markup = InlineKeyboardMarkup([\n [InlineKeyboardButton(text='👩🎓 Студент 👨🎓', callback_data='identity_markup=👩🎓 Студент 👨🎓')],\n [InlineKeyboardButton(text='👩🏫 Керівник 👨🏫', callback_data='identity_markup=👩🏫 Керівник 👨🏫')],\n])\n"
},
{
"alpha_fraction": 0.5875617265701294,
"alphanum_fraction": 0.6026043891906738,
"avg_line_length": 30.36619758605957,
"blob_id": "d8382043b0bb3fc182089d44d397975b988eb602",
"content_id": "ca1671dc8ad7967c0545b06feccd14f6cc51de9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4609,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 142,
"path": "/src/db/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from .models import session, Faculty, Specialty, Group, PlaceOfPractice, Person, Student, Head, Schedule, Meeting, MeetingWithHead, ScheduleMeeting, PracticeTerm, ExamMeeting\n\n\ndef initiate():\n faculties = [\n Faculty(name=\"Інфокомунікацій та програмної інженерії\", short=\"ІКПІ\"),\n ]\n\n practice_terms = [\n PracticeTerm(faculty=faculties[0], start_date=\"2021-06-24\", end_date=\"2021-07-18\")\n ]\n\n specialities = [\n Specialty(name=\"Комп'ютерні науки\", short=\"КН\"),\n ]\n\n groups = [\n Group(course=3, subgroup=1, specialty=specialities[0], faculty=faculties[0]),\n ]\n\n place_of_practices = [\n PlaceOfPractice(name=\"Державний університет інтелектуальних технологій і зв'язку\"),\n PlaceOfPractice(name=\"KeepSolid Inc.\"),\n PlaceOfPractice(name=\"Google Inc.\"),\n ]\n\n persons = [\n Person(fullname=\"Петров В. О.\"),\n Person(fullname=\"Казимир В. О.\"),\n Person(fullname=\"Журний Р. С.\"),\n Person(fullname=\"Пупкін Б. Р.\"),\n ]\n\n students = [\n Student(person=persons[0], group=groups[0]),\n Student(person=persons[1], group=groups[0], has_debt=True),\n Student(person=persons[2], group=groups[0]),\n ]\n\n heads = [\n Head(person=persons[3], faculty=faculties[0]),\n ]\n\n meetings_with_head = [\n MeetingWithHead(\n head=heads[0],\n meeting=Meeting(\n date=\"2021-07-16\",\n start_time=\"9:00\",\n end_time=\"12:00\",\n place=\"Аудиторія 310, головний корпус\"\n )\n )\n ]\n\n exam_meetings = [\n ExamMeeting(\n faculty=faculties[0],\n meeting=Meeting(\n date=\"2021-07-17\",\n start_time=\"9:00\",\n end_time=\"12:00\",\n place=\"Аудиторія 310, головний корпус\"\n )\n )\n ]\n\n for faculty in faculties:\n search = session.query(Faculty).filter(Faculty.name == faculty.name).first()\n\n if not search:\n session.add(faculty)\n\n for practice_term in practice_terms:\n faculty = session.query(Faculty).filter(Faculty.name == practice_term.faculty.name).first()\n search = session.query(PracticeTerm).filter(PracticeTerm.faculty == faculty).first()\n\n if not search:\n session.add(practice_term)\n\n for specialty in specialities:\n search = session.query(Specialty).filter(Specialty.name == specialty.name).first()\n\n if not search:\n session.add(specialty)\n\n for group in groups:\n faculty = session.query(Faculty).filter(Faculty.name == group.faculty.name).first()\n specialty = session.query(Specialty).filter(Specialty.name == group.specialty.name).first()\n search = session.query(Group).filter((Group.specialty == specialty) & (Group.faculty == faculty)).first()\n\n if not search:\n session.add(group)\n\n for place_of_practice in place_of_practices:\n search = session.query(PlaceOfPractice).filter(PlaceOfPractice.name == place_of_practice.name).first()\n\n if not search:\n session.add(place_of_practice)\n\n for person in persons:\n search = session.query(Person).filter(Person.fullname == person.fullname).first()\n\n if not search:\n session.add(person)\n\n for student in students:\n person = session.query(Person).filter(Person.fullname == student.person.fullname).first()\n search = session.query(Student).filter(Student.person == person).first()\n\n if not search:\n session.add(student)\n\n for head in heads:\n person = session.query(Person).filter(Person.fullname == head.person.fullname).first()\n search = session.query(Head).filter(Head.person == person).first()\n\n if not search:\n session.add(head)\n\n for meeting_with_head in meetings_with_head:\n search = session.query(MeetingWithHead).filter(\n (MeetingWithHead.meeting == meeting_with_head.meeting) &\n 
(MeetingWithHead.head == meeting_with_head.head)\n )\n\n if not search:\n session.add(meeting_with_head)\n\n for exam_meeting in exam_meetings:\n search = session.query(ExamMeeting).filter(\n (ExamMeeting.meeting == exam_meeting.meeting) &\n (ExamMeeting.faculty == exam_meeting.faculty)\n )\n\n if not search:\n session.add(exam_meeting)\n\n session.commit()\n\n\ninitiate()\n"
},
{
"alpha_fraction": 0.5653765797615051,
"alphanum_fraction": 0.569037675857544,
"avg_line_length": 38.020408630371094,
"blob_id": "3039faff9149cfb5c6f29ac39feb4bec1b055a83",
"content_id": "652eb23c1e601304308b6917295831fa86429c46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2096,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 49,
"path": "/src/bot/listeners/commands/places.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom telebot.types import ForceReply\n\nfrom src.bot import tb\nfrom src.db import session, Person, Student, PlaceOfPractice\n\n\[email protected]_handler(commands=['places'])\ndef places_command(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if not person:\n return tb.send_message(msg.chat.id, \"Ви не зареєстровані\")\n\n args = msg.text.split()[1:]\n if len(args) == 1:\n if args[0] == 'list':\n places = session.query(PlaceOfPractice).all()\n\n return tb.send_message(\n msg.chat.id,\n \"Доступні місця практики:\\n\"\n f\"{os.linesep.join((f'{i + 1}. {place.name}' for (i, place) in enumerate(places)))}\",\n parse_mode='Markdown'\n )\n elif args[0] == 'take':\n if person.head:\n return tb.send_message(msg.chat.id, \"Що, керівник, хочеш на практику?)\")\n\n return tb.send_message(\n msg.chat.id,\n \"Вкажіть номер практики:\",\n reply_markup=ForceReply(selective=False)\n )\n elif args[0] == 'taken':\n if person.student:\n return tb.send_message(msg.chat.id, \"В тебе немає доступу до цієї команди! 😡\")\n\n determined_students = session.query(Student).filter(Student.place_of_practice).all()\n the_rest = session.query(Student).filter(not Student.place_of_practice).all()\n return tb.send_message(\n msg.chat.id,\n f\"Вибрали місце практики: {len(determined_students)}\\n\"\n f\"Не вибрали місце практики:\\n\"\n f\"{os.linesep.join((f'{i + 1}. {student.person.fullname}' for (i, student) in enumerate(the_rest)))}\",\n parse_mode='Markdown'\n )\n\n return tb.send_message(msg.chat.id, \"Незнайомий аргумент, доступні: <list>, <take>, <taken>\")\n"
},
{
"alpha_fraction": 0.574982225894928,
"alphanum_fraction": 0.5785359144210815,
"avg_line_length": 35.07692337036133,
"blob_id": "2d6913a8490691b2db66a8a8a666b3a2f4dc2933",
"content_id": "cdc1563fe294301672553a727aaa4d0360216b25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1590,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 39,
"path": "/src/bot/listeners/commands/debt.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom src.bot import tb\nfrom src.db import session, Person, Student\n\n\[email protected]_handler(commands=['dept'])\ndef dept_command(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if not person:\n return tb.send_message(msg.chat.id, \"Ви не зареєстровані\")\n\n args = msg.text.split()[1:]\n if len(args) == 1:\n if args[0] == 'list':\n if not person.head:\n return tb.reply_to(msg, \"У вас нема доступа до цієї команди\")\n\n students_with_dept = session.query(Student).filter(Student.has_debt).all()\n\n if len(students_with_dept) > 0:\n return tb.send_message(\n msg.chat.id,\n \"Студенти які мають борг:\\n\"\n f\"{os.linesep.join((f'{i + 1}. {student.person.fullname}' for (i, student) in enumerate(students_with_dept)))}\",\n parse_mode='Markdown'\n )\n else:\n return tb.send_message(msg.chat.id, \"Немає студентів з боргом\")\n else:\n return tb.send_message(msg.chat.id, \"Незнайомий аргумент, доступні: <list>\")\n\n if person.head:\n return tb.send_message(msg.chat.id, \"У керівника не може бути боргу 😂\")\n\n tb.send_message(\n msg.chat.id,\n \"У вас є борг по практиці\" if person.student.has_debt else 'У вас немає боргу по практиці'\n )\n"
},
{
"alpha_fraction": 0.7188940048217773,
"alphanum_fraction": 0.7188940048217773,
"avg_line_length": 38.45454406738281,
"blob_id": "8bb043a8e07e1c3bd7b1976cfe5360a6ed549d1e",
"content_id": "4d4e107e78cfdd7cd88699bb4a3756cfb54e6167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 22,
"path": "/src/bot/listeners/message_handlers/get_place_number.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from src.bot import tb\nfrom src.db import session, Person, PlaceOfPractice\n\n\[email protected]_handler(\n func=lambda msg: hasattr(msg.reply_to_message, 'text') and msg.reply_to_message.text == \"Вкажіть номер практики:\"\n)\ndef get_person_fullname(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n\n if not msg.text.isdigit():\n return tb.send_message(msg.chat.id, \"Введіть порядковий номер місця практики зі списку доступних місць\")\n\n place = session.query(PlaceOfPractice).filter(PlaceOfPractice.id == int(msg.text)).first()\n\n if not place:\n return tb.send_message(msg.chat.id, \"Не знайдено жодного місця практики за цим номером\")\n\n person.student.place_of_practice = place\n session.commit()\n\n tb.send_message(msg.chat.id, f\"Ви успішно вибрали _{place.name}_ місцем практики\", parse_mode='Markdown')\n"
},
{
"alpha_fraction": 0.8421052694320679,
"alphanum_fraction": 0.8421052694320679,
"avg_line_length": 37,
"blob_id": "5b3700756889a9e4a890c48d2c5d344aaec0c624",
"content_id": "563e6ef30588da0e23e51c1099ba82244dd9e37b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 1,
"path": "/src/bot/inline_keyboards/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from .identity import identity_markup\n"
},
{
"alpha_fraction": 0.7294344305992126,
"alphanum_fraction": 0.7352185249328613,
"avg_line_length": 36,
"blob_id": "d9e25d35dd2c624b06ab618ad2dee0e440f17c8e",
"content_id": "c616a9c02f6a269debdd0cf96d34ffa86f2a8844",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2585,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 42,
"path": "/README.md",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "# kursachbot\n***\n\n### Студент: \n- [x] інформує студентів про терміни практики (розсилка повідомлень за списком) \n> За 3 дні до початку практики кожен день о 8 ранку \n- [x] надає інформацію про керівника практики від Академії \n> /head \n- [x] надає список доступних місць практики \n> /places list \n- [x] приймає заявку на обране місце практики \n> /places take \n- [x] перевіряє наявність що діє договору з обраною базою практики та при\nвідсутності що діє договору інформує студента про необхідність укладати \nдоговір (умову) (надає шаблони документів: договір, паспорт бази практики) \n> За 14 днів до початку практики кожен день о 8 ранку \n- [x] інформує студентів про зустріч з керівником практики (дата, час та місце\nзустрічі) – розсилка за списком \n> За 7 днів до початку зустрічі кожен день о 8 ранку \n- [x] інформує студентів про здачу заліку по практиці (дата, час та місце приймання\nзаліку) – розсилка за списком \n> За 3 дні до початку зустрічі кожен день о 8 ранку \n- [x] надає розклад консультацій (приймання заліку) \n> /schedule show \n- [x] інформує про наявність заборгованості по практиці \n> /debt \n\n***\n\n### Керівник:\n- [x] надає інформацію за кількістю обраних місць практики \n> /places taken \n- [x] приймає розклад консультацій та інформує студентів \n> /schedule change \n- [x] приймає інформаційне повідомлення та передає його студентам \n> /inform <text> \n- [x] надає список боржників по практиці \n> /debt list\n\n***\n\n> Дані про студентів та їх місця практики зберігати у БД. Забезпечити експорт даних у формат xlsx. \n"
},
{
"alpha_fraction": 0.7178899049758911,
"alphanum_fraction": 0.7178899049758911,
"avg_line_length": 35.33333206176758,
"blob_id": "c5f8a2f0ce1841d9f6c015d86512db0d09df6693",
"content_id": "6094d381e25ce619904fc9f068c9bbbf37a5c4ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 12,
"path": "/src/bot/listeners/commands/start.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from src.bot import tb\nfrom src.bot.inline_keyboards import identity_markup\nfrom src.db import session, Person\n\n\[email protected]_handler(commands=['start'])\ndef send_welcome(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if person:\n return tb.send_message(msg.chat.id, f\"Привіт {person.fullname} 😊\")\n\n tb.send_message(msg.chat.id, \"Привіт, ким ти є?\", reply_markup=identity_markup)\n"
},
{
"alpha_fraction": 0.8351648449897766,
"alphanum_fraction": 0.8351648449897766,
"avg_line_length": 44.5,
"blob_id": "409faea5d8ee6c1f967e0a9cf34e5ad3d10b2c49",
"content_id": "44b14fddc485e6d394991a290ab3c4c82e31e3c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 4,
"path": "/src/scheduler/tasks/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from .practice_terms import practice_terms\nfrom .selected_place import selected_place\nfrom .meeting_notify import meeting_notify\nfrom .exam_meeting_notify import exam_meeting_notify\n"
},
{
"alpha_fraction": 0.6270877718925476,
"alphanum_fraction": 0.6316428780555725,
"avg_line_length": 36,
"blob_id": "c083804459e555b22c422dda831ed673bc7adf45",
"content_id": "7288b50d25cdb20e958c99a881e20c67ee4e4db1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3478,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 89,
"path": "/src/bot/listeners/message_handlers/get_schedule_file.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\nfrom tempfile import TemporaryFile\n\nimport openpyxl\n\nfrom src.bot import tb\nfrom src.db import session, Person, Faculty, Schedule, ScheduleMeeting, Meeting\n\n\[email protected]_handler(\n func=lambda msg: hasattr(msg.reply_to_message, 'text') and\n msg.reply_to_message.text == \"Очікую файл розкладу\" and\n (msg.document.mime_type == 'text/application/vnd.ms-excel' or\n msg.document.mime_type == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'),\n content_types=['document']\n)\ndef get_schedule_file(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n\n # FILE SIZE CHECK\n if msg.document.file_size > 500000:\n return tb.send_message(msg.chat.id, \"Файл великого розміру (0.5mb max)\")\n\n # DOWNLOADING THE FILE\n tg_cloud_file = tb.get_file(msg.document.file_id)\n binary_data = tb.download_file(tg_cloud_file.file_path)\n\n # CREATING TEMPORARY FILE WRAPPER\n schedule_file = TemporaryFile()\n schedule_file.write(binary_data)\n\n # DELETING CURRENT SCHEDULE FROM DB\n if person.head.faculty.schedule:\n session.delete(person.head.faculty.schedule)\n session.commit()\n\n # PARSING TABLE WITH DAYS OF PRACTICE\n schedule = Schedule(faculty=person.head.faculty)\n try:\n schedule_sheet = openpyxl.load_workbook(schedule_file).active\n for row_idx in range(2, schedule_sheet.max_row + 1):\n schedule.schedule_meetings.append(\n ScheduleMeeting(\n schedule=schedule,\n meeting=Meeting(\n date=schedule_sheet.cell(row_idx, 1).value.strftime(\"%Y-%m-%d\"),\n start_time=schedule_sheet.cell(row_idx, 2).value.strftime(\"%H:%M\"),\n end_time=schedule_sheet.cell(row_idx, 3).value.strftime(\"%H:%M\"),\n place=schedule_sheet.cell(row_idx, 4).value,\n )\n )\n )\n except (ValueError, TypeError):\n tb.reply_to(msg, \"Помилка у парсингу файла\")\n schedule_file.close()\n return\n\n # INSERTING NEW SCHEDULE TO DB\n person.head.faculty.schedule = schedule\n session.commit()\n\n # NOTIFYING FOR SUCCESS AND CLEANING UP\n tb.reply_to(msg, \"Успішно завантажено новий розклад!\")\n schedule_file.close()\n\n # INFORMING THE STUDENTS WITH NEW SCHEDULE BY SENDING THE FILE\n not_informed_students = []\n informed_students = []\n for group in person.head.faculty.groups:\n for student in group.students:\n if not student.person.telegram_id:\n not_informed_students.append(student)\n continue\n\n tb.send_document(\n student.person.telegram_id,\n msg.document.file_id,\n caption=\"Шановний студент! Розклад консультацій змінено, лови актуальний\"\n )\n informed_students.append(student)\n\n # NOTIFYING ABOUT THE RESULTS ABOUT STUDENTS INFORMATION\n tb.send_message(\n msg.chat.id,\n f\"Інформованих студентів: {len(informed_students)}\\n\"\n \"Не отримали повідомленя: \\n\"\n f\"{os.linesep.join((f'{i + 1}. {student.person.fullname}' for (i, student) in enumerate(not_informed_students)))}\",\n parse_mode='Markdown'\n )\n"
},
{
"alpha_fraction": 0.6631892919540405,
"alphanum_fraction": 0.6631892919540405,
"avg_line_length": 32.54999923706055,
"blob_id": "944ef22cdafd9257a7caecc550cf2e87decf5041",
"content_id": "4900c74ff96e45833579aeffefebe0273b86a37f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 738,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 20,
"path": "/src/bot/listeners/commands/head.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from src.bot import tb\nfrom src.db import session, Person\n\n\[email protected]_handler(commands=['head'])\ndef head_command(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if not person:\n return tb.send_message(msg.chat.id, \"Ви не зареєстровані\")\n\n if person.head:\n return tb.send_message(msg.chat.id, \"Ви все про себе знаєте 😊\")\n\n faculty = person.student.group.faculty\n tb.send_message(\n msg.chat.id,\n f\"Керівником практики факультету *{faculty.short}* є\"\n f\"[{faculty.head.person.fullname}](https://t.me/user?id={faculty.head.person.telegram_id})\",\n parse_mode='Markdown'\n )\n"
},
{
"alpha_fraction": 0.6822507381439209,
"alphanum_fraction": 0.6822507381439209,
"avg_line_length": 29.212499618530273,
"blob_id": "631d7ca2b6b3c866dc6a0a58baa84b437c21a154",
"content_id": "be6f96ee7552ecbebb88bc1168a5986e248264b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4834,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 160,
"path": "/src/db/models.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from os import path\nfrom datetime import datetime\n\nfrom sqlalchemy import create_engine, Column, Integer, Text, Boolean, DateTime, Date, ForeignKey, Table\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm.session import sessionmaker\nfrom sqlalchemy.orm import relationship, backref\n\n\ndb_path = path.join(path.dirname(__file__), 'kursachbot.db')\nengine = create_engine(f'sqlite:///{db_path}', connect_args={'check_same_thread': False})\nbase = declarative_base()\n\n\nclass Faculty(base):\n __tablename__ = 'faculty'\n\n id = Column(Integer, primary_key=True)\n name = Column(Text, unique=True)\n short = Column(Text, unique=True)\n\n\nclass Specialty(base):\n __tablename__ = 'specialty'\n\n id = Column(Integer, primary_key=True)\n name = Column(Text, unique=True)\n short = Column(Text, unique=True)\n\n\nclass Group(base):\n __tablename__ = 'group'\n\n id = Column(Integer, primary_key=True)\n specialty_id = Column(Integer, ForeignKey(\"specialty.id\"))\n faculty_id = Column(Integer, ForeignKey(\"faculty.id\"))\n\n specialty = relationship(\"Specialty\", backref=\"groups\")\n faculty = relationship(\"Faculty\", backref=\"groups\")\n\n course = Column(Integer)\n subgroup = Column(Integer)\n\n\nclass PlaceOfPractice(base):\n __tablename__ = 'place_of_practice'\n\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n info = Column(Text)\n\n\nclass Person(base):\n __tablename__ = 'person'\n\n id = Column(Integer, primary_key=True)\n\n telegram_id = Column(Integer, unique=True)\n fullname = Column(Text, unique=True)\n\n\nclass Student(base):\n __tablename__ = 'student'\n\n id = Column(Integer, primary_key=True)\n person_id = Column(Integer, ForeignKey(\"person.id\"))\n group_id = Column(Integer, ForeignKey(\"group.id\"))\n place_of_practice_id = Column(Integer, ForeignKey(\"place_of_practice.id\"))\n\n person = relationship(\"Person\", backref=backref(\"student\", uselist=False))\n group = relationship(\"Group\", backref=\"students\")\n place_of_practice = relationship(\"PlaceOfPractice\", backref=\"students\")\n\n has_debt = Column(Boolean, default=False)\n\n def __repr__(self):\n return f'<Student(id={self.id}, ' \\\n f'telegram_id={self.telegram_id}, ' \\\n f'fullname={self.fullname}>'\n\n\nclass Meeting(base):\n __tablename__ = 'meeting'\n\n id = Column(Integer, primary_key=True)\n date = Column(Text)\n start_time = Column(Text)\n end_time = Column(Text)\n place = Column(Text)\n\n\nclass MeetingWithHead(base):\n __tablename__ = 'meeting_with_head'\n\n id = Column(Integer, primary_key=True)\n head_id = Column(Integer, ForeignKey(\"head.id\"))\n meeting_id = Column(Integer, ForeignKey(\"meeting.id\"))\n\n head = relationship(\"Head\", backref=backref(\"meeting_with_head\", uselist=False, cascade=\"all, delete\"))\n meeting = relationship(\"Meeting\", backref=backref(\"meeting_with_head\", uselist=False, cascade=\"all, delete\"))\n\n\nclass ScheduleMeeting(base):\n __tablename__ = 'schedule_meeting'\n\n id = Column(Integer, primary_key=True)\n schedule_id = Column(Integer, ForeignKey(\"schedule.id\"))\n meeting_id = Column(Integer, ForeignKey(\"meeting.id\"))\n\n schedule = relationship(\"Schedule\", backref=backref(\"schedule_meetings\", cascade=\"all, delete\"))\n meeting = relationship(\"Meeting\", backref=backref(\"schedule_meetings\", cascade=\"all, delete\"))\n\n\nclass ExamMeeting(base):\n __tablename__ = 'exam_meeting'\n\n id = Column(Integer, primary_key=True)\n faculty_id = Column(Integer, ForeignKey(\"faculty.id\"))\n meeting_id = 
Column(Integer, ForeignKey(\"meeting.id\"))\n\n faculty = relationship(\"Faculty\", backref=backref(\"exam_date\", cascade=\"all, delete\"))\n meeting = relationship(\"Meeting\", backref=backref(\"exam_date\", cascade=\"all, delete\"))\n\n\nclass Head(base):\n __tablename__ = 'head'\n\n id = Column(Integer, primary_key=True)\n person_id = Column(Integer, ForeignKey(\"person.id\"))\n faculty_id = Column(Integer, ForeignKey(\"faculty.id\"))\n\n person = relationship(\"Person\", backref=backref(\"head\", uselist=False))\n faculty = relationship(\"Faculty\", backref=backref(\"head\", uselist=False))\n\n\nclass Schedule(base):\n __tablename__ = 'schedule'\n\n id = Column(Integer, primary_key=True)\n faculty_id = Column(Integer, ForeignKey(\"faculty.id\"))\n\n faculty = relationship(\"Faculty\", backref=backref(\"schedule\", uselist=False))\n\n created_date = Column(DateTime, default=datetime.utcnow)\n\n\nclass PracticeTerm(base):\n __tablename__ = 'practice_term'\n\n id = Column(Integer, primary_key=True)\n faculty_id = Column(Integer, ForeignKey(\"faculty.id\"))\n\n faculty = relationship(\"Faculty\", backref=backref(\"practice_term\", uselist=False))\n\n start_date = Column(Text)\n end_date = Column(Text)\n\n\nbase.metadata.create_all(engine)\nsession = sessionmaker(bind=engine)()\n"
},
{
"alpha_fraction": 0.6398348808288574,
"alphanum_fraction": 0.6460268497467041,
"avg_line_length": 32.44827651977539,
"blob_id": "4c0e457b955571f2aa0e96fd789f437eb3a5f22b",
"content_id": "d9e9577aef34476c793ee90f920b8e084205dc7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1114,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 29,
"path": "/src/bot/listeners/message_handlers/get_user_fullname.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import func\n\nfrom src.bot import tb\nfrom src.db import session, Person, Student\n\n\[email protected]_handler(func=lambda msg: hasattr(msg.reply_to_message, 'text') and msg.reply_to_message.text == \"Ваше ПІБ?\")\ndef get_schedule_file(msg):\n person = session.query(Person).filter(func.lower(Person.fullname) == func.lower(msg.text)).first()\n\n if not person:\n return tb.send_message(\n msg.chat.id,\n f\"Такого ПІБ немає у базі, зв'яжіться з [адміністратором](https://t.me/Regis322)\",\n parse_mode='Markdown'\n )\n\n if person.telegram_id:\n return tb.send_message(\n msg.chat.id,\n \"Людина за цим ПІБ вже зареэстрована, зв'яжіться з [адміністратором](https://t.me/Regis322)\",\n parse_mode='Markdown'\n )\n\n person.telegram_id = msg.chat.id\n session.add(person)\n session.commit()\n\n tb.send_message(msg.chat.id, \"Ваш ПІБ знайдено у базі, ваш телеграм під'єднано\")"
},
{
"alpha_fraction": 0.5813193917274475,
"alphanum_fraction": 0.5858915448188782,
"avg_line_length": 39.28947448730469,
"blob_id": "ee018f4cd5140f84268d6b4947ef2b3072bae809",
"content_id": "08819ac182bc152e12be450ec5132358ad8fdbe2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1713,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 38,
"path": "/src/scheduler/tasks/selected_place.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\nimport schedule\nfrom datetime import datetime\n\nfrom src.db import session, Faculty\nfrom src.bot import tb\n\nfrom telebot.types import InputMediaDocument\n\n\ndef selected_place():\n documents = [\n open(os.path.join(os.path.dirname(os.getcwd()), 'паспорт_бази_практики.docx'), \"rb\"),\n open(os.path.join(os.path.dirname(os.getcwd()), 'договір.docx'), \"rb\")\n ]\n\n today = datetime.today()\n faculties = session.query(Faculty).all()\n for faculty in faculties:\n practice_term_start_date = faculty.practice_term.start_date.strptime(\"%Y-%m-%d\")\n days_left_to_practice_start = (practice_term_start_date - today).days\n\n if 0 < days_left_to_practice_start <= 14:\n for group in faculty.groups:\n for student in group.students:\n if student.person.telegram_id:\n tb.send_message(\n student.person.telegram_id,\n \"Ви ще не вибрали місце практики, \"\n \"будь ласка ознайомтесь з переліком завдяки команді /places list \"\n \"та виберіть один з доступних варіантів завдяки команді /places take.\\n\\n\"\n \"Також оформить наступний перелік документів:\",\n parse_mode='Markdown',\n )\n tb.send_media_group(student.person.telegram_id, [InputMediaDocument(doc) for doc in documents])\n\n\nschedule.every().day.at(\"08:00\").do(selected_place)\n"
},
{
"alpha_fraction": 0.6556291580200195,
"alphanum_fraction": 0.6622516512870789,
"avg_line_length": 12.727272987365723,
"blob_id": "569055180182bfa4b7135ab93bea60ab3005344e",
"content_id": "164874564e98c80b9d6410b70288811b7c351f18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 11,
"path": "/src/scheduler/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import time\n\nimport schedule\n\nimport src.scheduler.tasks\n\n\ndef loop_over_tasks():\n while True:\n schedule.run_pending()\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5589026212692261,
"alphanum_fraction": 0.5610543489456177,
"avg_line_length": 36.93877410888672,
"blob_id": "8d14c8bb1d89fbf44ae4b57d8dc1df6bf7957f48",
"content_id": "68b3dddd9aea231a58fcbff0a089b80d2977f2ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2039,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 49,
"path": "/src/bot/listeners/commands/schedule.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom telebot.types import ForceReply\n\nfrom src.bot import tb\nfrom src.db import session, Person, Student, PlaceOfPractice\n\n\[email protected]_handler(commands=['schedule'])\ndef schedule_command(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if not person:\n return tb.send_message(msg.chat.id, \"Ви не зареєстровані\")\n\n args = msg.text.split()[1:]\n if len(args) == 1:\n if args[0] == 'change':\n if person.student:\n return tb.send_message(msg.chat.id, \"В тебе немає доступу до цієї команди! 😡\")\n\n return tb.send_message(msg.chat.id, \"Очікую файл розкладу\", reply_markup=ForceReply(selective=False))\n elif args[0] == 'show':\n schedule = None\n\n if person.student:\n schedule = person.student.group.faculty.schedule\n elif person.head:\n schedule = person.head.faculty.schedule\n\n if not schedule:\n return tb.send_message(msg.chat.id, \"Розкладу поки що немає 😢\")\n\n return tb.send_message(\n msg.chat.id,\n f\"Розклад консультацій від *{schedule.created_date.strftime('%Y-%m-%d')}*:\\n\\n\"\n \"\\n\".join((\n f\"*Дата*\\n\"\n f\"{schedule_meeting.meeting.date}\\n\"\n f\"*Час початку*\\n\"\n f\"{schedule_meeting.meeting.start_time}\\n\"\n f\"*Час завершення*\\n\"\n f\"{schedule_meeting.meeting.end_time}\\n\"\n f\"*Місце проведення*\\n\"\n f\"{schedule_meeting.meeting.place}\\n\" for schedule_meeting in schedule.schedule_meetings\n )),\n parse_mode='Markdown'\n )\n\n return tb.send_message(msg.chat.id, \"Незнайомий аргумент, доступні: <change>, <show>\")\n"
},
{
"alpha_fraction": 0.6165167689323425,
"alphanum_fraction": 0.618969738483429,
"avg_line_length": 32.97222137451172,
"blob_id": "b4ad244cb4d90e70845469af66b4e2509e3b9e83",
"content_id": "52d3048cb744e79988f6e53bef8fd9d252a5a82d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 36,
"path": "/src/bot/listeners/commands/inform.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom src.bot import tb\nfrom src.db import session, Person\n\n\[email protected]_handler(commands=['inform'])\ndef inform_command(msg):\n person = session.query(Person).filter(Person.telegram_id == msg.chat.id).first()\n if not person:\n return tb.send_message(msg.chat.id, \"Ви не зареєстровані\")\n\n if person.student:\n return tb.send_message(msg.chat.id, \"Ця команда для керівників практики!\")\n\n not_informed_students = []\n informed_students = []\n args = msg.text.split()[1:]\n if len(args) > 1:\n inform_message = ' '.join(args)\n for group in person.faculty.groups:\n for student in group.students:\n if not student.person.telegram_id:\n not_informed_students.append(student)\n continue\n\n tb.send_message(student.person.telegram_id, inform_message)\n informed_students.append(student)\n\n tb.send_message(\n msg.chat.id,\n f\"Інформованих студентів: {len(informed_students)}\\n\"\n \"Не отримали повідомленя: \\n\"\n f\"{os.linesep.join((f'{i + 1}. {student.person.fullname}' for (i, student) in enumerate(not_informed_students)))}\",\n parse_mode='Markdown'\n )\n"
},
{
"alpha_fraction": 0.6902173757553101,
"alphanum_fraction": 0.7663043737411499,
"avg_line_length": 19.44444465637207,
"blob_id": "99f708dc895003ed99e6a68c06db0a9d7b73acc0",
"content_id": "9a9976090e3b8a692ea65171362fee4f9dec1f28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/src/bot/__init__.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from telebot import TeleBot\n\ntb = TeleBot(\"1698606681:AAH_XQtsvWBoF9pm5DIsW-4bJyjAJxDick0\")\n\nfrom src.bot.listeners import *\n\n\ndef loop_over_requests():\n tb.polling(none_stop=True)\n"
},
{
"alpha_fraction": 0.712837815284729,
"alphanum_fraction": 0.712837815284729,
"avg_line_length": 23.66666603088379,
"blob_id": "fe1f03ffd4a5b3ce386881c57933991e5410f729",
"content_id": "ded3738b2802b8a0bba6af0e490ec7b666d15799",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 12,
"path": "/src/main.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from threading import Thread\n\nfrom src.bot import loop_over_requests\nfrom src.scheduler import loop_over_tasks\n\n\nif __name__ == '__main__':\n bot_thread = Thread(target=loop_over_requests)\n schedule_thread = Thread(target=loop_over_tasks)\n\n bot_thread.start()\n schedule_thread.start()\n"
},
{
"alpha_fraction": 0.686274528503418,
"alphanum_fraction": 0.6880570650100708,
"avg_line_length": 32,
"blob_id": "b060ba7ce45bc31ea7dae0398c72c834acfbd98d",
"content_id": "fffd56d12635b6fc350e25aceb7bb634d14b2f86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 17,
"path": "/src/bot/listeners/callback_query_handlers/user_identity.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "from telebot.types import ForceReply\n\nfrom src.bot import tb\n\n\[email protected]_query_handler(func=lambda call: call.data.startswith('identity_markup='))\ndef user_identity_query_handler(call):\n tb.edit_message_text(\n f\"Ви обрали: *{call.data.split('identity_markup=')[1]}*\",\n call.message.chat.id,\n call.message.message_id,\n reply_markup=None,\n parse_mode='Markdown'\n )\n\n tb.answer_callback_query(callback_query_id=call.id)\n tb.send_message(call.message.chat.id, \"Ваше ПІБ?\", reply_markup=ForceReply(selective=False))\n"
},
{
"alpha_fraction": 0.5415657758712769,
"alphanum_fraction": 0.5464084148406982,
"avg_line_length": 38.967742919921875,
"blob_id": "bdbbc6444218b8af74eae5f4212fbef430798e3a",
"content_id": "4348407f5d5a55c66a579edf96ead7764a6640c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1310,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 31,
"path": "/src/scheduler/tasks/practice_terms.py",
"repo_name": "ilxijwd/kursachbot",
"src_encoding": "UTF-8",
"text": "import schedule\nfrom datetime import datetime\n\nfrom src.db import session, Faculty\nfrom src.bot import tb\n\n\ndef practice_terms():\n today = datetime.today()\n faculties = session.query(Faculty).all()\n for faculty in faculties:\n practice_term_start_date = faculty.practice_term.start_date.strptime(\"%Y-%m-%d\")\n practice_term_end_date = faculty.practice_term.start_date.strptime(\"%Y-%m-%d\")\n days_left_to_practice_start = (practice_term_start_date - today).days\n\n if 0 < days_left_to_practice_start <= 3:\n for group in faculty.groups:\n for student in group.students:\n if student.person.telegram_id:\n tb.send_message(\n student.person.telegram_id,\n f\"Кількість днів до початку практики: {days_left_to_practice_start}\\n\\n\"\n f\"*Дата початку практики*\\n\"\n f\"{practice_term_end_date}\\n\\n\"\n f\"*Дата завершення практики*\\n\"\n f\"{practice_term_end_date}\",\n parse_mode='Markdown'\n )\n\n\nschedule.every().day.at(\"08:00\").do(practice_terms)\n"
}
] | 23 |
lewismacdonald/async-web-app
|
https://github.com/lewismacdonald/async-web-app
|
cb31ed3aa24c3318b769a6c2cefea47482f40434
|
4f28a2c57b2ebdfca058d532ab61a6cb5829f9b6
|
9bc9e900e79237f131d627d869ea1f81084d02a5
|
refs/heads/master
| 2020-04-05T19:50:52.059036 | 2018-11-25T06:26:15 | 2018-11-25T06:26:15 | 157,153,155 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6236497759819031,
"alphanum_fraction": 0.6259238123893738,
"avg_line_length": 19.740739822387695,
"blob_id": "8088f3e2a806d20397028ea75f4b6806a934c591",
"content_id": "b8d3a51f949551fbaac281d0c63bd512be2f4f1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1759,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 81,
"path": "/aclient.py",
"repo_name": "lewismacdonald/async-web-app",
"src_encoding": "UTF-8",
"text": "import asyncio\r\nimport aiohttp\r\nimport requests\r\n\r\nimport variants\r\n\r\nimport datetime\r\n\r\nclass Client:\r\n\r\n\[email protected]\r\n\tdef get_url(self, url):\r\n\t\tprint(\"Sent Request for %s\" % url)\r\n\t\treturn requests.get(url).content\r\n\r\n\t@get_url.variant('async')\r\n\tasync def get_url(self, url):\r\n\t\ttry:\r\n\t\t\tasync with aiohttp.request('GET', url) as response:\r\n\t\t\t\tprint(\"Sent Request for %s\" % url)\r\n\t\t\t\treturn await response.content.read()\r\n\t\texcept:\r\n\t\t\treturn \"\"\r\n\r\n\r\n\r\ndef async_url_length(urls, event_loop):\r\n\tclient = Client()\r\n\tresponses = event_loop.run_until_complete(\r\n\t\tasyncio.gather(\r\n\t\t\t*[client.get_url.async(x) for x in urls]\r\n\t\t)\r\n\t)\r\n\treturn list(map(len, responses))\r\n\r\nif __name__=='__main__':\r\n\turls = [\r\n\t\"https://github.com\",\r\n\t\"https://facebook.com\",\r\n\t\"https://google.com\",\r\n\t\"https://ebay.com\",\r\n\t\"https://amazon.co.uk\",\r\n\t\"https://twitter.com\",\r\n\t\"https://docs.python.org/3.3/library/functions.html#open\"\r\n\t]\r\n\r\n\r\n\turls = urls*10\r\n\r\n\ttotal = len(urls)\r\n\t\r\n\tclient = Client()\r\n\tloop = asyncio.get_event_loop()\r\n\tr = async_url_length(urls, loop)\r\n\tstart = datetime.datetime.now()\r\n\tasync_items = loop.run_until_complete(\r\n\t\tasyncio.gather(\r\n\t\t\t*[client.get_url.async(x) for x in urls]\r\n\t\t)\r\n\t)\r\n\tprint(list(map(len, async_items)))\r\n\tmid = datetime.datetime.now()\r\n\r\n\tprint(\"async time taken for %s urls:\" % total, mid-start)\r\n\tsync_items = [\r\n\t\tclient.get_url(x)\r\n\t\tfor x in urls\r\n\t]\r\n\tend = datetime.datetime.now()\r\n\tprint(list(map(len, sync_items)))\r\n\tprint(\"sync time taken for %s urls:\" % total, end-mid)\r\n\r\n\tfor i, c in enumerate(sync_items):\r\n\t\tfile = f\"{str(i)}_sync.txt\"\r\n\t\twith open(file, 'wb') as f:\r\n\t\t\tf.write(c)\r\n\r\n\tfor i, c in enumerate(async_items):\r\n\t\tfile = f\"{str(i)}_async.txt\"\r\n\t\twith open(file, 'wb') as f:\r\n\t\t\tf.write(c)"
},
{
"alpha_fraction": 0.6408364176750183,
"alphanum_fraction": 0.6469864845275879,
"avg_line_length": 21.257143020629883,
"blob_id": "f9de20137c1bed64fdd45021c0cf964c589f5417",
"content_id": "bf837dd9deb7ddaef6349212291a21c166e88253",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1626,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 70,
"path": "/app.py",
"repo_name": "lewismacdonald/async-web-app",
"src_encoding": "UTF-8",
"text": "import json\r\nimport csv\r\nimport logging\r\nimport asyncio\r\nfrom flask import Flask, jsonify, render_template, request\r\nimport sys\r\nfrom io import StringIO\r\n\r\nfrom aclient import Client\r\nlogging.basicConfig(level=logging.DEBUG, stream=sys.stdout)\r\napp = Flask(__name__)\r\nevent_loop = asyncio.get_event_loop()\r\n\r\nclass FileParser:\r\n\tdef __init__(self, file):\r\n\t\tself.file=file\r\n\tdef parse(self):\r\n\t\tdata = self.file.read().decode('utf-8')\r\n\t\tprint(data, file=sys.stderr)\r\n\t\treader = csv.reader(StringIO(data))\r\n\t\theader = next(reader)\r\n\t\tdata = list([r for r in reader])\r\n\t\treturn {\r\n\t\t\t\"data\": data,\r\n\t\t\t\"headers\": header\r\n\t\t}\r\n\r\[email protected]('/')\r\ndef index():\r\n\treturn render_template(\"index.html\")\r\n\r\[email protected]('/upload', methods=['POST'])\r\ndef handle_upload():\r\n\tprint(request.files['file'], file=sys.stderr)\r\n\tparser = FileParser(request.files['file'])\r\n\treturn jsonify(parser.parse())\r\n\r\[email protected]('/translate', methods=['POST'])\r\ndef translate():\r\n\turls = request.json['urls']\r\n\tevent_loop = asyncio.new_event_loop()\r\n\tasyncio.set_event_loop(event_loop)\r\n\tclient = Client()\r\n\tresponses = event_loop.run_until_complete(\r\n\t\tasyncio.gather(\r\n\t\t\t*[client.get_url.async(x) for x in urls]\r\n\t\t)\r\n\t)\r\n\tresults= [\r\n\t\t{\"value\": x, \"alternate\":[x+10, x, x-50]} for x in map(len, responses)\r\n\t]\r\n\r\n\tprint(\"results\", results, file=sys.stderr)\r\n\treturn jsonify(results)\r\n\r\[email protected]('/default_data')\r\ndef get_data():\r\n\tdata = [\r\n\t\t['Google', 'http://google.com', 12344]\r\n\t]\r\n\theaders = ['Name', 'URL', 'Expected Length']\r\n\r\n\treturn jsonify({\r\n\t\t\"data\":data,\r\n\t\t\"headers\":headers\r\n\t})\r\n\r\n\r\nif __name__=='__main__':\r\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 44,
"blob_id": "50862c24972fbd4f400d35755daa7544e69be102",
"content_id": "b07e4bb659a86bab3178f77cf2d87e8f45c54bbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 2,
"path": "/readme.md",
"repo_name": "lewismacdonald/async-web-app",
"src_encoding": "UTF-8",
"text": "# Async Web App\r\nTest web app that loads length of provided urls on a js UI asyncronously.\r\n"
}
] | 3 |
Szewoj/Voice-Modulation
|
https://github.com/Szewoj/Voice-Modulation
|
9d5a009e96dd7b3644290a2d4c15b6a248968aab
|
a7c6535b63abbd4e805e585ac452a5518f30bdda
|
6267f472d6d22146807c0e9a54a01b5e539d65d6
|
refs/heads/main
| 2023-02-11T01:22:04.449501 | 2020-12-22T16:51:35 | 2020-12-22T16:51:35 | 307,952,238 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5721815824508667,
"alphanum_fraction": 0.5845944285392761,
"avg_line_length": 24.35344886779785,
"blob_id": "91db2e524a49564fe3535f73e7d7b006b455fd87",
"content_id": "5589edf774b36ce0248a98fca877b32d2a03e859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5881,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 232,
"path": "/src/playback.c",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <signal.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <string.h>\n#include <unistd.h>\n#include <semaphore.h>\n#include <sys/time.h>\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include \"portaudio.h\"\n#include <unistd.h>\n#include <sys/types.h>\n#include <pthread.h>\n\n#define SAMPLE_RATE (20000)\n#define FRAMES_PER_BUFFER (1024)\n#define NUM_MILISECONDS (20)\n#define NUM_CHANNELS (1)\n\n#define PA_SAMPLE_TYPE paInt16\ntypedef short SAMPLE;\n#define SAMPLE_SILENCE (0)\n#define PRINTF_S_FORMAT \"%d\"\n\n\nconst char* shmName = \"/mod\";\nchar* addr;\nint fd;\n\nsem_t* log3_semaphore;\nFILE *fid;\n\nconst char *slName = \"/samp_mod\";\npthread_spinlock_t* sl;\n\nint fdsl;\n\nint main(void);\nvoid SIGTERM_handler();\n\nvoid sl_try(char* sl)\n{\n fprintf(stderr,\"trying, sl = %d\\n\", *sl);\n while(*sl);\n memset(sl, 1, sizeof(char));\n fprintf(stderr,\"locked, sl = %d\\n\", *sl);\n}\n\nvoid sl_open(char* sl)\n{\n fprintf(stderr,\"openieng, sl = %d\\n\", *sl);\n memset(sl, 0, sizeof(char));\n fprintf(stderr,\"opened, sl = %d\\n\", *sl);\n}\n\nint main(void)\n{\n fprintf(stderr,\"running playback\\n\");\n PaStreamParameters outputParam;\n PaStream *audioStream;\n PaError exception;\n SAMPLE *samplesRecorded;\n\n struct timeval sendTime, receiveTime;\n\n struct sigaction action;\n memset(&action, 0, sizeof(struct sigaction));\n action.sa_handler = SIGTERM_handler;\n sigaction(SIGINT, &action, NULL);\n\n struct sigaction action2;\n memset(&action2, 0, sizeof(struct sigaction));\n action2.sa_handler = SIGTERM_handler;\n sigaction(SIGINT, &action2, NULL);\n\n fid = fopen(\"logs/log3.txt\", \"w\");\n fclose(fid);\n\n log3_semaphore = sem_open(\"/log3\", O_CREAT, O_RDWR, 1);\n\n fd = shm_open(shmName, O_CREAT | O_RDWR, 0666);\n ftruncate(fd, 2048);\n addr = mmap(NULL, 2048, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);\n\n\n\n fdsl = shm_open(slName, O_CREAT | O_RDWR, 0666);\n ftruncate(fdsl, sizeof(pthread_spinlock_t));\n sl = (pthread_spinlock_t*) mmap(NULL, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fdsl, 0);\n\n\n int i;\n float amountOfFrames;\n int amountOfSamples;\n int amountOfBytes;\n\n amountOfFrames = NUM_MILISECONDS * SAMPLE_RATE/1000; \n amountOfSamples = amountOfFrames * NUM_CHANNELS;\n amountOfBytes = amountOfSamples * sizeof(SAMPLE);\n\n exception = Pa_Initialize();\n\n if( exception != paNoError ) \n goto error;\n\n outputParam.device = Pa_GetDefaultOutputDevice(); \n if (outputParam.device == paNoDevice) \n {\n printf(\"Error: No default output device.\\n\");\n goto error;\n }\n\n outputParam.channelCount = NUM_CHANNELS;\n outputParam.sampleFormat = PA_SAMPLE_TYPE;\n outputParam.suggestedLatency = Pa_GetDeviceInfo( outputParam.device )->defaultLowOutputLatency;\n outputParam.hostApiSpecificStreamInfo = NULL;\n\n \n while(1)\n {\n //printf(\"playback loop\\n\");\n\n samplesRecorded = (SAMPLE *) malloc( amountOfBytes );\n long error = 0;\n\n if( samplesRecorded == NULL )\n {\n printf(\"Could not allocate playback array.\\n\");\n exit(1);\n }\n\n for( i=0; i<amountOfSamples; i++ ) samplesRecorded[i] = 0;\n\n\n pthread_spin_lock(sl);\n\n\n memcpy(&sendTime, addr, sizeof(struct timeval));\n memcpy(&error, addr, sizeof(long));\n memcpy(samplesRecorded, addr + sizeof(struct timeval), NUM_CHANNELS * sizeof(SAMPLE) * amountOfFrames);\n\n if(error){\n memset(addr, 0, sizeof(long));\n printf(\"Read data from 'samp/mod.raw'.\\n\");\n //fprintf(stderr, \"Read data from 'samp/mod.raw'.\\n\");\n }\n\n\n 
pthread_spin_unlock(sl);\n\n\n if(error)\n {\n gettimeofday(&receiveTime, NULL);\n\n if(sem_wait(log3_semaphore) < 0)\n printf(\"[sem_wait] failed.\\n\");\n\n fid = fopen(\"logs/log3.txt\", \"a\");\n\n if( fid != NULL )\n { \n unsigned int time = (receiveTime.tv_sec - sendTime.tv_sec) * 1000000 + (receiveTime.tv_usec - sendTime.tv_usec);\n fprintf(fid, \"%d\\n\", time);\n //fwrite( &time, sizeof(unsigned int), 1, fid);\n fclose( fid );\n printf(\"write data to 'logs/log3.txt'.\\n\");\n //fprintf(stderr,\"write data to 'logs/log3.txt'.\\n\");\n }\n\n if (sem_post(log3_semaphore) < 0)\n printf(\"[sem_post] failed.\\n\");\n //printf(\"Begin playback.\\n\");\n\n exception = Pa_OpenStream(\n &audioStream,\n NULL, \n &outputParam,\n SAMPLE_RATE,\n FRAMES_PER_BUFFER,\n paClipOff, \n NULL, \n NULL ); \n\n if( exception != paNoError ) \n goto error;\n\n if( audioStream )\n {\n exception = Pa_StartStream( audioStream );\n if( exception != paNoError ) \n goto error;\n\n exception = Pa_WriteStream( audioStream, samplesRecorded, amountOfFrames );\n if( exception != paNoError ) \n goto error;\n\n exception = Pa_StopStream( audioStream );\n if( exception != paNoError ) \n goto error;\n //printf(\"Done.\\n\");\n\n exception = Pa_CloseStream( audioStream );\n if( exception != paNoError ) \n goto error;\n }\n }\n free( samplesRecorded );\n }\n\n Pa_Terminate();\n return 0;\n\nerror:\n Pa_Terminate();\n\n free( samplesRecorded );\n printf(\"An error occured while using the audio playback stream. Terminating...\\n\" );\n printf(\"Error number: %d\\n\", exception );\n printf(\"Error message: %s\\n\", Pa_GetErrorText( exception ) );\n return -1;\n}\n\nvoid SIGTERM_handler()\n{\n\n Pa_Terminate();\n sl_open(sl);\n printf(\"Received kill signal. Terminating...\\n\" );\n exit(EXIT_SUCCESS);\n}"
},
{
"alpha_fraction": 0.554600179195404,
"alphanum_fraction": 0.5789337754249573,
"avg_line_length": 25.62470245361328,
"blob_id": "939c0dfe8cff4b71a6ab0e864481189919caa955",
"content_id": "076d54416aa16a5be0e0cfeaca31c6aaf98e9383",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 11632,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 421,
"path": "/src/modulator.cpp",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#include <iostream>\r\n#include <thread>\r\n#include <semaphore.h>\r\n#include <csignal>\r\n#include <fstream>\r\n#include <sys/stat.h>\r\n#include <fcntl.h>\r\n#include <queue>\r\n#include <cmath>\r\n#include <cstring>\r\n#include <sys/time.h>\r\n#include <sys/mman.h>\r\n#include <unistd.h>\r\n#include <sys/types.h>\r\n#include <pthread.h>\r\n\r\n#define SAMPLE_RATE (20000)\r\n#define NUM_CHANNELS (1)\r\n\r\n#define PITCH_SEMITONES (10)\r\n\r\n#define BUFFER_SIZE (2048)\r\n#define MAX_FRAME_LENGTH (1024)\r\n\r\n#define FRAME_MS (20)\r\n#define OVERLAP_MS (10)\r\n\r\n#define NUM_MILISECONDS (20)\r\n\r\nusing namespace std;\r\n\r\n\r\nsem_t* log1_semaphore;\r\nsem_t* log2_semaphore;\r\n\r\n\r\nchar *addrIn, *addrOut;\r\npthread_spinlock_t *samp_raw_sl, *samp_mod_sl;\r\nint fdIn, fdOut, fd_samp_raw, fd_samp_mod;\r\n\r\nqueue<unsigned int> log1_time_diff;\r\nqueue<unsigned int> log2_time_diff;\r\n\r\n\r\nvoid FFT(float* buffer, long int frame_size, long int direction); // direction: -1: FFT, 1: IFFT\r\nvoid processSamples(long int semitones, long int numSamples, long int frame_size, long int osamp, float sampleRate, short int *indata, short int *outdata);\r\n\r\nvoid log_handler();\r\nvoid SIGTERM_handler(int signal_id);\r\n\r\nint main(int argc, char const *argv[])\r\n{\r\n\tcerr << \"running modulator\\n\";\r\n\t/*************************************************************************************/\r\n\t// Main loop variables:\r\n\tbool isMod = strtol(argv[1], NULL, 10) == 1;\r\n\r\n\tint inSamples = NUM_MILISECONDS * SAMPLE_RATE/1000;\r\n\tshort int inSampleBuffer[BUFFER_SIZE] = {0};\r\n\tshort int outSampleBuffer[BUFFER_SIZE] = {0};\r\n\r\n\r\n\tint sframe = SAMPLE_RATE * FRAME_MS /1000;\r\n\tint overlap = SAMPLE_RATE * OVERLAP_MS /1000;\r\n\r\n\tlong checkIn;\r\n\t/*************************************************************************************/\r\n\t// Time measurement variables:\r\n\tstruct timeval sendTime, receiveTime;\r\n\tstruct timeval postTime;\r\n\tstruct timeval startTime, endTime;\r\n\t/*************************************************************************************/\r\n\t// Semaphore configuration\r\n\tlog1_semaphore = sem_open(\"/log1\", O_CREAT, O_RDWR, 1);\r\n\tlog2_semaphore = sem_open(\"/log2\", O_CREAT, O_RDWR, 1);\r\n\r\n\r\n\tsignal(SIGTERM, SIGTERM_handler);\r\n\tsignal(SIGINT, SIGTERM_handler);\r\n\r\n\t/*************************************************************************************/\r\n\t// Shared memory and spinlock initialisation:\r\n\t\r\n\tfdIn = shm_open(\"/raw\", O_CREAT | O_RDWR, 0666);\r\n\tftruncate(fdIn, 2048);\r\n\taddrIn = (char*)mmap(0, 2048, PROT_READ | PROT_WRITE, MAP_SHARED, fdIn, 0);\r\n\r\n\tfdOut = shm_open(\"/mod\", O_CREAT | O_RDWR, 0666);\r\n\tftruncate(fdOut, 2048);\r\n\taddrOut = (char*)mmap(0, 2048, PROT_READ | PROT_WRITE, MAP_SHARED, fdOut, 0);\r\n\r\n\tfd_samp_raw = shm_open(\"/samp_raw\", O_CREAT | O_RDWR, 0666);\r\n\tftruncate(fd_samp_raw, sizeof(pthread_spinlock_t));\r\n\tsamp_raw_sl = (pthread_spinlock_t*)mmap(0, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd_samp_raw, 0);\r\n\r\n\tfd_samp_mod = shm_open(\"/samp_mod\", O_CREAT | O_RDWR, 0666);\r\n\tftruncate(fd_samp_mod, sizeof(pthread_spinlock_t));\r\n\tsamp_mod_sl = (pthread_spinlock_t*)mmap(0, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd_samp_mod, 0);\r\n\r\n\t/*************************************************************************************/\r\n\t// Log file initialisation:\r\n\tfstream 
log_file;\r\n\r\n\tlog_file.open(\"logs/log1.txt\", fstream::out | fstream::trunc);\r\n\tlog_file.close();\r\n\r\n\tlog_file.open(\"logs/log2.txt\", fstream::out | fstream::trunc);\r\n\tlog_file.close();\r\n\r\n\t/*************************************************************************************/\r\n\t// Logging thread launch:\r\n\tthread logging_thread(log_handler);\r\n\t/*************************************************************************************/\r\n\t// Main processing loop:\r\n\twhile (true) \r\n\t{\r\n\t\tpthread_spin_lock(samp_raw_sl);\r\n\r\n\t\tmemcpy(&sendTime, addrIn, sizeof(struct timeval));\r\n\t\tmemcpy(&checkIn, addrIn, sizeof(long));\r\n\t\tmemcpy(inSampleBuffer, addrIn + sizeof(struct timeval), inSamples * sizeof(short int));\r\n\t\tif(!checkIn){\r\n\t\t\tpthread_spin_unlock(samp_raw_sl);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\r\n\t\tgettimeofday(&receiveTime, NULL);\r\n\t\tlog1_time_diff.push((receiveTime.tv_sec - sendTime.tv_sec) * 1000000 + receiveTime.tv_usec - sendTime.tv_usec);\r\n\t\t\r\n\t\tmemset(addrIn, 0, sizeof(long));\r\n\r\n\t\tpthread_spin_unlock(samp_raw_sl);\r\n\r\n\r\n\r\n\t\tgettimeofday(&startTime, NULL);\r\n\t\tif(isMod){\r\n\t\t\tprocessSamples(PITCH_SEMITONES, inSamples, sframe, overlap, SAMPLE_RATE, inSampleBuffer, outSampleBuffer);\t\t\t\r\n\t\t}\r\n\t\tgettimeofday(&endTime, NULL);;\r\n\t\tlog2_time_diff.push((endTime.tv_sec - startTime.tv_sec) * 1000000 + endTime.tv_usec - startTime.tv_usec);\r\n\r\n\r\n\r\n\t\tpthread_spin_lock(samp_mod_sl);\r\n\r\n\t\tgettimeofday(&postTime, NULL);\r\n\t\tmemcpy( addrOut, &postTime, sizeof(struct timeval));\r\n\r\n\t\tif(isMod){\r\n\t\t\tmemcpy( addrOut + sizeof(struct timeval), outSampleBuffer, inSamples*sizeof(short int));\t\t\t\r\n\t\t}else{\r\n\t\t\tmemcpy( addrOut + sizeof(struct timeval), inSampleBuffer, inSamples*sizeof(short int));\t\t\r\n\t\t}\r\n\t\tpthread_spin_unlock(samp_mod_sl);\r\n\r\n\t}\r\n\t/*************************************************************************************/\r\n\t// If somehow loop broke, exit with error\r\n\treturn EXIT_FAILURE;\r\n\t/*************************************************************************************/\r\n}\r\n\r\nvoid FFT(float* buffer, long int frame_size, long int direction)\r\n{\r\n\tfloat wr, wi, arg, *p1, *p2, temp;\r\n\tfloat tr, ti, ur, ui, *p1r, *p1i, *p2r, *p2i;\r\n\tlong int bitm, j;\r\n\t\r\n\tfor (long int i = 2; i < 2*frame_size-2; i += 2) {\r\n\t\tfor (bitm = 2, j = 0; bitm < 2*frame_size; bitm <<= 1) {\r\n\t\t\tif (i & bitm) j++;\r\n\t\t\tj <<= 1;\r\n\t\t}\r\n\t\tif (i < j) {\r\n\t\t\tp1 = buffer+i; p2 = buffer+j;\r\n\t\t\ttemp = *p1; *(p1++) = *p2;\r\n\t\t\t*(p2++) = temp; temp = *p1;\r\n\t\t\t*p1 = *p2; *p2 = temp;\r\n\t\t}\r\n\t}\r\n\r\n\tint le2;\r\n\r\n\tfor (long int k = 0, le = 2; k < (long int)(log(frame_size)/log(2.)+.5); ++k) {\r\n\t\tle <<= 1;\r\n\t\tle2 = le>>1;\r\n\t\tur = 1.0;\r\n\t\tui = 0.0;\r\n\t\targ = M_PI / (le2>>1);\r\n\t\twr = cos(arg);\r\n\t\twi = direction*sin(arg);\r\n\t\tfor (j = 0; j < le2; j += 2) {\r\n\t\t\tp1r = buffer+j; p1i = p1r+1;\r\n\t\t\tp2r = p1r+le2; p2i = p2r+1;\r\n\t\t\tfor (int i = j; i < 2*frame_size; i += le) {\r\n\t\t\t\ttr = *p2r * ur - *p2i * ui;\r\n\t\t\t\tti = *p2r * ui + *p2i * ur;\r\n\t\t\t\t*p2r = *p1r - tr; *p2i = *p1i - ti;\r\n\t\t\t\t*p1r += tr; *p1i += ti;\r\n\t\t\t\tp1r += le; p1i += le;\r\n\t\t\t\tp2r += le; p2i += le;\r\n\t\t\t}\r\n\t\t\ttr = ur*wr - ui*wi;\r\n\t\t\tui = ur*wi + ui*wr;\r\n\t\t\tur = tr;\r\n\t\t}\r\n\t}\r\n\r\n}\r\n\r\nvoid processSamples(long int semitones, long 
int numSamples, long int frame_size, long int osamp, float sampleRate, short int *indata, short int *outdata)\r\n{\r\n\tcout << \"modulating\\n\";\r\n\tif(!numSamples)\r\n\t\treturn;\r\n\tstatic float inputFIFO[MAX_FRAME_LENGTH];\r\n\tstatic float outputFIFO[MAX_FRAME_LENGTH];\r\n\tstatic float fftWorkspace[2*MAX_FRAME_LENGTH];\r\n\tstatic float lastPhase[MAX_FRAME_LENGTH/2+1];\r\n\tstatic float sumOfPhase[MAX_FRAME_LENGTH/2+1];\r\n\tstatic float outputAccumulator[2*MAX_FRAME_LENGTH];\r\n\tstatic float analisedFreq[MAX_FRAME_LENGTH];\r\n\tstatic float analisedMagn[MAX_FRAME_LENGTH];\r\n\tstatic float synthesisedFreq[MAX_FRAME_LENGTH];\r\n\tstatic float synthesisedMagn[MAX_FRAME_LENGTH];\r\n\r\n\tstatic bool init = false;\r\n\tstatic long int rover = 0;\r\n\r\n\tdouble magn, phase, tmp, window, real, imag;\r\n\tdouble freqPerBin, expectedFrequency;\r\n\r\n\tlong int qpd, index, inputLatency, stepSize, frame_size_2;\r\n\r\n\tfloat psFactor = pow(2.0, semitones/12.0);\r\n\r\n\tframe_size_2 = frame_size / 2;\r\n\tstepSize = frame_size/osamp;\r\n\tfreqPerBin = sampleRate/(double)frame_size;\r\n\texpectedFrequency = 2.*M_PI*(double)stepSize/(double)frame_size;\r\n\tinputLatency = frame_size - stepSize;\r\n\r\n\tif(!rover) rover = inputLatency;\r\n\r\n\tif (init == false) {\r\n\t\tmemset(inputFIFO, 0, MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tmemset(outputFIFO, 0, MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tmemset(fftWorkspace, 0, 2*MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tmemset(lastPhase, 0, (MAX_FRAME_LENGTH/2+1)*sizeof(float));\r\n\t\tmemset(sumOfPhase, 0, (MAX_FRAME_LENGTH/2+1)*sizeof(float));\r\n\t\tmemset(outputAccumulator, 0, 2*MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tmemset(analisedFreq, 0, MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tmemset(analisedMagn, 0, MAX_FRAME_LENGTH*sizeof(float));\r\n\t\tinit = true;\r\n\t}\r\n\r\n\tfor (long int i = 0; i < numSamples; ++i){\r\n\r\n\t\tinputFIFO[rover] = indata[i];\r\n\t\toutdata[i] = outputFIFO[rover-inputLatency];\r\n\t\t++rover;\r\n\r\n\t\tif (rover >= frame_size) {\r\n\t\t\trover = inputLatency;\r\n\r\n\t\t\tfor (long int k = 0; k < frame_size; ++k) {\r\n\t\t\t\twindow = -0.5*cos(2.0*M_PI*(double)k/(double)frame_size)+0.5;\r\n\t\t\t\tfftWorkspace[2*k] = inputFIFO[k] * window;\r\n\t\t\t\tfftWorkspace[2*k+1] = 0.0;\r\n\t\t\t}\r\n\r\n\t\t\t// Analise:\r\n\t\t\tFFT(fftWorkspace, frame_size, -1);\r\n\r\n\t\t\tfor (long int k = 0; k <= frame_size_2; ++k) {\r\n\r\n\t\t\t\t\r\n\t\t\t\treal = fftWorkspace[2*k];\r\n\t\t\t\timag = fftWorkspace[2*k+1];\r\n\r\n\t\t\t\t\r\n\t\t\t\tmagn = 2.*sqrt(real*real + imag*imag);\r\n\t\t\t\tphase = atan2(imag,real);\r\n\r\n\t\t\t\t\r\n\t\t\t\ttmp = phase - lastPhase[k];\r\n\t\t\t\tlastPhase[k] = phase;\r\n\r\n\r\n\t\t\t\ttmp -= (double)k*expectedFrequency;\r\n\r\n\t\t\t\t\r\n\t\t\t\tqpd = tmp/M_PI;\r\n\t\t\t\tif (qpd >= 0) qpd += qpd&1;\r\n\t\t\t\telse qpd -= qpd&1;\r\n\t\t\t\ttmp -= M_PI*(double)qpd;\r\n\r\n\t\t\t\ttmp = osamp*tmp/(2.*M_PI);\r\n\r\n\t\t\t\ttmp = (double)k*freqPerBin + tmp*freqPerBin;\r\n\r\n\t\t\t\tanalisedMagn[k] = magn;\r\n\t\t\t\tanalisedFreq[k] = tmp;\r\n\r\n\t\t\t}\r\n\r\n\t\t\t// Process:\r\n\t\t\tmemset(synthesisedMagn, 0, frame_size*sizeof(float));\r\n\t\t\tmemset(synthesisedFreq, 0, frame_size*sizeof(float));\r\n\t\t\tfor (long int k = 0; k <= frame_size_2; ++k) { \r\n\t\t\t\tindex = k*psFactor;\r\n\t\t\t\tif (index <= frame_size_2) { \r\n\t\t\t\t\tsynthesisedMagn[index] += analisedMagn[k]; \r\n\t\t\t\t\tsynthesisedFreq[index] = analisedFreq[k] * psFactor; \r\n\t\t\t\t} \r\n\t\t\t}\r\n\r\n\t\t\t// 
Synthesise:\r\n\t\t\tfor (long int k = 0; k <= frame_size_2; ++k) {\r\n\r\n\t\t\t\tmagn = synthesisedMagn[k];\r\n\t\t\t\ttmp = synthesisedFreq[k];\r\n\r\n\t\t\t\ttmp -= (double)k*freqPerBin;\r\n\r\n\t\t\t\ttmp /= freqPerBin;\r\n\r\n\t\t\t\ttmp = 2.*M_PI*tmp/osamp;\r\n\r\n\t\t\t\ttmp += (double)k*expectedFrequency;\r\n\r\n\t\t\t\tsumOfPhase[k] += tmp;\r\n\t\t\t\tphase = sumOfPhase[k];\r\n\r\n\t\t\t\tfftWorkspace[2*k] = magn*cos(phase);\r\n\t\t\t\tfftWorkspace[2*k+1] = magn*sin(phase);\r\n\t\t\t} \r\n\r\n\t\t\tfor (long int k = frame_size+2; k < 2*frame_size; ++k) fftWorkspace[k] = 0.0;\r\n\r\n\t\t\tFFT(fftWorkspace, frame_size, 1);\r\n \r\n\t\t\tfor(long int k=0; k < frame_size; ++k) {\r\n\t\t\t\twindow = -0.5*cos(2.0*M_PI*(double)k/(double)frame_size)+0.5;\r\n\t\t\t\toutputAccumulator[k] += 2.0*window*fftWorkspace[2*k]/(frame_size_2*osamp);\r\n\t\t\t}\r\n\t\t\tfor (long int k = 0; k < stepSize; ++k) outputFIFO[k] = outputAccumulator[k];\r\n\r\n\r\n\t\t\tmemmove(outputAccumulator, outputAccumulator+stepSize, frame_size*sizeof(float));\r\n\r\n\t\t\tfor (long int k = 0; k < inputLatency; ++k) inputFIFO[k] = inputFIFO[k+stepSize];\r\n\t\t}\r\n\t}\r\n\r\n}\r\n\r\n\r\nvoid log_handler()\r\n{\r\n\tfstream log_file;\r\n\r\n\twhile (EXIT_FAILURE)\r\n\t{\r\n\r\n\t\tif (!log1_time_diff.empty())\r\n\t\t{\r\n\t\t\tsem_wait(log1_semaphore);\r\n\t\t\tlog_file.open(\"logs/log1.txt\", fstream::out | fstream::in | fstream::app);\r\n\r\n\t\t\tdo {\r\n\r\n\t\t\t\tlog_file << log1_time_diff.front() << '\\n';\r\n\t\t\t\tlog1_time_diff.pop();\r\n\r\n\t\t\t} while(!log1_time_diff.empty());\r\n\r\n\t\t\tlog_file.close();\r\n\t\t\tsem_post(log1_semaphore);\r\n\t\t}\r\n\r\n\t\tif (!log2_time_diff.empty())\r\n\t\t{\r\n\t\t\tsem_wait(log2_semaphore);\r\n\t\t\tlog_file.open(\"logs/log2.txt\", fstream::out | fstream::in | fstream::app);\r\n\r\n\t\t\tdo {\r\n\r\n\t\t\t\tlog_file << log2_time_diff.front() << '\\n';\r\n\t\t\t\tlog2_time_diff.pop();\r\n\r\n\t\t\t} while(!log2_time_diff.empty());\r\n\r\n\t\t\tlog_file.close();\r\n\t\t\tsem_post(log2_semaphore);\r\n\t\t}\r\n\r\n\t}\r\n\r\n}\r\n\r\nvoid SIGTERM_handler(int signal_id)\r\n{\r\n\tint tmp = 0;\r\n\r\n\tsem_getvalue(log1_semaphore, &tmp);\r\n\tif(!tmp)\r\n\t\tsem_post(log1_semaphore);\r\n\r\n\tsem_close(log1_semaphore);\r\n\r\n\r\n\tsem_getvalue(log2_semaphore, &tmp);\r\n\tif(!tmp)\r\n\t\tsem_post(log2_semaphore);\r\n\r\n\tsem_close(log2_semaphore);\r\n\r\n\texit(EXIT_SUCCESS);\r\n}\r\n"
},
{
"alpha_fraction": 0.6808510422706604,
"alphanum_fraction": 0.686450183391571,
"avg_line_length": 21.325000762939453,
"blob_id": "bfa58590f36f03ce3b3aebdf1d0d2bab6067008d",
"content_id": "1b3539fb2cc8ab724c35c06d49b99a884ccfc05e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 893,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 40,
"path": "/makefile",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "CPP=g++\nC = gcc\ninc = -I. -I../portaudio/bindings/cpp/include/ -I../portaudio/include/ -I../portaudio/src/common/\nlib = -lportaudiocpp -lpthread -lportaudio -lrt\n\nCPPFLAGS = $(inc)\nCFLAGS = -O3 $(PERF)\nODIR=build\n\nall: directories capture playback summoner modulator\n\ndirectories: build logs samp\n\nbuild:\n\tmkdir build\n\nlogs: \n\tmkdir logs\n\nsamp:\n\tmkdir samp\n\ncapture: src/capture.c\n\t$(C) -Wall src/capture.c $(inc) -o $(ODIR)/capture $(CFLAGS) $(lib)\n\nplayback: src/playback.c\n\t$(C) -Wall src/playback.c $(inc) -o $(ODIR)/playback $(CFLAGS) $(lib)\n\nsummoner: src/summoner.c\n\t$(C) -Wall src/summoner.c -I/usr/include/python2.7 -o $(ODIR)/summoner -lpython2.7 -lpthread -lrt\n\nmodulator: src/modulator.cpp \n\t$(CPP) -Wall src/modulator.cpp -o $(ODIR)/modulator -lpthread -lm -lrt\n\n.PHONY: clean\nclean:\n\trm -f $(ODIR)/capture\n\trm -f $(ODIR)/playback\n\trm -f $(ODIR)/summoner\n\trm -f $(ODIR)/modulator\n"
},
{
"alpha_fraction": 0.5938804149627686,
"alphanum_fraction": 0.6040797233581543,
"avg_line_length": 24.83832359313965,
"blob_id": "1f347870c3555adb978a23e8a65b4a76735fb4ab",
"content_id": "37f4c85e78fc5bc9e80c04e5336804a169456e95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4314,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 167,
"path": "/src/capture.c",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <signal.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <string.h>\n#include <semaphore.h>\n#include <sys/mman.h>\n#include \"portaudio.h\"\n#include <sys/time.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <pthread.h>\n\n#define SAMPLE_RATE (20000)\n#define FRAMES_PER_BUFFER (1024)\n#define NUM_MILISECONDS (20)\n#define NUM_CHANNELS (1)\n\n#define PA_SAMPLE_TYPE paInt16\ntypedef short SAMPLE;\n#define SAMPLE_SILENCE (0)\n#define PRINTF_S_FORMAT \"%d\"\n\nconst char *slName = \"/samp_raw\";\npthread_spinlock_t* sl;\nint fdsl;\n\nconst char* shmName = \"/raw\";\nchar* addr;\nint fd;\n\n\nint main(void);\nvoid SIGTERM_handler();\n\nint main(void)\n{\n fprintf(stderr,\"running capture\\n\");\n PaStreamParameters inputParam, outputParam;\n PaStream *audioStream;\n PaError exception;\n SAMPLE *samplesRecorded;\n struct timeval start;\n \n struct sigaction action;\n memset(&action, 0, sizeof(struct sigaction));\n action.sa_handler = SIGTERM_handler;\n sigaction(SIGTERM, &action, NULL);\n\n fdsl = shm_open(slName, O_CREAT | O_RDWR, 0666);\n ftruncate(fdsl, sizeof(pthread_spinlock_t));\n sl = (pthread_spinlock_t*) mmap(NULL, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fdsl, 0);\n\n\n fd = shm_open(shmName, O_CREAT | O_RDWR, 0666);\n ftruncate(fd, 2048);\n addr = mmap(NULL, 2048, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);\n\n int i;\n float amountOfFrames;\n int amountOfSamples;\n int amountOfBytes;\n\n amountOfFrames = NUM_MILISECONDS * SAMPLE_RATE/1000;\n amountOfSamples = amountOfFrames * NUM_CHANNELS;\n amountOfBytes = amountOfSamples * sizeof(SAMPLE);\n\n exception = Pa_Initialize();\n if( exception != paNoError ) \n goto error;\n\n inputParam.device = Pa_GetDefaultInputDevice(); \n if (inputParam.device == paNoDevice) \n {\n printf(\"Error: No default input device.\\n\");\n goto error;\n }\n\n inputParam.channelCount = NUM_CHANNELS;\n inputParam.sampleFormat = PA_SAMPLE_TYPE;\n inputParam.suggestedLatency = Pa_GetDeviceInfo( inputParam.device )->defaultLowInputLatency;\n inputParam.hostApiSpecificStreamInfo = NULL;\n\n\n while(1)\n {\n //printf(\"capture loop\\n\");\n\n samplesRecorded = (SAMPLE *) malloc( amountOfBytes );\n\n if( samplesRecorded == NULL )\n {\n printf(\"Could not allocate capture array.\\n\");\n exit(1);\n }\n\n for( i=0; i<amountOfSamples; i++ ) samplesRecorded[i] = 0;\n\n exception = Pa_OpenStream(\n &audioStream,\n &inputParam,\n NULL, \n SAMPLE_RATE,\n FRAMES_PER_BUFFER,\n paClipOff, \n NULL,\n NULL );\n\n if( exception != paNoError ) \n goto error;\n\n if( audioStream )\n {\n exception = Pa_StartStream( audioStream );\n if( exception != paNoError ) \n goto error;\n\n //printf(\"Now recording!\\n\"); \n\n exception = Pa_ReadStream( audioStream, samplesRecorded, amountOfFrames );\n if( exception != paNoError ) \n goto error;\n \n exception = Pa_StopStream( audioStream );\n if( exception != paNoError ) \n goto error;\n\n exception = Pa_CloseStream( audioStream );\n if( exception != paNoError ) \n goto error;\n }\n\n pthread_spin_lock(sl);\n\n gettimeofday(&start, NULL);\n memcpy( addr, &start, sizeof(struct timeval));\n memcpy( addr + sizeof(struct timeval), samplesRecorded, NUM_CHANNELS * sizeof(SAMPLE)* amountOfFrames);\n\n printf(\"Wrote data to 'samp/raw.raw'.\\n\");\n //fprintf(stderr,\"Wrote data to 'samp/raw.raw'.\\n\");\n\n pthread_spin_unlock(sl);\n\n\n free( samplesRecorded );\n }\n\n Pa_Terminate();\n return 0;\n\nerror:\n Pa_Terminate();\n\n free( samplesRecorded 
);\n printf(\"An error occured while using the audio capture stream. Terminating...\\n\" );\n printf(\"Error number: %d\\n\", exception );\n printf(\"Error message: %s\\n\", Pa_GetErrorText( exception ) );\n return -1;\n}\nvoid SIGTERM_handler()\n{\n Pa_Terminate();\n\n printf(\"Received kill signal. Terminating...\\n\" );\n exit(EXIT_SUCCESS);\n}"
},
{
"alpha_fraction": 0.5484086871147156,
"alphanum_fraction": 0.559128999710083,
"avg_line_length": 23.467212677001953,
"blob_id": "e3c4d3592482c680725033dea8173967190ea45c",
"content_id": "d74a659c4fb07dcf89d6681d91fa5720c2146cbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5972,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 244,
"path": "/src/modulator(ST).cpp",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "\n#include <iostream>\n#include <thread>\n#include <ctime>\n#include <chrono>\n#include <semaphore.h>\n#include <csignal>\n#include <fstream>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <queue>\n#include \"soundtouch/SoundTouch.h\"\n\n#define SAMPLE_RATE (44100)\n#define NUM_CHANNELS (1)\n\n#define TEMPO_CHANGE (0)\n#define PITCH_SEMITONES (10)\n#define RATE_CHANGE (0)\n\n#define BUFFER_SIZE (20)\n\nusing namespace soundtouch;\nusing namespace std;\n\n\nsem_t* log1_semaphore;\nsem_t* log2_semaphore;\nsem_t* samp_raw_semaphore;\nsem_t* samp_mod_semaphore;\n\nqueue<chrono::high_resolution_clock::time_point> processing_start_times;\nqueue<unsigned int> log1_time_diff;\nqueue<unsigned int> log2_time_diff;\n\n\nvoid log_handler();\nvoid SIGTERM_handler(int signal_id);\n\nint main()\n{\t/*************************************************************************************/\n\t// Main loop variables:\t\n\tint inSamples;\n\tint outSamples;\n\n\tshort int inSampleBuffer[BUFFER_SIZE] = {0};\n\tshort int outSampleBuffer[BUFFER_SIZE] = {0};\n\n\tunsigned int diff;\n\tSoundTouch ST;\n\n\n\tST.setSampleRate(SAMPLE_RATE);\n\tST.setChannels(NUM_CHANNELS);\n\n\tST.setTempoChange(TEMPO_CHANGE);\n\tST.setPitchSemiTones(PITCH_SEMITONES);\n\tST.setRateChange(RATE_CHANGE);\n\n\tST.setSetting(SETTING_USE_QUICKSEEK, 1);\n\tST.setSetting(SETTING_USE_AA_FILTER, 0);\n\n\tST.setSetting(SETTING_SEQUENCE_MS, 40);\n\tST.setSetting(SETTING_SEEKWINDOW_MS, 15);\n\tST.setSetting(SETTING_OVERLAP_MS, 8);\n\n\t/*************************************************************************************/\n\t// Time measurement variables:\n\tchrono::high_resolution_clock::time_point t_start;\n\tchrono::high_resolution_clock::time_point t_end;\n\tchrono::duration<double, std::milli> time_span;\n\t/*************************************************************************************/\n\t// Semaphore configuration\n\tlog1_semaphore = sem_open(\"/log1\", O_CREAT, O_RDWR, 1);\n\tlog2_semaphore = sem_open(\"/log2\", O_CREAT, O_RDWR, 1);\n\tsamp_raw_semaphore = sem_open(\"/samp_raw\", O_CREAT, O_RDWR, 1);\n\tsamp_mod_semaphore = sem_open(\"/samp_mod\", O_CREAT, O_RDWR, 1);\n\n\tsignal(SIGTERM, SIGTERM_handler);\n\t/*************************************************************************************/\n\t// Log file initialisation:\n\tfstream log_file;\n\n\tlog_file.open(\"logs/log1.txt\", fstream::out | fstream::trunc);\n\tlog_file.close();\n\n\tlog_file.open(\"logs/log2.txt\", fstream::out | fstream::trunc);\n\tlog_file.close();\n\n\t/*************************************************************************************/\n\t// Modified samples file initialisation:\n\tfstream samp_mod_file;\n\n\tsamp_mod_file.open(\"samp/mod.raw\", fstream::out | fstream::trunc | ios::binary);\n\tsamp_mod_file.close();\n\n\t/*************************************************************************************/\n\t// Raw samples file variable:\n\tifstream samp_raw_file;\n\n\t//samp_raw_file.open(\"samp/raw.raw\", ios::binary | ios::in);\n\t//samp_raw_file.close();\n\n\t/*************************************************************************************/\n\t// Logging thread launch:\n\tthread logging_thread(log_handler);\n\t/*************************************************************************************/\n\t// Main processing loop:\n\twhile (EXIT_FAILURE) \n\t{\n\t\t/*********************************************************************************/\n\t\t// Read samples and put into 
processing:\n\n\t\tsem_wait(samp_raw_semaphore);\n\t\tsamp_raw_file.open(\"samp/raw.raw\", ios::binary | ios::in);\n\n\t\tdo {\n\n\t\t\tsamp_raw_file.read((char*)inSampleBuffer, BUFFER_SIZE*2);\n\t\t\tinSamples = samp_raw_file.gcount() / 2;\n\n\t\t\tST.putSamples(inSampleBuffer, inSamples);\n\n\t\t\tt_start = chrono::high_resolution_clock::now();\n\t\t\tfor(int i = 0; i < inSamples; ++i)\n\t\t\t\tprocessing_start_times.push(t_start);\n\n\t\t} while(inSamples !=0 );\n\n\t\tsamp_raw_file.close();\n\t\tsem_post(samp_raw_semaphore);\n\n\t\t/*********************************************************************************/\n\t\t// Receive samples and write to output:\n\n\t\tsem_wait(samp_mod_semaphore);\n\t\tsamp_mod_file.open(\"samp/mod.raw\", fstream::out | fstream::app | ios::binary);\n\n\t\tdo \n\t\t{\n\t\t\toutSamples = ST.receiveSamples(outSampleBuffer, outSamples);\n\n\t\t\tt_end = chrono::high_resolution_clock::now();\n\n\t\t\tfor(int i = 0; i < outSamples; ++i){\n\t\t\t\ttime_span = processing_start_times.front() - t_end;\n\t\t\t\tprocessing_start_times.pop();\n\n\t\t\t\tlog2_time_diff.push(time_span.count());\n\t\t\t}\n\n\t\t\tsamp_mod_file.write(outSampleBuffer, outSamples*2);\n\n\t\t} while (outSamples != 0);\n\n\t\tsamp_mod_file.close();\n\t\tsem_post(samp_mod_semaphore);\n\n\n\t}\n\t/*************************************************************************************/\n\t// If somehow loop broke, exit with error\n\treturn EXIT_FAILURE;\n\t/*************************************************************************************/\n}\n\n\nvoid log_handler()\n{\n\tfstream log_file;\n\n\twhile (EXIT_FAILURE)\n\t{\n\n\t\tif (!log1_time_diff.empty())\n\t\t{\n\t\t\tsem_wait(log1_semaphore);\n\t\t\tlog_file.open(\"logs/log1.txt\", fstream::out | fstream::in | fstream::app);\n\n\t\t\tdo {\n\n\t\t\t\tlog_file << log1_time_diff.front() << '\\n';\n\t\t\t\tlog1_time_diff.pop();\n\n\t\t\t} while(!log1_time_diff.empty());\n\n\t\t\tlog_file.close();\n\t\t\tsem_post(log1_semaphore);\n\t\t}\n\n\t\tif (!log2_time_diff.empty())\n\t\t{\n\t\t\tsem_wait(log2_semaphore);\n\t\t\tlog_file.open(\"logs/log2.txt\", fstream::out | fstream::in | fstream::app);\n\n\t\t\tdo {\n\n\t\t\t\tlog_file << log2_time_diff.front() << '\\n';\n\t\t\t\tlog2_time_diff.pop();\n\n\t\t\t} while(!log2_time_diff.empty());\n\n\t\t\tlog_file.close();\n\t\t\tsem_post(log2_semaphore);\n\t\t}\n\n\t}\n\n}\n\nvoid SIGTERM_handler(int signal_id)\n{\n\tint tmp = 0;\n\n\tsem_getvalue(log1_semaphore, &tmp);\n\tif(!tmp)\n\t\tsem_post(log1_semaphore);\n\n\tsem_close(log1_semaphore);\n\n\n\tsem_getvalue(log2_semaphore, &tmp);\n\tif(!tmp)\n\t\tsem_post(log2_semaphore);\n\n\tsem_close(log2_semaphore);\n\n\n\tsem_getvalue(samp_raw_semaphore, &tmp);\n\tif(!tmp)\n\t\tsem_post(samp_raw_semaphore);\n\n\tsem_close(samp_raw_semaphore);\n\n\n\tsem_getvalue(samp_mod_semaphore, &tmp);\n\tif(!tmp)\n\t\tsem_post(samp_mod_semaphore);\n\n\tsem_close(samp_mod_semaphore);\n\n\t\n\texit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.5624656081199646,
"alphanum_fraction": 0.5883324146270752,
"avg_line_length": 20.127906799316406,
"blob_id": "cf3698ce84ccd16a1755adfaf84be34dced24e36",
"content_id": "d501f6f393bfde077ccf35aca4ccbcdaf0f90ef2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1817,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 86,
"path": "/src/generator.c",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <time.h>\n#include <sys/time.h>\n#include <semaphore.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <signal.h>\n#include <string.h>\n\nsem_t* semarr[3];\n\nvoid term(){\n\tint val = 0;\n\tfor(int i = 3; i>0; --i){\n\t\tsem_getvalue(semarr[i], &val);\n\t\tif(val < 1){\n\t\t\tsem_post(semarr[i]);\n\t\t}\n\t\t//fprintf(stderr,\"semafor /log%d, wartosc %d\\n\", i, val);\n\t\tsem_close(semarr[i]);\n\t}\n}\n\nint main(int argc, char const *argv[])\n{\n\t//fprintf(stderr,\"generator: %s %s\\n\", argv[0], argv[1]);\n\n\tstruct timeval t2, t1, dt;\n\n\tstruct sigaction action;\n\tmemset(&action, 0, sizeof(struct sigaction));\n action.sa_handler = term;\n sigaction(SIGTERM, &action, NULL);\n\n\tFILE* log;\n\tmkdir(\"logs\", O_RDWR);\n\n\tchar fname[3][15];\n\tchar semName[6];\n\tint val;\n\tfor(int i = 3; i>0; --i){\n\n\t\tsnprintf(semName, 6, \"/log%d\", i);\n\t\tsemarr[i] = sem_open(semName, O_CREAT, O_RDWR, 1);\n\t\tsem_getvalue(semarr[i], &val);\n\t\t//fprintf(stderr,\"semafor %s, wartosc %d\\n\", semName, val);\n\n\t\tsem_wait(semarr[i]);\n\t\tsnprintf(fname[i], 15, \"logs/log%d.txt\", i);\n\t\tlog = fopen(fname[i], \"w\");\n\t\tif(log == NULL){\n\t\t\tputs(\"file opening failed\");\n\t\t\texit(1);\n\t\t}\n\t\tfclose(log);\n\t\tsem_post(semarr[i]);\n\t}\n\t\n\t//fprintf(stderr,\"file init ok\");\n\tsrand(time(NULL));\n\tint rando = 0;\n\tfor(;;){\n\t\tfor(int i = 3; i>0; --i){\n\t\t\t//fprintf(stderr,\"in loop\");\n\t\t\tgettimeofday(&t1, NULL);\n\t\t\trando = (10000 + rand() % 30000)*strtol(argv[1], NULL, 10);\n\t\t\t//fprintf(stderr,\"rando = %d\", rando);\n\t\t\tusleep(rando);\n\t\t\tgettimeofday(&t2, NULL);\n\t\t\ttimersub(&t2, &t1, &dt);\n\n\t\t\tsem_wait(semarr[i]);\n\t\t\tlog = fopen(fname[i], \"a\");\n\t\t\tif(log == NULL){\n\t\t\t\tputs(\"file opening failed\");\n\t\t\t\texit(1);\n\t\t\t}\n\t\t\tfprintf(log, \"%ld\\n\", ((long int)dt.tv_usec)/1000 );\n\t\t\tfclose(log);\n\t\t\tsem_post(semarr[i]);\n\t\t}\n\t}\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6022186875343323,
"alphanum_fraction": 0.626518726348877,
"avg_line_length": 23.269229888916016,
"blob_id": "d8eb9fb78b1779b81643950df65a9391dae64c25",
"content_id": "bb4dd59506f9e6a54ad349c187b15fb5c2ff4331",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1893,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 78,
"path": "/scripts/plotter.py",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2.7\n\nimport matplotlib.pyplot as plt\nimport posix_ipc\nimport signal\nimport math\n\nsem = [posix_ipc.Semaphore(\"/log1\", posix_ipc.O_CREAT, mode = posix_ipc.O_RDWR, initial_value = 1),\\\nposix_ipc.Semaphore(\"/log2\", posix_ipc.O_CREAT, mode = posix_ipc.O_RDWR, initial_value = 1),\\\nposix_ipc.Semaphore(\"/log3\", posix_ipc.O_CREAT, mode = posix_ipc.O_RDWR, initial_value = 1)]\n\ndef termin(signum, frame):\n\tplt.close('all')\n\tfor s in sem:\n\t\ttry:\n\t\t\tval = s.value\n\t\texcept posix_ipc.ExistentialError:\n\t\t\tcontinue\n\t\telse:\t\n\t\t\tif val< 1:\n\t\t\t\tsem.release()\n\t\t\tS.close()\n\n\nif __name__ == \"__main__\":\n\tprint(\"commencing plot creation\")\n\tsignal.signal(signal.SIGTERM, termin)\n\n\ttimes = []\n\n\tfor i in (0, 1, 2):\n\t\t#print(\"semafor \" + str(sem[i].name) + \" wartosc \" + str(sem[i].value))\n\n\t\tsem[i].acquire();\n\n\t\tfp = open((\"logs/log\" + str(i+1) + \".txt\"), \"r\")\n\t\ttimeVec = []\n\t\tbuf = \"\"\n\t\twhile(True):\n\t\t\tbuf = fp.readline()\n\t\t\tif not buf:\n\t\t\t\tbreak\n\t\t\ttimeVec.append(long(buf))\n\t\ttimes.append(timeVec)\n\t\tsem[i].release()\n\t\tsem[i].close()\n\n\t\ttimes[0][0] = times[0][1]\n\n\t\tsrednia = sum(times[i])/float(len(times[i]))\n\t\tprint(\"log\" + str(i+1))\n\t\tprint(\"\\tsrednia: \" + str(srednia))\n\t\torange = 0\n\t\tfor time in times[i]:\n\t\t\torange = orange + (time - srednia)**2\n\n\t\tprint(\"\\todchylenie standardowe: \" + str(math.sqrt(orange)/len(times[i])))\n\n\t\tplt.figure(2*i+1)\n\t\tn, bins, patches = plt.hist(x=times[i], bins=30, color='#0504aa', alpha=0.7, rwidth=0.4)\n\t\tplt.grid(axis='y', alpha=0.75)\n\t\tplt.xlabel('Opoznienie [us]')\n\t\tplt.ylabel('Wystapienia')\n\t\tplt.title(\"Histogram \" + str(i+1))\n\t\tplt.ylim(ymax=1.1*n.max())\n\n\t\tplt.figure(2*i)\n\t\tplt.plot(times[i])\n\t\tplt.grid(axis='y', alpha=0.75)\n\t\tplt.xlabel('Czas')\n\t\tplt.ylabel('Opoznienie [us]')\n\t\tplt.title(\"Przebieg czasowy \" + str(i+1))\n\t\tplt.ylim(ymax=1.1*max(times[i]))\n\n\tprint(\"ploting successfull\")\n\tplt.show()\n\tprint(\"plots terminated\")\n\texit(0)\n"
},
{
"alpha_fraction": 0.6741935610771179,
"alphanum_fraction": 0.6806451678276062,
"avg_line_length": 16.22222137451172,
"blob_id": "676f44c48d5c70b533acbbf09da68e4a83fcc19c",
"content_id": "c6bd7336ff7b2c3b2aa7bb0e585af45f6181edfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 18,
"path": "/README.md",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "# SCZR-voice-modulation\nzeby sie pobawic summonerem: sudo ./build/summoner\\\nDostęp do pamięci współdzielonej wymaga roota.\n\n# Instalacja SoundTouch:\n\n```\n$ ./bootstrap\n```\n```\n$ ./configure --prefix=/usr --docdir=/usr/share/doc/soundtouch-2.2 --enable-integer-samples\n```\n```\n$ make\n```\n```\n$ make install\n```\n"
},
{
"alpha_fraction": 0.6387315988540649,
"alphanum_fraction": 0.6494903564453125,
"avg_line_length": 21.544681549072266,
"blob_id": "41a16d14f46c04a7ad83a35869787b50be95ede3",
"content_id": "5f0724743656aa7552bdab7740b9f84c8116e7fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5298,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 235,
"path": "/src/summoner.c",
"repo_name": "Szewoj/Voice-Modulation",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <sys/types.h>\n#include <stdbool.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <signal.h>\n#include <Python.h>\n#include <string.h>\n#include <semaphore.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <sys/wait.h>\n#include <sys/mman.h>\n#include <pthread.h>\n\npthread_spinlock_t *samp_raw_sl, *samp_mod_sl;\n\nvoid openSystem(bool* system_on, pid_t* system_pid);\nvoid runCapture(pid_t* system_pid);\nvoid runModulator(pid_t* system_pid, int param);\nvoid runPlayback(pid_t* system_pid);\nvoid closeSystem(bool* system_on, pid_t* system_pid);\nvoid createPlots(bool* plotter_on, pid_t* plotter_pid_ptr);\nvoid closePlots(bool* plotter_on, pid_t plotter_pid);\nint getChoice(const char* text, int max);\nbool goodButton(int c, int max);\nvoid unlinkSemaphores();\nvoid unlinkShm();\nvoid unlinkSpinlock();\n\nint main(int argc, char const *argv[])\n{\n\tpid_t system_pid[3];\n\tpid_t plotter_pid;\n\tbool system_on = false;\n\tbool plotter_on = false;\n\tint choice;\n\tconst char* MENU_TEXT = \"\\nMake a choice:\\n1 - open system\\n2 - close system\\n3 - plot times\\n4 - close plots\\n5 - exit\\n\";\n\n\tint fd_samp_raw, fd_samp_mod;\n\n\tfd_samp_raw = shm_open(\"/samp_raw\", O_CREAT | O_RDWR, 0666);\n\tftruncate(fd_samp_raw, sizeof(pthread_spinlock_t));\n\tsamp_raw_sl = mmap(0, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd_samp_raw, 0);\n\n\tfd_samp_mod = shm_open(\"/samp_mod\", O_CREAT | O_RDWR, 0666);\n\tftruncate(fd_samp_mod, sizeof(pthread_spinlock_t));\n\tsamp_mod_sl = mmap(0, sizeof(pthread_spinlock_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd_samp_mod, 0);\n\n\n\tpthread_spin_init(samp_raw_sl, PTHREAD_PROCESS_SHARED);\n\tpthread_spin_init(samp_mod_sl, PTHREAD_PROCESS_SHARED);\n\n\n\n\t\n\tfor(;;){\n\t\tchoice = getChoice(MENU_TEXT, 5);\n\t\tswitch(choice){\n\t\t\tcase 1:\n\t\t\t\topenSystem(&system_on, system_pid);\n\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\tcloseSystem(&system_on, system_pid);\n\t\t\tbreak;\n\t\t\tcase 3:\n\t\t\t\tcreatePlots(&plotter_on, &plotter_pid);\n\t\t\tbreak;\n\t\t\tcase 4:\n\t\t\t\tclosePlots(&plotter_on, plotter_pid);\n\t\t\tbreak;\n\t\t\tcase 5:\n\t\t\t\tcloseSystem(&system_on, system_pid);\n\t\t\t\tclosePlots(&plotter_on, plotter_pid);\n\t\t\t\tunlinkSemaphores();\n\t\t\t\tunlinkSpinlock();\n\t\t\t\tunlinkShm();\n\t\t\t\tputs(\"closing system\");\n\t\t\t\texit(0);\n\t\t\tbreak;\n\t\t\tdefault:\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn 0;\n}\n\nvoid openSystem(bool* system_on, pid_t* system_pid){\n\tif(!*system_on){\n\t\tconst char* PARAM_TEXT = \"\\n1 - modulation\\n2 - no effects\\n\";\n\t\tint param = getChoice(PARAM_TEXT, 2);\n\n\t\trunCapture(system_pid);\n\t\trunModulator(system_pid, param);\n\t\trunPlayback(system_pid);\n\n\t\t*system_on = true;\n\t\tputs(\"system launched\");\n\t}\n\telse{\n\t\tputs(\"system already on\");\n\t}\n}\n\nvoid runCapture(pid_t* system_pid){\n\tsystem_pid[0] = fork();\n\tif(system_pid[0] == -1){\n\t\tfprintf(stderr, \"process creation failed\");\n\t\texit(errno);\n\t}\n\telse if(system_pid[0] == 0){\n\t\texeclp(\"build/capture\", \"capture\", NULL);\n\t\tfprintf(stderr, \"capture execution failed\");\n\t\texit(errno);\n\t}\t\n}\n\nvoid runModulator(pid_t* system_pid, int param){\n\tsystem_pid[1] = fork();\n\tif(system_pid[1] == -1){\n\t\tfprintf(stderr, \"process creation failed\");\n\t\texit(errno);\n\t}\n\telse if(system_pid[1] == 0){\n\t\tchar p[2];\n\t\tsnprintf(p, 2, \"%d\", param);\n\t\texeclp(\"build/modulator\", \"modulator\", p, NULL);\n\t\tfprintf(stderr, 
\"modulator execution failed\");\n\t\texit(errno);\n\t}\n}\n\nvoid runPlayback(pid_t* system_pid){\n\tsystem_pid[2] = fork();\n\tif(system_pid[2] == -1){\n\t\tfprintf(stderr, \"process creation failed\");\n\t\texit(errno);\n\t}\n\telse if(system_pid[2] == 0){\n\t\texeclp(\"build/playback\", \"playback\", NULL);\n\t\tfprintf(stderr, \"playback execution failed\");\n\t\texit(errno);\n\t}\t\n}\n\nvoid closeSystem(bool* system_on, pid_t system_pid[]){\n\tif(*system_on){\n\t\tfor(int i = 2; i >= 0; --i){\n\t\t\tkill(system_pid[i], SIGTERM);\n\t\t\twait(NULL);\n\t\t}\n\t\tputs(\"system closed\");\n\t\t*system_on = false;\n\t}\n\telse{\n\t\tputs(\"system already off\");\n\t}\n}\n\nvoid createPlots(bool* plotter_on, pid_t* plotter_pid_ptr){\n\tif(!*plotter_on){\n\t\tputs(\"launching plotter\");\n\t\t*plotter_pid_ptr = fork();\n\t\tif(*plotter_pid_ptr == -1){\n\t\t\tfprintf(stderr, \"process creation failed\");\n\t\t\texit(1);\n\t\t}else if(*plotter_pid_ptr == 0){\n\t\t\texeclp(\"python\", \"python\", \"scripts/plotter.py\", NULL);\n\t\t\tfprintf(stderr, \"plotter execution failed\");\n\t\t\texit(errno);\n\t\t}\n\t\t*plotter_on = true;\n\t}\n\telse{\n\t\tputs(\"plots already created\");\n\t}\n}\n\nvoid closePlots(bool* plotter_on, pid_t plotter_pid){\n\tif(*plotter_on){\n\t\tputs(\"closing plots\");\n\t\tkill(plotter_pid, SIGTERM);\n\t\t*plotter_on = false;\n\t}\n\telse{\n\t\tputs(\"no plots open\");\n\t}\t\n}\n\nint getChoice(const char* text, int max){\n\tputs(text);\n\tint choice;\n\tdo{\n\t\tscanf(\"%d\", &choice);\n\t\twhile((getchar()!='\\n')){};\n\t\tif(!goodButton(choice, max)){\n\t\t\tputs(\"unaccteptable input\");\n\t\t\tputs(text);\n\t\t}\n\t}while(!goodButton(choice, max));\n\treturn choice;\n}\n\nbool goodButton(int c, int max){\n\tint maxButton = max;\n\tfor(int i = maxButton; i>0; --i){\n\t\tif(c == i)\n\t\t\treturn true;\n\t}\n\treturn false;\n}\n\nvoid unlinkSemaphores(){\n\tsem_t* sem;\n\tchar semName[6];\n\tfor(int i = 3; i>0; --i){\n\t\tsnprintf(semName, 6, \"/log%d\", i);\n\t\tsem = sem_open(semName, O_CREAT, O_RDWR, 1);\n\t\tsem_close(sem);\n\t\tsem_unlink(semName);\n\t}\n}\n\nvoid unlinkShm(){\n\tshm_unlink(\"/raw\");\n\tshm_unlink(\"/mod\");\n\tshm_unlink(\"/samp_raw\");\n\tshm_unlink(\"/samp_mod\");\n}\n\nvoid unlinkSpinlock(){\n\tpthread_spin_destroy(samp_raw_sl);\n\tpthread_spin_destroy(samp_mod_sl);\n\n}\n"
}
] | 9 |
lilyhoratio/hb-oo-practice-melons
|
https://github.com/lilyhoratio/hb-oo-practice-melons
|
40ab2d760cd6dce8846e5903bb8bd0c66fc9ca51
|
e66160f73f9ff6a7d97425eda9337d674bcaa819
|
2548f2e82c9e9699c5bda75978d2ffa89f37f9c8
|
refs/heads/master
| 2020-05-15T09:32:25.466615 | 2019-04-19T01:19:22 | 2019-04-19T01:19:22 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5953072905540466,
"alphanum_fraction": 0.6122905015945435,
"avg_line_length": 31.434782028198242,
"blob_id": "5914777c1aeb5c1341f25eaa63334e67e14b9702",
"content_id": "04240352a352351ac5665d0b8cc6a728f3bdbfd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4475,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 138,
"path": "/harvest.py",
"repo_name": "lilyhoratio/hb-oo-practice-melons",
"src_encoding": "UTF-8",
"text": "############\n# Part 1 #\n############\n\n\nclass MelonType(object):\n \"\"\"A species of melon at a melon farm.\"\"\"\n\n def __init__(self, code, first_harvest, color, is_seedless, is_bestseller, \n name):\n \"\"\"Initialize a melon.\"\"\"\n\n self.pairings = []\n self.code = code\n self.first_harvest = int(first_harvest)\n self.color = color\n self.is_seedless = bool(is_seedless)\n self.is_bestseller = bool(is_bestseller)\n self.name = name\n\n # Fill in the rest\n\n def add_pairing(self, pairing):\n \"\"\"Add a food pairing to the instance's pairings list.\"\"\"\n if type(pairing) == str:\n pairing = [pairing]\n self.pairings.extend(pairing)\n # Fill in the rest\n\n def update_code(self, new_code):\n \"\"\"Replace the reporting code with the new_code.\"\"\"\n\n # Fill in the rest\n self.code = new_code\n\ndef make_melon_types():\n \"\"\"Returns a list of current melon types.\"\"\"\n\n muskmelon = MelonType(\"musk\", 1989, \"green\", True, True, \"Muskmelon\")\n muskmelon.add_pairing(\"mint\")\n casaba = MelonType(\"cas\", 2003, \"orange\", True, False, \"casaba\")\n casaba.add_pairing([\"strawberries\",\"mint\"])\n crenshaw = MelonType(\"cren\", 1996, \"green\", True, False, \"crenshaw\")\n crenshaw.add_pairing(\"proscuitto\")\n yellow_watermelon = MelonType(\"yw\", 2013, \"yellow\", True, True, \"yellow watermelon\")\n yellow_watermelon.add_pairing(\"ice cream\")\n\n # Fill in the rest\n all_melon_types = [muskmelon, casaba, crenshaw, yellow_watermelon]\n return all_melon_types\n\n# print(make_melon_types()) ## prints a list of objects: [<__main__.Melon Type object at 0x...> , ...]\n\ndef print_pairing_info(melon_types):\n \"\"\"Prints information about each melon type's pairings.\"\"\"\n\n for melon in melon_types:\n pairing_string = \"\"\n for pairingtype in melon.pairings:\n pairing_string += f\"\\n- {pairingtype}\"\n print(f\"{melon.name} pairs well with {pairing_string}\\n\")\n\n# print_pairing_info(make_melon_types())\n\ndef make_melon_type_lookup(melon_types):\n \"\"\"Takes a list of MelonTypes and returns a dictionary of melon type by code.\"\"\"\n\n # Fill in the rest\n melon_code = {}\n\n for melon in melon_types:\n melon_code[melon.code] = melon.name\n\n return melon_code \n\n# print(make_melon_type_lookup(make_melon_types()))\n\n############\n# Part 2 #\n############\n\nclass Melon(object):\n \"\"\"A melon in a melon harvest.\"\"\"\n\n # Fill in the rest\n # Needs __init__ and is_sellable methods\n def __init__(self, melon_type, shape_rating, \n color_rating, field_num, harvested_by):\n # melon_type is instance of Class melon type\n\n self.melon_type = melon_type\n self.shape_rating = float(shape_rating)\n self.color_rating = float(color_rating)\n self.field_num = int(field_num)\n self.harvested_by = harvested_by\n\n self.sellable = self.is_sellable(self.shape_rating, self.color_rating, self.field_num)\n\n def is_sellable(self, shape_rating, color_rating, field_num):\n if (shape_rating > 5) and (color_rating > 5) and (field_num != 3):\n return True\n else:\n return False\n\n\ndef make_melons(melon_types):\n \"\"\"Returns a list of Melon objects.\"\"\"\n\n melons_by_id = make_melon_type_lookup(melon_types)\n print(melons_by_id)\n\n # melon1 = Melon(yellow_watermelon, 8, 7, 2, \"Sheila\")\n melon1 = Melon(melons_by_id['yw'], 8, 7, 2, \"Sheila\")\n melon2 = Melon(melons_by_id[\"yw\"], 3, 4, 2, \"Sheila\")\n melon3 = Melon(melons_by_id[\"yw\"], 9, 8, 3, \"Sheila\")\n \n melon4 = Melon(melons_by_id[\"cas\"], 10, 6, 35, \"Sheila\")\n\n melon5 = 
Melon(melons_by_id[\"cren\"], 8, 9, 35, \"Michael\")\n melon6 = Melon(melons_by_id[\"cren\"], 8, 2, 35, \"Michael\")\n melon7 = Melon(melons_by_id[\"cren\"], 2, 3, 4, \"Michael\")\n\n melon8 = Melon(melons_by_id[\"musk\"], 6, 7, 4, \"Michael\")\n\n melon9 = Melon(melons_by_id[\"yw\"], 7, 10, 3, \"Sheila\")\n\n return [melon1, melon2, melon3, melon4, melon5, melon6, melon7, melon8, melon9]\n\ndef get_sellability_report(melons):\n \"\"\"Given a list of melon object, prints whether each one is sellable.\"\"\"\n \n # Fill in the rest \n for melon in make_melons(make_melon_types()):\n can_sell = \"CAN BE SOLD\" if melon.sellable else \"NOT SELLABLE\"\n print(f\"Harvested by {melon.harvested_by} from Field {melon.field_num} - {can_sell}\\n\")\n\n\nget_sellability_report(make_melons(make_melon_types()))"
},
{
"alpha_fraction": 0.5100401639938354,
"alphanum_fraction": 0.522088348865509,
"avg_line_length": 23.799999237060547,
"blob_id": "f2c15474def5259a1f194cf519386226b349b518",
"content_id": "da9248b250220cab79e0e5232ab8ff0d911ac452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 10,
"path": "/readmelon.py",
"repo_name": "lilyhoratio/hb-oo-practice-melons",
"src_encoding": "UTF-8",
"text": "import harvest \n\nwith open(\"harvest_log.txt\") as f: \n ## don't need readline(f) - python2\n i = 1\n for line in f:\n line = f.split(\" \")\n melon_obj_name = \"melon\"+str(i)\n i +=1\n globals()[melon_obj_name] = Melon()\n\n"
}
] | 2 |
vipinsachdeva/elasticluster_full
|
https://github.com/vipinsachdeva/elasticluster_full
|
0199ee00e716f285173c8974fdf9570ab5d43470
|
71160196682a8d18a9547d5d28e8a885b067924d
|
01a085bb89225d0390316036a915b2b8d7403219
|
refs/heads/master
| 2021-05-15T11:35:44.706476 | 2017-10-25T22:37:37 | 2017-10-25T22:37:37 | 108,333,786 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6310867667198181,
"alphanum_fraction": 0.6375362873077393,
"avg_line_length": 27.981307983398438,
"blob_id": "db00f5bc629af7a06d0ce4b245c9e3415f1687f2",
"content_id": "6d078f03404163efd00efddce3cbf1f3bca6cc77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3101,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 107,
"path": "/lib/python2.7/site-packages/cli/tests/test_cli_app.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"CLI tools for Python.\n\nCopyright (c) 2009-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\n\nfrom cli.app import Abort, Application, CommandLineApp\nfrom cli.util import StringIO\n\nfrom cli import tests\n\nclass FakeApp(Application):\n \n def main(self):\n pass\n\nclass FakeCommandLineApp(CommandLineApp):\n \n def main(self):\n pass\n\nclass TestApplication(tests.AppTest):\n app_cls = FakeApp\n \n def test_discover_name(self):\n self.assertEqual(self.app.name, \"main\")\n\n def test_exit(self):\n @self.app_cls\n def app(app):\n pass\n\n self.assertRaises(SystemExit, app.run)\n\n def test_returns(self):\n self.assertEqual(self.app.run(), 0)\n\n def test_returns_value(self):\n @self.app_cls(exit_after_main=False)\n def app(app):\n return 1\n\n self.assertEqual(app.run(), 1)\n app.main = lambda app: \"foo\"\n self.assertEqual(app.run(), 1)\n\n def test_raise_exception1(self):\n @self.app_cls(exit_after_main=False, reraise=Exception)\n def app(app):\n raise RuntimeError(\"Just testing.\")\n\n self.assertRaises(RuntimeError, app.run)\n\n def test_raise_exception2(self):\n @self.app_cls(exit_after_main=False,\n reraise=(RuntimeError, AssertionError))\n def app(app):\n raise RuntimeError(\"Just testing.\")\n\n self.assertRaises(RuntimeError, app.run)\n\n def test_swallow_exception(self):\n @self.app_cls(exit_after_main=False, reraise=(ValueError, TypeError))\n def app(app):\n raise RuntimeError(\"Just testing.\")\n\n self.assertEqual(app.run(), 1)\n\n \nclass TestCommandLineApp(tests.AppTest):\n app_cls = FakeCommandLineApp\n\n def test_parse_args(self):\n app_cls = self.app_cls\n class Test(app_cls):\n \n def setup(self):\n app_cls.setup(self)\n self.add_param(\"-f\", \"--foo\", default=None)\n\n status, app = self.runapp(Test, \"test -f bar\")\n self.assertEqual(app.params.foo, \"bar\")\n\n def test_parse_args_version(self):\n class Test(self.app_cls): pass\n\n status = None\n try:\n self.runapp(Test, \"test -V\", version=\"1.0\")\n except Abort, e:\n status = e.status\n self.assertEqual(status, 0)\n\n def test_version(self):\n self.app.version = \"0.1\"\n self.app.run()\n"
},
{
"alpha_fraction": 0.597846269607544,
"alphanum_fraction": 0.6238395571708679,
"avg_line_length": 31.445783615112305,
"blob_id": "8294bbcf31445395a8479b87055bfec0292a5e9b",
"content_id": "7bdf5aa0f6a9160050d370f2070c9e74262f1c7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2693,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 83,
"path": "/lib/python2.7/site-packages/cli/tests/test_cli_profiler.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"CLI tools for Python.\n\nCopyright (c) 2009-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\n\nfrom cli.profiler import Profiler, update_wrapper, fmtsec\nfrom cli.util import StringIO\n\nfrom cli import tests\n\nclass TestProfiler(tests.BaseTest):\n\n def setUp(self):\n self.stdout = StringIO()\n self.profiler = Profiler(stdout=self.stdout)\n def func():\n \"\"\"foo\"\"\"\n return \"foo\"\n def wrapper(*args, **kwargs):\n return func()\n self.func = func\n self.wrapper = wrapper\n\n\n def test_wrap(self):\n wrapped = self.profiler.wrap(self.wrapper, self.func)\n self.assertEqual(wrapped(), \"foo\")\n self.assertEqual(wrapped.__doc__, self.func.__doc__)\n\n def test_anon_wrap(self):\n def __profiler_func():\n \"\"\"foo\"\"\"\n return \"foo\"\n wrapped = self.profiler.wrap(self.wrapper, __profiler_func)\n self.assertEqual(wrapped, \"foo\")\n\n def test_deterministic(self):\n # Sanity check...\n @self.profiler.deterministic\n def foo():\n pass\n foo()\n\n def test_statistical(self):\n # Sanity check...\n @self.profiler.statistical\n def foo():\n pass\n foo()\n\nclass TestUtils(tests.BaseTest):\n \n def test_update_wrapper(self):\n def foo():\n \"\"\"foo\"\"\"\n return \"foo\"\n def wrapper():\n \"\"\"wrapper\"\"\"\n return foo()\n wrapper = update_wrapper(wrapper, foo)\n self.assertEqual(wrapper.__doc__, foo.__doc__)\n self.assertEqual(wrapper.__name__, foo.__name__)\n\n def test_fmtsec(self):\n self.assertEqual(fmtsec(-1), \"-1.000000 s\")\n self.assertEqual(fmtsec(0), \"0 s\")\n self.assertEqual(fmtsec(1), \"1.000000 s\")\n self.assertEqual(fmtsec(1e9 + 1), \"1e+09 s\")\n self.assertEqual(fmtsec(1e6 + 1.3), \"1000001 s\")\n self.assertEqual(fmtsec(1003.02), \"1003.020 s\")\n self.assertEqual(fmtsec(1e-3 + .00003), \"1.030000 ms\")\n"
},
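For reference, the Profiler decorators exercised above are used like the sketch below. This assumes the same `cli.profiler` module and that the timing output destination defaults to stdout, as suggested by the Profiler(stdout=...) keyword seen in setUp:

from cli.profiler import Profiler

profiler = Profiler()

@profiler.deterministic
def work():
    # Profiled with the deterministic (cProfile-style) profiler,
    # as in test_deterministic above.
    return sum(range(1000))

work()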
{
"alpha_fraction": 0.5579096078872681,
"alphanum_fraction": 0.5875706076622009,
"avg_line_length": 27.31999969482422,
"blob_id": "2fefd839c208d2e650153089f3c4a1cbeb1741a6",
"content_id": "4a04f8b434f9f09c69083ead13ed769d2ad10b98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 25,
"path": "/lib/python2.7/site-packages/cli/ext.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"Includes:\n argparse - 2010.02.01\n LICENSE: Apache License 2.0\n URL: http://argparse.googlecode.com/svn/tags/r101/argparse.py\n\n scripttest - 2010.03.04\n LICENSE: MIT\n URL: http://bitbucket.org/ianb/scripttest/src/tip/scripttest/__init__.py\n\"\"\"\nimport os\n\n# Add included module names to __all__.\n__all__ = [\"argparse\", \"scripttest\"]\nproject = os.path.basename(os.path.dirname(__file__))\next = project + \"._ext\"\n\nname, module = None, None\nfor name in __all__:\n try:\n module = __import__(name)\n except ImportError:\n module = __import__('.'.join((ext, name)), {}, {}, [ext])\n locals()[name] = module\n\ndel(ext, module, name, project)\n"
},
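ext.py implements a "prefer the installed copy, fall back to the bundled one" import for each name in __all__. Spelled out for a single module, the same idiom looks like this (a sketch; cli._ext is the bundled package the loop above targets):

try:
    import argparse                   # use a system-wide copy if one exists
except ImportError:
    from cli._ext import argparse     # otherwise fall back to the bundled copy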
{
"alpha_fraction": 0.6887791156768799,
"alphanum_fraction": 0.6947777271270752,
"avg_line_length": 32.738094329833984,
"blob_id": "1bf16fc0071ae689b3ba0c06728f5b4c4e571fdd",
"content_id": "96c782e17accd33ce70efa61cd0cff69212692a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2834,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 84,
"path": "/lib/python2.7/site-packages/cli/tests/test_cli_log.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"CLI tools for Python.\n\nCopyright (c) 2009-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\nimport logging\nlogging.logMultiprocessing = 0\n\nfrom cli.ext import argparse\nfrom cli.log import CommandLineLogger, LoggingApp\n\nfrom cli import tests\n\nclass FakeLoggingApp(LoggingApp):\n \n def main(self):\n pass\n\nclass TestCommandLineLogger(tests.BaseTest):\n \n def setUp(self):\n self.fakens = argparse.Namespace()\n self.logger = CommandLineLogger(\"foo\")\n\n def test_setLevel(self):\n self.fakens.verbose = 0\n self.fakens.silent = False\n self.fakens.quiet = 0\n\n # The logger should start at 0.\n self.assertEqual(self.logger.level, 0)\n\n # Given the default input, it should be set to WARNING.\n self.logger.setLevel(self.fakens)\n self.assertEqual(self.logger.level, logging.WARNING)\n\n # Incrementing verbose should increase the logger's verbosity.\n self.fakens.verbose = 1\n self.logger.setLevel(self.fakens)\n self.assertEqual(self.logger.level, logging.INFO)\n\n # Incrementing quiet should decrease it.\n self.fakens.quiet = 1\n self.logger.setLevel(self.fakens)\n self.assertEqual(self.logger.level, logging.WARNING)\n self.fakens.quiet = 2\n self.logger.setLevel(self.fakens)\n self.assertEqual(self.logger.level, logging.ERROR)\n\n # And setting silent should shut it up completely.\n self.fakens.silent = True\n self.logger.setLevel(self.fakens)\n self.assertEqual(self.logger.level, logging.CRITICAL)\n\nclass TestLoggingApp(tests.AppTest):\n app_cls = FakeLoggingApp\n\n def test_setup_log(self):\n _, app = self.runapp(self.app_cls, \"test -vvv\")\n self.assertEqual(app.params.verbose, 3)\n self.assertEqual(app.log.level, logging.DEBUG)\n\n _, app = self.runapp(self.app_cls, \"test -vvv -qqq\")\n self.assertEqual(app.log.level, logging.WARNING)\n\n def test_no_stream_or_logfile(self):\n self.app.logfile = None\n self.app.stream = None\n\n self.app.run()\n\n # We shouldn't see anything here.\n self.app.log.critical(\"foo\")\n"
},
{
"alpha_fraction": 0.6863468885421753,
"alphanum_fraction": 0.6970069408416748,
"avg_line_length": 38.33871078491211,
"blob_id": "b975bc5737a7248756cd13dfb715398eb29d9689",
"content_id": "5485972ac5c40f0c26e13b40f0e382e73c6cb48e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2439,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 62,
"path": "/lib/python2.7/site-packages/cli/__init__.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"\\\nThe cli package is a framework for making simple, correct command\nline applications in Python. With cli, you can quickly add standard\n`command line parsing`_; `logging`_; `unit`_ and `functional`_ testing;\nand `profiling`_ to your CLI apps. To make it easier to do the right \nthing, cli wraps all of these tools into a single, consistent application \ninterface.\n\n.. _command line parsing: http://www.python.org/dev/peps/pep-0389/#deprecation-of-optparse\n.. _logging: http://docs.python.org/library/logging.html\n.. _unit: http://docs.python.org/library/unittest.html\n.. _functional: http://pythonpaste.org/scripttest/\n.. _profiling: http://docs.python.org/library/profile.html\n\"\"\"\n\n# pragma: no cover\n\n__project__ = \"pyCLI\"\n__version__ = \"2.0.3\"\n__package__ = \"cli\"\n__description__ = \"Simple, object-oriented approach to Python CLI apps\"\n__author__ = \"Will Maier\"\n__author_email__ = \"[email protected]\"\n__url__ = \"http://packages.python.org/pyCLI/\"\n\n# See http://pypi.python.org/pypi?%3Aaction=list_classifiers.\n__classifiers__ = [\n \"Programming Language :: Python :: 2.4\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Environment :: Console\",\n \"Development Status :: 5 - Production/Stable\",\n] \n__keywords__ = \"command line application framework\"\n\n__requires__ = []\n\n# The following is modeled after the ISC license.\n__copyright__ = \"\"\"\\\n2009-2012 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\n\n__todo__ = \"\"\"\\\n* cli.app:\n * more tests\n * add Windows registry/OS X plist support (sekhmet)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6356403231620789,
"alphanum_fraction": 0.6381471157073975,
"avg_line_length": 36.145748138427734,
"blob_id": "6f9f163a564928769708c74e44d4cf6833f8515f",
"content_id": "53b5ce9b19b3a38ead023f46a0b300651e29876b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9175,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 247,
"path": "/lib/python2.7/site-packages/cli/test.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"\\\n:mod:`cli.test` -- functional and unit test support\n---------------------------------------------------\n\nThis module provides support for easily writing both functional and\nunit tests for your scripts.\n\n.. versionadded:: 1.0.2\n\"\"\"\n\n__license__ = \"\"\"Copyright (c) 2008-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"\n\nimport os\nimport shlex\n\nfrom shutil import rmtree\nfrom tempfile import mkdtemp\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom cli.app import Abort\nfrom cli.ext import scripttest\nfrom cli.util import StringIO, trim\n\n__all__ = [\"AppTest\", \"FunctionalTest\"]\n\nclass AppTest(unittest.TestCase):\n \"\"\"An application test, based on :class:`unittest.TestCase`.\n\n :class:`AppTest` provides a simple :meth:`setUp` method\n to instantiate :attr:`app_cls`, your application's class.\n :attr:`default_kwargs` will be passed to the new application then.\n\n .. deprecated:: 1.1.1\n Use :class:`AppMixin` instead.\n \"\"\"\n app_cls = None\n \"\"\"An application, usually descended from :class:`cli.app.Application`.\"\"\"\n\n default_kwargs = {\n \"argv\": [],\n \"exit_after_main\": False\n }\n \"\"\"Default keyword arguments that will be passed to the new :class:`cli.app.Application` instance.\n\n By default, the application won't see any command line arguments and\n will not raise SystemExit when the :func:`main` function returns.\n \"\"\"\n\n def setUp(self):\n \"\"\"Set up the application.\n\n :meth:`setUp` instantiates :attr:`app_cls` and\n stores it at :attr:`app`. Test methods should call\n the application's :meth:`cli.app.Application.setup`,\n :meth:`cli.app.Application.pre_run` and\n :meth:`cli.app.Application.run` methods as necessary.\n \"\"\"\n kwargs = self.default_kwargs.copy()\n kwargs.update(getattr(self, \"kwargs\", {}))\n @self.app_cls(**kwargs)\n def app(app):\n pass\n self.app = app\n\n def runapp(self, cmd, environ={}, **kwargs):\n _kwargs = self.default_kwargs.copy()\n _kwargs.update(kwargs)\n self.app_cls(**kwargs)\n\nclass AppMixin(object):\n \"\"\"Useful methods for testing App classes.\n\n Note: This won't help for testing App _instances_.\n \"\"\"\n app_cls = None\n \"\"\"The Application class to test.\"\"\"\n args = ()\n \"\"\"The arguments to pass when instantiating the test Application.\"\"\"\n kwargs = {\n \"exit_after_main\": False,\n }\n \"\"\"The keyword arguments to pass when instantiating the test Application.\"\"\"\n\n def runapp(self, app_cls, cmd, **kwargs):\n \"\"\"Run the application.\n\n *app_cls* is a class that inherits from :class:`cli.app.Application`.\n *cmd* may be a string with command line arguments. 
If present, *cmd*\n will be parsed by :func:`shlex.split` and passed to the application\n as its *argv* keyword argument (overriding *argv* keys in both\n :attr:`default_kwargs` and *kwargs*). *kwargs* will be merged with\n :attr:`default_kwargs` and passed to the application as well.\n\n If *stdout* or *stderr* keys are not set in either *kwargs* or\n :attr:`default_kwargs`, new :class:`StringIO` instances will be\n used as temporary buffers for application output.\n\n Returns (status, app), where *status* is the application's return code\n and *app* is the application instance.\n \"\"\"\n _kwargs = self.kwargs.copy()\n _kwargs.update(kwargs)\n _kwargs[\"stdout\"] = _kwargs.get(\"stdout\", StringIO())\n _kwargs[\"stderr\"] = _kwargs.get(\"stderr\", StringIO())\n if cmd:\n _kwargs[\"argv\"] = shlex.split(cmd)\n app = app_cls(**_kwargs)\n app.setup()\n status = app.run()\n return status, app\n\n def assertAppDoes(self, app_cls, cmd, kwargs={}, stdout='', stderr='', status=0,\n raises=(), trim_output=trim):\n \"\"\"Fail the test if the app behaves unexpectedly.\n\n *app_cls*, *cmd* and *kwargs* will be passed to :meth:`runapp`. If the\n application raises an :class:`Exception` instance contained in the\n *raises* tuple, the test will pass. Otherwise, the application's stdout,\n stderr and return status will be compared with *stdout*, *stderr* and\n *status*, respectively (using :meth:`assertEqual`).\n \"\"\"\n try:\n returned, app = self.runapp(app_cls, cmd, **kwargs)\n except raises, e:\n return True\n if trim:\n stdout, stderr = trim(stdout), trim(stderr)\n self.assertEqual(status, returned)\n self.assertEqual(stdout, app.stdout)\n self.assertEqual(stderr, app.stderr)\n\n def assertAppAborts(self, app_cls, cmd, status=0, **kwargs):\n \"\"\"Fail unless the app aborts.\n\n *app_cls* must raise :class:`Abort` with a :data:`Abort.status` value\n equal to *status*.\n \"\"\"\n try:\n self.runapp(app_cls, cmd, **kwargs)\n except Abort, e:\n self.assertEqual(status, e.status)\n return True\n\n raise self.failureException(\"Abort not raised\")\n\nclass FunctionalTest(unittest.TestCase):\n \"\"\"A functional test, also based on :class:`unittest.TestCase`.\n\n Functional tests monitor an application's 'macro' behavior, making\n it easy to spot regressions. They can also be simpler to write and\n maintain as they don't rely on any application internals.\n\n The :class:`FunctionalTest` will look for scripts to run under\n :attr:`scriptdir`. It uses :class:`scripttest.TestFileEnvironment`\n to provide temporary working areas for the scripts; these scratch\n areas will be created under :attr:`testdir` (and are created and\n removed before and after each test is run).\n \"\"\"\n testdir = None\n scriptdir = None\n run_kwargs = {\n \"expect_stderr\": True,\n \"expect_error\": True,\n }\n\n def setUp(self):\n \"\"\"Prepare for the functional test.\n\n :meth:`setUp` creates the test's working directory. If\n the :mod:`unittest2` package is present, it also makes sure that\n differences in the test's standard err and output are presented\n using :class:`unittest2.TestCase.assertMultiLineEqual`. 
Finally,\n :meth:`setUp` instantiates the\n :class:`scripttest.TestFileEnvironment` and stores it at\n :attr:`env`.\n \"\"\"\n self._testdir = self.testdir\n if self._testdir is None:\n self._testdir = mkdtemp(prefix=\"functests-\")\n if not os.path.isdir(self._testdir):\n os.mkdir(self._testdir)\n path = os.environ.get(\"PATH\", '').split(':')\n path.append(self.scriptdir)\n self.env = scripttest.TestFileEnvironment(\n base_path=os.path.join(self._testdir, \"scripttest\"),\n script_path=path,\n )\n\n addTypeEqualityFunc = getattr(self, \"addTypeEqualityFunc\", None)\n if callable(addTypeEqualityFunc):\n addTypeEqualityFunc(str, \"assertMultiLineEqual\")\n\n def tearDown(self):\n \"\"\"Clean up after the test.\n\n :meth:`tearDown` removes the temporary working directory created\n during :meth:`setUp`.\n \"\"\"\n rmtree(self._testdir)\n\n def run_script(self, script, *args, **kwargs):\n \"\"\"Run a test script.\n\n *script*, *args* and *kwargs* are passed to :attr:`env`. Default keyword\n arguments are specified in :attr:`run_kwargs`.\n\n .. versionchanged:: 1.1.1\n :attr:`scriptdir` is no longer prepended to *script* before passing it\n to :attr:`env`. Instead, it is added to the env's *script_path* during\n :meth:`setUp`.\n \"\"\"\n _kwargs = self.run_kwargs.copy()\n _kwargs.update(kwargs)\n return self.env.run(script, *args, **_kwargs)\n\n def assertScriptDoes(self, result, stdout='', stderr='', returncode=0, trim_output=True):\n \"\"\"Fail if the result object's stdout, stderr and returncode are unexpected.\n\n *result* is usually a :class:`scripttest.ProcResult` with\n stdout, stderr and returncode attributes.\n \"\"\"\n if trim_output:\n stdout, stderr = trim(stdout), trim(stderr)\n self.assertEqual(returncode, result.returncode,\n \"expected returncode %d, got %d\" % (returncode, result.returncode))\n self.assertEqual(result.stdout, stdout,\n \"unexpected output on stdout\")\n self.assertEqual(result.stderr, stderr,\n \"unexpected output on stderr\")\n"
},
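AppMixin.runapp and the assert helpers above are the supported (post-1.1.1) way to drive an application under test. A sketch of a unit test built on them, using a hypothetical EchoApp for illustration:

import unittest

from cli.app import CommandLineApp
from cli.test import AppMixin

class EchoApp(CommandLineApp):
    def main(self):
        self.stdout.write("ok\n")

class TestEcho(AppMixin, unittest.TestCase):
    def test_runs_cleanly(self):
        # runapp() parses "echo" with shlex, swaps in StringIO buffers
        # for stdout/stderr, and returns (status, app).
        status, app = self.runapp(EchoApp, "echo")
        self.assertEqual(status, 0)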
{
"alpha_fraction": 0.5994226336479187,
"alphanum_fraction": 0.6074036359786987,
"avg_line_length": 38.52349090576172,
"blob_id": "e781ae42bffdee18a8161fb16f4082e5e44a4154",
"content_id": "cc342e274ea556f2850fbb9cb4b6bf8b85ef5ac4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5889,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 149,
"path": "/lib/python2.7/site-packages/coloredlogs/converter.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "# Program to convert text with ANSI escape sequences to HTML.\n#\n# Author: Peter Odding <[email protected]>\n# Last Change: October 9, 2016\n# URL: https://coloredlogs.readthedocs.io\n\n\"\"\"Convert text with ANSI escape sequences to HTML.\"\"\"\n\n# Standard library modules.\nimport codecs\nimport os\nimport pipes\nimport re\nimport subprocess\nimport tempfile\n\n# External dependencies.\nfrom humanfriendly.terminal import clean_terminal_output\n\n# Portable color codes from http://en.wikipedia.org/wiki/ANSI_escape_code#Colors.\nEIGHT_COLOR_PALETTE = (\n 'black',\n 'red',\n 'rgb(78, 154, 6)', # green\n 'rgb(196, 160, 0)', # yellow\n 'blue',\n 'rgb(117, 80, 123)', # magenta\n 'cyan',\n 'white',\n)\n\n# Regular expression that matches strings we want to convert. Used to separate\n# all special strings and literal output in a single pass (this allows us to\n# properly encode the output without resorting to nasty hacks).\ntoken_pattern = re.compile('(https?://\\\\S+|www\\\\.\\\\S+|\\x1b\\\\[.*?m)', re.UNICODE)\n\n\ndef capture(command, encoding='UTF-8'):\n \"\"\"\n Capture the output of an external command as if it runs in an interactive terminal.\n\n :param command: The command name and its arguments (a list of strings).\n :param encoding: The encoding to use to decode the output (a string).\n :returns: The output of the command.\n\n This function runs an external command under ``script`` (emulating an\n interactive terminal) to capture the output of the command as if it was\n running in an interactive terminal (including ANSI escape sequences).\n \"\"\"\n with open(os.devnull, 'wb') as dev_null:\n # We start by invoking the `script' program in a form that is supported\n # by the Linux implementation [1] but fails command line validation on\n # the Mac OS X (BSD) implementation [2]: The command is specified\n # using the -c option and the typescript file is /dev/null.\n #\n # [1] http://man7.org/linux/man-pages/man1/script.1.html\n # [2] https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man1/script.1.html\n command_line = ['script', '-qc', ' '.join(map(pipes.quote, command)), '/dev/null']\n script = subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=dev_null)\n stdout, stderr = script.communicate()\n if script.returncode == 0:\n # If `script' succeeded we assume that it understood our command line\n # invocation which means it's the Linux implementation (in this case\n # we can use standard output instead of a temporary file).\n output = stdout.decode(encoding)\n else:\n # If `script' failed we assume that it didn't understand our command\n # line invocation which means it's the Mac OS X (BSD) implementation\n # (in this case we need a temporary file because the command line\n # interface requires it).\n fd, temporary_file = tempfile.mkstemp(prefix='coloredlogs-', suffix='-capture.txt')\n try:\n command_line = ['script', '-q', temporary_file] + list(command)\n subprocess.Popen(command_line, stdout=dev_null, stderr=dev_null).wait()\n with codecs.open(temporary_file, 'r', encoding) as handle:\n output = handle.read()\n finally:\n os.unlink(temporary_file)\n # Clean up backspace and carriage return characters and the 'erase line'\n # ANSI escape sequence and return the output as a Unicode string.\n return u'\\n'.join(clean_terminal_output(output))\n\n\ndef convert(text):\n \"\"\"\n Convert text with ANSI escape sequences to HTML.\n\n :param text: The text with ANSI escape sequences (a string).\n :returns: The text converted to HTML (a 
string).\n \"\"\"\n output = []\n for token in token_pattern.split(text):\n if token.startswith(('http://', 'https://', 'www.')):\n url = token\n if '://' not in token:\n url = 'http://' + url\n text = url.partition('://')[2]\n token = u'<a href=\"%s\" style=\"color: inherit;\">%s</a>' % (html_encode(url), html_encode(text))\n elif token.startswith('\\x1b['):\n ansi_codes = token[2:-1].split(';')\n if ansi_codes == ['0']:\n token = '</span>'\n else:\n styles = []\n for code in ansi_codes:\n if code == '1':\n styles.append('font-weight: bold;')\n elif code.startswith('3') and len(code) == 2:\n styles.append('color: %s;' % EIGHT_COLOR_PALETTE[int(code[1])])\n if styles:\n token = '<span style=\"%s\">' % ' '.join(styles)\n else:\n token = ''\n else:\n token = html_encode(token)\n token = encode_whitespace(token)\n output.append(token)\n return ''.join(output)\n\n\ndef encode_whitespace(text):\n \"\"\"\n Encode whitespace so that web browsers properly render it.\n\n :param text: The plain text (a string).\n :returns: The text converted to HTML (a string).\n\n The purpose of this function is to encode whitespace in such a way that web\n browsers render the same whitespace regardless of whether 'preformatted'\n styling is used (by wrapping the text in a ``<pre>...</pre>`` element).\n \"\"\"\n text = text.replace('\\r\\n', '\\n')\n text = text.replace('\\n', '<br>\\n')\n text = text.replace(' ', ' ')\n return text\n\n\ndef html_encode(text):\n \"\"\"\n Encode characters with a special meaning as HTML.\n\n :param text: The plain text (a string).\n :returns: The text converted to HTML (a string).\n \"\"\"\n text = text.replace('&', '&')\n text = text.replace('<', '<')\n text = text.replace('>', '>')\n text = text.replace('\"', '"')\n return text\n"
},
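convert() splits its input on the token_pattern above, so ANSI color codes become <span style=...> tags and bare URLs become anchors. A small illustrative call; the comment paraphrases the output shape rather than giving the byte-exact result:

from coloredlogs.converter import convert

html = convert(u'\x1b[31mError:\x1b[0m see https://example.com')
# Roughly: '<span style="color: red;">Error:</span> see
# <a href="https://example.com" style="color: inherit;">example.com</a>',
# with literal spaces encoded as &nbsp; by encode_whitespace().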
{
"alpha_fraction": 0.7560107111930847,
"alphanum_fraction": 0.7631344795227051,
"avg_line_length": 34.09375,
"blob_id": "5645d8ce96827e62a3d456f74fae3d641a2c3b21",
"content_id": "90ecd7ce9e1bf8aeba82158c2e432bbc5dedb4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1123,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 32,
"path": "/lib/python2.7/site-packages/cli/tests/test_cli_daemon.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"CLI tools for Python.\n\nCopyright (c) 2009-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\n\nfrom cli.daemon import DaemonizingApp\n\nfrom cli import tests\n\nclass FakeDaemonizingApp(DaemonizingApp):\n \n def main(self):\n pass\n\nclass TestDaemonizingApp(tests.AppTest):\n app_cls = FakeDaemonizingApp\n\n def test_parse_args(self):\n _, app = self.runapp(self.app_cls, \"test -d\")\n self.assertEqual(app.params.daemonize, True)\n"
},
{
"alpha_fraction": 0.6265822649002075,
"alphanum_fraction": 0.6455696225166321,
"avg_line_length": 20.066667556762695,
"blob_id": "dd7468f0c82f4ed648d823fd4c1099f62e5f13d8",
"content_id": "5d568e20c2023ff0d4f26823071639530019acd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 15,
"path": "/lib/python2.7/site-packages/voluptuous/__init__.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "# flake8: noqa\n\ntry:\n from schema_builder import *\n from validators import *\n from util import *\n from error import *\nexcept ImportError:\n from .schema_builder import *\n from .validators import *\n from .util import *\n from .error import *\n\n__version__ = '0.9.3'\n__author__ = 'tusharmakkar08'\n"
},
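voluptuous (version 0.9.3 per this file) is a data validation library whose core API is the Schema callable. A sketch of typical use, hedged against minor API drift between releases:

from voluptuous import Schema, Required, MultipleInvalid

schema = Schema({Required('name'): str, 'port': int})
try:
    schema({'name': 'web', 'port': 8080})    # passes validation
except MultipleInvalid as exc:
    print(exc)                               # raised when validation fails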
{
"alpha_fraction": 0.6536215543746948,
"alphanum_fraction": 0.6558578610420227,
"avg_line_length": 35.922019958496094,
"blob_id": "dee08ace1af95d256b5beb2d1ea0351d35b329cf",
"content_id": "fd495f037b373d02fbafc4db8271b82091bc2c25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8049,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 218,
"path": "/lib/python2.7/site-packages/cli/log.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"\\\n:mod:`cli.log` -- logging applications\n--------------------------------------\n\nLogging applications use the standard library :mod:`logging` module to\nhandle log messages.\n\"\"\"\n\n__license__ = \"\"\"Copyright (c) 2008-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"\n\nimport logging\nimport sys\n\nfrom logging import Formatter, StreamHandler\n\nfrom cli.app import CommandLineApp, CommandLineMixin, Application\n\n__all__ = [\"LoggingApp\", \"LoggingMixin\", \"CommandLineLogger\"]\n\n# Silence multiprocessing errors.\nlogging.logMultiprocessing = 0\n\nclass FileHandler(logging.FileHandler):\n\n def close(self):\n \"\"\"Override close().\n \n We leave the file open because the application may have\n multiple threads that still need to write to it. Python\n should GC the fd when it goes out of scope, anyway.\n \"\"\"\n pass # pragma: no cover\n\nclass NullHandler(logging.Handler):\n \"\"\"A blackhole handler.\n\n NullHandler simply ignores all messages it receives.\n \"\"\"\n\n def emit(self, record):\n \"\"\"Ignore the record.\"\"\"\n pass\n\nclass CommandLineLogger(logging.Logger):\n \"\"\"Provide extra configuration smarts for loggers.\n\n In addition to the powers of a regular logger, a\n :class:`CommandLineLogger` can set its verbosity levels based on a\n populated :class:`argparse.Namespace`.\n \"\"\"\n default_level = logging.WARN\n \"\"\"An integer representing the default logging level.\n\n Default: :data:`logging.WARN` (only warning messages will be\n shown).\n \"\"\"\n silent_level = logging.CRITICAL\n \"\"\"An integer representing the silent logging level.\n\n Default: :data:`logging.CRITICAL` (only critical messages will\n be shown).\n \"\"\"\n\n def setLevel(self, ns):\n \"\"\"Set the logger verbosity level.\n\n *ns* is an object with :attr:`verbose`, :attr:`quiet` and\n :attr:`silent` attributes. :attr:`verbose` and :attr:`quiet` may\n be positive integers or zero; :attr:`silent` is ``True`` or ``False``.\n If :attr:`silent` is True, the logger's level will be set to\n :attr:`silent_level`. Otherwise, the difference between\n :attr:`quiet` and :attr:`verbose` will be multiplied by 10 so it\n fits on the standard logging scale and then added to\n :attr:`default_level`.\n \"\"\"\n if not hasattr(ns, \"quiet\"):\n return logging.Logger.setLevel(self, ns)\n level = self.default_level + (10 * (ns.quiet - ns.verbose))\n\n if ns.silent:\n level = self.silent_level\n elif level <= logging.NOTSET:\n level = logging.DEBUG\n\n self.level = level\n\nclass LoggingMixin(object):\n \"\"\"A mixin for command-line applications that knows how to log.\n\n The :class:`LoggingMixin` requires :class:`cli.app.CommandLineMixin`\n and allows command line configuration of the application logger. 
In\n addition to those supported by the standard :class:`cli.app.Application` and\n :class:`cli.app.CommandLineMixin`, arguments are:\n\n *stream* is an open file object to which the log messages will be\n written. By default, this is standard output (not standard error, as\n might be expected).\n\n *logfile* is the name of a file which will be opened by the\n :class:`logging.FileHandler`.\n\n *message_format* and *date_format* are passed directly to the \n :class:`CommandLineLogger` and are interpreted as in the \n :mod:`logging` package.\n\n If *root* is True, the :class:`LoggingMixin` will make itself the root\n logger. This means that, for example, code that knows nothing about\n the :class:`LoggingMixin` can inherit its verbosity level, formatters\n and handlers.\n \"\"\"\n\n def __init__(self, stream=sys.stdout, logfile=None,\n message_format=\"%(asctime)s %(message)s\", \n date_format=\"%Y-%m-%dT%H:%M:%S\", root=True, **kwargs):\n self.logfile = logfile\n self.stream = stream\n self.message_format = message_format\n self.date_format = date_format\n self.root = root\n\n def setup(self):\n \"\"\"Configure the :class:`LoggingMixin`.\n\n This method adds the :option:`-l`, :option:`q`,\n :option:`-s` and :option:`-v` parameters to the\n application and instantiates the :attr:`log` attribute.\n \"\"\"\n # Add logging-related options.\n self.add_param(\"-l\", \"--logfile\", default=self.logfile, \n help=\"log to file (default: log to stdout)\")\n self.add_param(\"-q\", \"--quiet\", default=0, help=\"decrease the verbosity\",\n action=\"count\")\n self.add_param(\"-s\", \"--silent\", default=False, help=\"only log warnings\",\n action=\"store_true\")\n self.add_param(\"-v\", \"--verbose\", default=0, help=\"raise the verbosity\",\n action=\"count\")\n\n # Create logger.\n logging.setLoggerClass(CommandLineLogger)\n self.log = logging.getLogger(self.name)\n self.formatter = Formatter(fmt=self.message_format, datefmt=self.date_format)\n\n self.log.level = self.log.default_level\n\n # If requested, make our logger the root.\n if self.root:\n logging.root = self.log\n logging.Logger.root = self.log\n logging.Logger.manager = logging.Manager(self.log)\n\n def pre_run(self):\n \"\"\"Set the verbosity level and configure the logger.\n\n The application passes the :attr:`params` object\n to the :class:`CommandLineLogger`'s special\n :meth:`CommandLineLogger.setLevel` method to set the logger's\n verbosity and then initializes the logging handlers. If the\n :attr:`logfile` attribute is not ``None``, it is passed to a\n :class:`logging.FileHandler` instance and that is added to the\n handler list. 
Otherwise, if the :attr:`stream` attribute is\n not ``None``, it is passed to a :class:`logging.StreamHandler`\n instance and that becomes the main handler.\n\n \"\"\"\n self.log.setLevel(self.params)\n\n self.log.handlers = []\n if self.params.logfile is not None:\n file_handler = FileHandler(self.params.logfile)\n file_handler.setFormatter(self.formatter) # pragma: no cover\n self.log.addHandler(file_handler)\n elif self.stream is not None:\n stream_handler = StreamHandler(self.stream)\n stream_handler.setFormatter(self.formatter)\n self.log.addHandler(stream_handler)\n\n # The null handler simply drops all messages.\n if not self.log.handlers:\n self.log.addHandler(NullHandler())\n\nclass LoggingApp(LoggingMixin, CommandLineMixin, Application):\n \"\"\"A logging application.\n\n This class simply glues together the base :class:`Application`,\n :class:`LoggingMixin` and other mixins that provide necessary functionality.\n\n .. versionchanged:: 1.0.4\n Actual functionality moved to :class:`LoggingMixin`.\n \"\"\"\n \n def __init__(self, main=None, **kwargs):\n CommandLineMixin.__init__(self, **kwargs)\n LoggingMixin.__init__(self, **kwargs)\n Application.__init__(self, main, **kwargs)\n\n def setup(self):\n Application.setup(self)\n CommandLineMixin.setup(self)\n LoggingMixin.setup(self)\n\n def pre_run(self):\n Application.pre_run(self)\n CommandLineMixin.pre_run(self)\n LoggingMixin.pre_run(self)\n"
},
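Putting LoggingApp to work looks like the sketch below: the mixin's -q/-v/-s parameters shift app.log around the WARNING default in steps of 10, so -vv reaches DEBUG. A sketch assuming the module above, Python 2.7:

from cli.log import LoggingApp

@LoggingApp
def serve(app):
    app.log.debug("visible with -vv")
    app.log.warning("visible at the default level")

if __name__ == "__main__":
    serve.run()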
{
"alpha_fraction": 0.6146632432937622,
"alphanum_fraction": 0.6202046275138855,
"avg_line_length": 27.962963104248047,
"blob_id": "f1d097c839dbb3cf15297641df8df6ef8e5f405b",
"content_id": "601b453f52591dbe440690550fab943310e1fc69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2346,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 81,
"path": "/lib/python2.7/site-packages/cli/tests/__init__.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"CLI tools for Python.\n\nCopyright (c) 2009-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\n\n__test__ = False\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom cli import test\nfrom cli.test import AppMixin\n\nclass BaseTest(unittest.TestCase):\n pass\n\nclass DecoratorTests(object):\n\n def test_run(self):\n # This servers as a nice sanity check.\n self.app.run()\n\n def test_decorate_callable(self):\n @self.app_cls\n def foo(app):\n pass\n self.assertEqual(foo.name, \"foo\")\n\n def test_instantiate_and_decorate_callable(self):\n @self.app_cls(name=\"foo\")\n def bar(app):\n pass\n self.assertEqual(bar.name, \"foo\")\n\n def test_wrap_non_function_callable(self):\n class foo(object):\n def __call__(self, app):\n pass\n # Required in Python 2.4.\n foo = foo()\n foo.__name__ = \"foo\"\n foo = self.app_cls(foo)\n setattr(foo, \"__name__\", \"foo\")\n\n self.assertEqual(foo.name, \"foo\")\n \n def test_subclass(self):\n cls = self.app_cls\n class Test(cls):\n \n def __init__(self, main=None, **kwargs):\n cls.__init__(self, main, **kwargs)\n self.exit_after_main = False\n\n def pre_run(self):\n # Skip things like CLI parsing...\n pass\n \n def main(self):\n return 0\n self.assertEqual(Test().run(), 0)\n\nclass AppTest(DecoratorTests, AppMixin, BaseTest):\n\n def setUp(self):\n BaseTest.setUp(self)\n _, self.app = self.runapp(self.app_cls, \"foo\")\n"
},
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 24.200000762939453,
"blob_id": "197c0c3915b9bcd9aa20a6c1c99c74c958ed9c68",
"content_id": "12e3b0128f4a633135a54fc6659c4f901b566bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 5,
"path": "/lib/python2.7/site-packages/cli/tests/functional/__init__.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom cli.test import FunctionalTest\n\nFunctionalTest.scriptdir = os.path.join(os.path.dirname(__file__), \"scripts\")\n"
},
{
"alpha_fraction": 0.6373787522315979,
"alphanum_fraction": 0.6406736373901367,
"avg_line_length": 34.94078826904297,
"blob_id": "b7f856808e6a9d8a91321e909e1289ff857d2759",
"content_id": "94bf5b4d33ec35cd961405b7fd8796de6d92955b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5463,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 152,
"path": "/lib/python2.7/site-packages/cli/daemon.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"\\\n:mod:`cli.daemon` -- daemonizing applications\n---------------------------------------------\n\nDaemonizing applications run in the background, forking themselves off.\n\"\"\"\n\n__license__ = \"\"\"\nCopyright (c) 2008-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"\n\n__todo__ = \"\"\"\\\n * when daemonized, catch exceptions and write them to app.log\n\"\"\".split(\" * \")\n\nimport os\nimport sys\n\nfrom cli.app import CommandLineApp, CommandLineMixin, Application\nfrom cli.log import LoggingMixin\n\n__all__ = [\"DaemonizingApp\", \"DaemonizingMixin\"]\n\nclass DaemonizingMixin(object):\n \"\"\"A command-line application that knows how to daemonize.\n\n The :class:`DaemonizingMixin` requires the :class:`cli.log.LoggingMixin`\n (for it's not very helpful to daemonize without being able to log\n messages somewhere). In addition to those supported by the standard\n :class:`cli.app.Application`, :class:`cli.app.CommandLineMixin` and\n :class:`cli.log.LoggingMixin`, arguments are:\n\n *pidfile* is a string pointing to a file where the application will\n write its process ID after it daemonizes. If it is ``None``, no such\n file will be created.\n\n *chdir* is a string pointing to a directory to which the application\n will change after it daemonizes.\n\n *null* is a string representing a file that will be opened to\n replace stdin, stdout and stderr when the application daemonizes. By\n default, this :data:`os.path.devnull`.\n \"\"\"\n\n def __init__(self, pidfile=None, chdir='/', null=os.path.devnull, **kwargs):\n self.pidfile = pidfile\n self.chdir = chdir\n self.null = null\n\n def setup(self):\n \"\"\"Configure the :class:`DaemonizingMixin`.\n\n This method adds the :option:`-d`, :option:`u`,\n and :option:`-p` parameters to the application.\n \"\"\"\n # Add daemonizing options.\n self.add_param(\"-d\", \"--daemonize\", default=False, action=\"store_true\",\n help=\"run the application in the background\")\n self.add_param(\"-u\", \"--user\", default=None, \n help=\"change to USER[:GROUP] after daemonizing\")\n self.add_param(\"-p\", \"--pidfile\", default=None, \n help=\"write PID to PIDFILE after daemonizing\")\n\n def daemonize(self):\n \"\"\"Run in the background.\n\n :meth:`daemonize` must be called explicitly by the application\n when it's ready to fork into the background. 
It forks, flushes\n and replaces stdin, stderr and stdout with the open :attr:`null`\n file and, if requested on the command line, writes its PID to a\n file and changes user/group.\n \"\"\"\n if os.fork(): sys.exit(0)\n os.umask(0) \n os.setsid() \n if os.fork(): sys.exit(0)\n\n self.stdout.flush()\n self.stderr.flush()\n si = open(self.null, 'r')\n so = open(self.null, 'a+')\n se = open(self.null, 'a+', 0)\n os.dup2(si.fileno(), self.stdin.fileno())\n os.dup2(so.fileno(), self.stdout.fileno())\n os.dup2(se.fileno(), self.stderr.fileno())\n\n if self.params.pidfile:\n self.log.debug(\"Writing pidfile %s\", self.params.pidfile)\n pidfile = open(self.params.pidfile, 'w')\n pidfile.write('%i\\n' % os.getpid())\n pidfile.close()\n\n if self.params.user:\n import grp\n import pwd\n delim = ':'\n user, sep, group = self.params.user.partition(delim)\n\n # If group isn't specified, try to use the username as\n # the group.\n if delim != sep:\n group = user\n self.log.debug(\"Changing to %s:%s\", user, group)\n os.setgid(grp.getgrnam(group).gr_gid)\n os.setuid(pwd.getpwnam(user).pw_uid)\n\n self.log.debug(\"Changing directory to %s\", self.chdir)\n os.chdir(self.chdir)\n\n return True\n\nclass DaemonizingApp(\n DaemonizingMixin, LoggingMixin, CommandLineMixin, Application):\n \"\"\"A daemonizing application.\n\n This class simply glues together the base :class:`Application`,\n :class:`DaemonizingMixin` and other mixins that provide necessary\n functionality.\n\n .. versionchanged:: 1.0.4\n Actual functionality moved to :class:`DaemonizingMixin`.\n \"\"\"\n\n def __init__(self, main=None, **kwargs):\n DaemonizingMixin.__init__(self, **kwargs)\n LoggingMixin.__init__(self, **kwargs)\n CommandLineMixin.__init__(self, **kwargs)\n Application.__init__(self, main, **kwargs)\n\n def setup(self):\n Application.setup(self)\n CommandLineMixin.setup(self)\n LoggingMixin.setup(self)\n DaemonizingMixin.setup(self)\n\n def pre_run(self):\n Application.pre_run(self)\n CommandLineMixin.pre_run(self)\n LoggingMixin.pre_run(self)\n"
},
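As the docstring notes, daemonize() must be called explicitly once the app is ready to background itself; the -d flag only sets app.params.daemonize. A sketch:

from cli.daemon import DaemonizingApp

@DaemonizingApp
def server(app):
    if app.params.daemonize:
        app.daemonize()            # double-fork, detach, redirect stdio to null
    app.log.info("server started")  # visible with -v

if __name__ == "__main__":
    server.run()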
{
"alpha_fraction": 0.658595621585846,
"alphanum_fraction": 0.6634382605552673,
"avg_line_length": 33.41666793823242,
"blob_id": "2d061687aa15c07e17a42997f6eb721ac690c1a0",
"content_id": "bebea8b8c8f0a12785b639a298ca3b07c9c3a7c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 12,
"path": "/bin/elasticluster",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "#!/home/vsachde/elasticluster/bin/python\n# EASY-INSTALL-ENTRY-SCRIPT: 'elasticluster','console_scripts','elasticluster'\n__requires__ = 'elasticluster'\nimport re\nimport sys\nfrom pkg_resources import load_entry_point\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw?|\\.exe)?$', '', sys.argv[0])\n sys.exit(\n load_entry_point('elasticluster', 'console_scripts', 'elasticluster')()\n )\n"
},
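This wrapper is what setuptools generates for a console_scripts entry point. The declaration that produces it lives in the project's own setup.py and looks roughly like the sketch below; the target module path (elasticluster.main:main) is a hypothetical placeholder for illustration, not taken from elasticluster's actual setup.py:

from setuptools import setup

setup(
    name='elasticluster',
    entry_points={
        'console_scripts': [
            # hypothetical target; the real one is defined in
            # elasticluster's own setup.py
            'elasticluster = elasticluster.main:main',
        ],
    },
)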
{
"alpha_fraction": 0.627084493637085,
"alphanum_fraction": 0.629349946975708,
"avg_line_length": 34.7905387878418,
"blob_id": "f5288d731f47015276696f81e8dd64bf2676c1b2",
"content_id": "7b6db5284ec5cadbebdb08b3cc7733050f28982b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15891,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 444,
"path": "/lib/python2.7/site-packages/cli/app.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"\\\n:mod:`cli.app` -- basic applications\n------------------------------------\n\nThe :mod:`cli.app` module establishes the basis for all of the other\napplications and is a good place to start when looking to extend\n:class:`Application` functionality or to understand the basic API.\n\"\"\"\n\n__license__ = \"\"\"Copyright (c) 2008-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"\n\n__todo__ = \"\"\"\\\n\"\"\".split(\" * \")\n\nimport os\nimport sys\n\nfrom cli._ext import argparse\nfrom cli.util import ifelse, ismethodof\n\n__all__ = [\"Application\", \"CommandLineApp\", \"CommandLineMixin\"]\n\nclass Error(Exception):\n pass\n\nclass Abort(Error):\n \"\"\"Raised when an application exits unexpectedly.\n\n :class:`Abort` takes a single integer argument indicating the exit status of\n the application.\n\n .. versionadded:: 1.0.4\n \"\"\"\n\n def __init__(self, status):\n self.status = status\n message = \"Application terminated (%s)\" % self.status\n super(Abort, self).__init__(message, self.status)\n\nclass Application(object):\n \"\"\"An application.\n \n :class:`Application` constructors should always be called with\n keyword arguments, though the *main* argument may be passed\n positionally (as when :class:`Application` or its subclasses are\n instantiated as decorators). Arguments are:\n\n *main* is the callable object that performs the main work of the\n application. The callable must accept an :class:`Application`\n instance as its sole argument. If *main* is ``None``, it is assumed that\n a valid callable will be passed to the :meth:`__call__` method (when\n using an :class:`Application` instance as a decorator). If *main* is\n not None, the :meth:`setup` method will be called, allowing\n subclasses to customize the order in which certain setup steps are\n executed.\n\n *name* is the name of the application itself. If *name* is not ``None``,\n the :attr:`name` property will inspect the :attr:`main` callable and\n use its function or class name.\n\n *exit_after_main* determines whether the application will call\n :func:`sys.exit` after :attr:`main` completes.\n\n *stdin*, *stderr* and *stdout* are file objects that represent the \n usual application input and outputs. If they are ``None``, they will\n be replaced with :data:`sys.stdin`, :data:`sys.stderr` and\n :data:`sys.stdout`, respectively.\n\n *version* is a string representing the application's version.\n\n *description* is a string describing the application. If\n *description* is ``None``, the :attr:`description` property will use\n the :attr:`main` callable's :attr:`__doc__` attribute instead.\n\n *argv* is a list of strings representing the options passed on the\n command line. 
If *argv* is ``None``, :data:`sys.argv` will be used\n instead.\n\n *profiler* is a :class:`cli.profiler.Profiler` instance, or ``None`` (default).\n If not ``None``, the profiler will be available to the running application.\n\n *reraise* is a tuple of exception classes: if an exception is\n raised by :attr:`main` and it is listed here, then it will be\n propagated upwards by :attr:`post_run`; otherwise it will just\n cause :attr:`post_run` to exit with return code 1.\n\n In all but a very few cases, subclasses that override the constructor\n should call :meth:`Application.__init__` at the end of the\n overridden method to ensure that the :meth:`setup` method is\n called.\n \"\"\"\n main = None\n\n def __init__(self, main=None, name=None, exit_after_main=True, stdin=None, stdout=None,\n stderr=None, version=None, description=None, argv=None,\n profiler=None, reraise=(Exception,), **kwargs):\n self._name = name\n self.exit_after_main = exit_after_main\n self.stdin = stdin and stdin or sys.stdin\n self.stdout = stdout and stdout or sys.stdout\n self.stderr = stderr and stderr or sys.stderr\n self.version = version\n self.argv = argv\n if argv is None:\n self.argv = sys.argv\n self._description = description\n\n self.profiler = profiler\n self.reraise = reraise\n \n if main is not None:\n self.main = main\n\n if getattr(self, \"main\", None) is not None:\n self.setup()\n\n def __call__(self, main):\n \"\"\"Wrap the *main* callable and return an :class:`Application` instance.\n \n This method is useful when it is necessary to pass keyword\n arguments to the :class:`Application` constructor when\n decorating callables. For example::\n \n @cli.Application(stderr=None)\n def foo(app):\n pass\n \n In this case, :meth:`setup` will occur during :meth:`__call__`,\n not when the :class:`Application` is first constructed.\n \"\"\"\n self.main = main\n\n self.setup()\n\n return self\n\n def setup(self):\n \"\"\"Configure the :class:`Application`.\n\n This method is provided so that subclasses can easily customize\n the configuration process without having to reimplement the base\n constructor. :meth:`setup` is called once, either by the base\n constructor or :meth:`__call__`.\n \"\"\"\n pass\n\n @property\n def name(self):\n \"\"\"A string identifying the application.\n\n Unless specified when the :class:`Application` was created, this\n property will examine the :attr:`main` callable and use its\n name (:attr:`__name__` or :attr:`func_name` for classes or\n functions, respectively).\n \"\"\"\n name = self._name\n if name is None:\n name = getattr(self.main, 'func_name', self.main.__name__)\n return name\n\n @property\n def description(self):\n \"\"\"A string describing the application.\n\n Unless specified when the :class:`Application` was created, this\n property will examine the :attr:`main` callable and use its\n docstring (:attr:`__doc__` attribute).\n \"\"\"\n if self._description is not None:\n return self._description\n else:\n return getattr(self.main, \"__doc__\", \"\")\n\n def pre_run(self):\n \"\"\"Perform any last-minute configuration.\n\n The :meth:`pre_run` method is called by the :meth:`run` method\n before :attr:`main` is executed. This is a good time to do\n things like read a configuration file or parse command line\n arguments. 
The base implementation does nothing.\n \"\"\"\n pass\n\n def post_run(self, returned):\n \"\"\"Clean up after the application.\n\n After :attr:`main` has been called, :meth:`run` passes the return value\n (or :class:`Exception` instance raised) to this method. By default,\n :meth:`post_run` decides whether to call :func:`sys.exit` (based on the\n value of the :attr:`exit_after_main` attribute) or pass the value back\n to :meth:`run`. Subclasses should probably preserve this behavior.\n \"\"\"\n # Interpret the returned value in the same way sys.exit() does.\n if returned is None:\n returned = 0\n elif isinstance(returned, Abort):\n returned = returned.status\n elif isinstance(returned, self.reraise):\n # raising the last exception preserves traceback\n raise\n else:\n try:\n returned = int(returned)\n except:\n returned = 1\n \n if self.exit_after_main:\n sys.exit(returned)\n else:\n return returned\n\n def run(self):\n \"\"\"Run the application, returning its return value.\n\n This method first calls :meth:`pre_run` and then calls :attr:`main`,\n passing it an instance of the :class:`Application` itself as its only\n argument. The return value (or :class:`Exception` instance raised) is\n then passed to :meth:`post_run` which may modify it (or terminate the\n application entirely).\n \"\"\"\n self.pre_run()\n\n args = (self,)\n if ismethodof(self.main, self):\n args = ()\n try:\n returned = self.main(*args)\n except Exception, e:\n returned = e\n\n return self.post_run(returned)\n\nclass ArgumentParser(argparse.ArgumentParser):\n \"\"\"This subclass makes it easier to test ArgumentParser.\n\n Unwrapped, :class:`argparse.ArgumentParser` checks the sys module for\n stdout, stderr and argv at several points. This wrapper class moves all of\n these checks into instantiation (except for :attr:`prog`, which is a\n property).\n\n .. versionchanged:: 1.1.1\n The *stdout* and *stderr* options replace *file* (which was present until 1.1.1);\n *argv* is added.\n \"\"\"\n\n def __init__(self, stdout=None, stderr=None, argv=None, **kwargs):\n self.stdout = ifelse(stdout, stdout is not None, sys.stdout)\n self.stderr = ifelse(stderr, stderr is not None, sys.stderr)\n self.argv = ifelse(argv, argv is not None, sys.argv)\n self._prog = kwargs.get(\"prog\", None)\n super(ArgumentParser, self).__init__(**kwargs)\n\n def get_prog(self):\n\n prog = self._prog\n if prog is None:\n prog = os.path.basename(self.argv[0])\n return prog\n\n def set_prog(self, value):\n self._prog = value\n\n prog = property(get_prog, set_prog, doc= \"\"\"\\\n Return or lookup the program's name.\n\n If :attr:`_prog` is None, returns the first element in the :attr:`argv`\n list.\n \"\"\")\n del(get_prog, set_prog)\n\n def parse_known_args(self, args=None, namespace=None):\n \"\"\"If *args* is None, use :attr:`argv`, not :data:`sys.argv`.\"\"\"\n if args is None:\n args = self.argv[1:]\n return super(ArgumentParser, self).parse_known_args(args, namespace)\n\n def _print_message(self, message, file=None):\n \"\"\"If *file* is None, use :attr:`stdout` instead of :data:`sys.stdout`.\n\n .. 
versionchanged:: 1.1.1\n Previously used :attr:`file`, which is now :attr:`stdout`.\n \"\"\"\n if file is None: # pragma: no cover\n file = self.stdout\n if message:\n message = unicode(message)\n super(ArgumentParser, self)._print_message(message, file)\n\n def exit(self, status=0, message=None):\n \"\"\"If *message* is not None, write it to :attr:`stderr` instead of :data:`sys.stderr`.\"\"\"\n if message:\n self.stderr.write(unicode(message))\n super(ArgumentParser, self).exit(status, message=None)\n\n def error(self, message):\n \"\"\"Write *message* to :attr:`stderr` instead of :data:`sys.stderr`.\"\"\"\n self.print_usage(self.stderr)\n self.exit(2, u\"%s: error: %s\\n\" % (self.prog, message))\n\nclass CommandLineMixin(object):\n \"\"\"A command line application.\n\n Command line applications extend the basic :class:`Application`\n framework to support command line parsing using the :mod:`argparse`\n module. As with :class:`Application` itself, *main* should be a\n callable. Other arguments are:\n\n *usage* is a string describing command line usage of the\n application. If it is not supplied, :mod:`argparse` will\n automatically generate a usage statement based on the application's\n parameters.\n\n *epilog* is text appended to the argument descriptions.\n\n The rest of the arguments are passed to the :class:`Application`\n constructor.\n \"\"\"\n prefix = '-'\n argparser_factory = ArgumentParser\n formatter = argparse.HelpFormatter\n\n params = None\n \"\"\"The :attr:`params` attribute is an object with attributes\n containing the values of the parsed command line arguments.\n Specifically, its an instance of :class:`argparse.Namespace`,\n but only the mapping of attributes to argument values should be\n relied upon.\n \"\"\"\n\n def __init__(self, usage=None, epilog=None, **kwargs):\n self.usage = usage\n self.epilog = epilog\n self.actions = {}\n self.params = argparse.Namespace()\n\n def setup(self):\n \"\"\"Configure the :class:`CommandLineMixin`.\n\n During setup, the application instantiates the\n :class:`argparse.ArgumentParser` and adds a version parameter\n (:option:`-V`, to avoid clashing with :option:`-v`\n verbose).\n \"\"\"\n self.argparser = self.argparser_factory(\n prog=self.name,\n usage=self.usage,\n description=self.description,\n epilog=self.epilog,\n prefix_chars=self.prefix,\n argv=self.argv,\n stdout=self.stdout,\n stderr=self.stderr,\n )\n\n # We add this ourselves to avoid clashing with -v/verbose.\n if self.version is not None:\n self.add_param(\n \"-V\", \"--version\", action=\"version\", \n version=(\"%%(prog)s %s\" % self.version),\n help=(\"show program's version number and exit\"))\n\n def add_param(self, *args, **kwargs):\n \"\"\"Add a parameter.\n\n :meth:`add_param` wraps\n :meth:`argparse.ArgumentParser.add_argument`, storing the\n parameter options in a dictionary. This information can be used\n later by other subclasses when deciding whether to override\n parameters.\n \"\"\"\n action = self.argparser.add_argument(*args, **kwargs)\n self.actions[action.dest] = action\n return action\n\n def update_params(self, params, newparams):\n \"\"\"Update a parameter namespace.\n\n The *params* instance will be updated with the names and values\n from *newparams* and then returned.\n\n .. versionchanged:: 1.0.2\n :meth:`update_params` expects and returns\n :class:`argparse.Namespace` instances; previously, it took\n keyword arguments and updated :attr:`params` itself. 
This is\n now left to the caller.\n \"\"\"\n for k, v in vars(newparams).items():\n setattr(params, k, v)\n\n return params\n\n def pre_run(self):\n \"\"\"Parse command line.\n\n During :meth:`pre_run`, :class:`CommandLineMixin`\n calls :meth:`argparse.ArgumentParser.parse_args`. The results are\n stored in :attr:`params`. \n\n ..versionchanged:: 1.1.1\n\n If :meth:`argparse.ArgumentParser.parse_args` raises SystemExit but\n :attr:`exit_after_main` is not True, raise Abort instead.\n \"\"\"\n try:\n ns = self.argparser.parse_args()\n except SystemExit, e:\n if self.exit_after_main:\n raise\n else:\n raise Abort(e.code)\n self.params = self.update_params(self.params, ns)\n\nclass CommandLineApp(CommandLineMixin, Application):\n \"\"\"A command line application.\n\n This class simply glues together the base :class:`Application` and\n :class:`CommandLineMixin`.\n\n .. versionchanged: 1.0.4\n\n Actual functionality moved to :class:`CommandLineMixin`.\n \"\"\"\n \n def __init__(self, main=None, **kwargs):\n CommandLineMixin.__init__(self, **kwargs)\n Application.__init__(self, main, **kwargs)\n\n def setup(self):\n Application.setup(self)\n CommandLineMixin.setup(self)\n"
},
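
The file entry closed above is the tail of a Python 2 `cli` application module: an `argparse.ArgumentParser` subclass that redirects all parser output to instance-held streams, plus a `CommandLineMixin` built on top of it. As a minimal Python 3 sketch of that redirection pattern (the `StreamArgumentParser` name and the `stdout`/`stderr` constructor keywords are illustrative, not the cli package's actual API):

```python
import argparse
import io
import sys


class StreamArgumentParser(argparse.ArgumentParser):
    """Sketch: route all parser output through instance streams."""

    def __init__(self, *args, stdout=None, stderr=None, **kwargs):
        # Hypothetical keywords mirroring the pattern in the entry
        # above; the real cli package's constructor may differ.
        self.stdout = stdout if stdout is not None else sys.stdout
        self.stderr = stderr if stderr is not None else sys.stderr
        super().__init__(*args, **kwargs)

    def _print_message(self, message, file=None):
        # argparse funnels usage/help text through this hook.
        if message:
            (file or self.stdout).write(message)

    def exit(self, status=0, message=None):
        # Final message goes to self.stderr, then SystemExit is raised.
        if message:
            self.stderr.write(message)
        super().exit(status)

    def error(self, message):
        # Usage plus the error line; exit status 2 as argparse documents.
        self.print_usage(self.stderr)
        self.exit(2, "%s: error: %s\n" % (self.prog, message))


# Usage: capture the complaint about an unknown flag in a StringIO
# instead of writing to the process-wide sys.stderr.
buf = io.StringIO()
parser = StreamArgumentParser(prog="demo", stderr=buf)
try:
    parser.parse_args(["--no-such-flag"])
except SystemExit:
    pass
print(buf.getvalue(), end="")
```

Holding the streams on the instance is what makes a mixin like the one above testable without monkey-patching the global `sys` streams.
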
{
"alpha_fraction": 0.6652406454086304,
"alphanum_fraction": 0.6748663187026978,
"avg_line_length": 30.875,
"blob_id": "930dc3760584ddceaaa0286235b25c903f9596a2",
"content_id": "8d4c2db6b073ced76fc932545dad9c1052f294eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2805,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 88,
"path": "/lib/python2.7/site-packages/cli/util.py",
"repo_name": "vipinsachdeva/elasticluster_full",
"src_encoding": "UTF-8",
"text": "\"\"\"cli.util - miscellaneous helpers\n\nCopyright (c) 2008-2010 Will Maier <[email protected]>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"\n\nimport sys\n\nfrom cli.profiler import Stats, fmtsec, update_wrapper\n\ntry:\n import io\n BaseStringIO = io.StringIO\nexcept (ImportError, AttributeError):\n import StringIO\n BaseStringIO = StringIO.StringIO\n\n\nclass StringIO(BaseStringIO):\n \n def write(self, s):\n BaseStringIO.write(self, unicode(s))\n\ndef trim(string):\n \"\"\"Trim whitespace from strings.\n\n This implementation is a (nearly) verbatim copy of that proposed in PEP-257:\n\n http://www.python.org/dev/peps/pep-0257/\n \"\"\"\n if not string:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = string.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxint\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxint:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed) + \"\\n\"\n\ndef ifelse(a, predicate, b):\n \"\"\"Return *a* if *predicate* evaluates to True; else *b*.\n\n This emulates the logic of the if..else ternary operator introduced in\n Python 2.5.\n \"\"\"\n if predicate:\n return a\n else:\n return b\n\ndef ismethodof(method, obj):\n \"\"\"Return True if *method* is a method of *obj*.\n\n *method* should be a method on a class instance; *obj* should be an instance\n of a class.\n \"\"\"\n # Check for both 'im_self' (Python < 3.0) and '__self__' (Python >= 3.0).\n cls = obj.__class__\n mainobj = getattr(method, \"im_self\",\n getattr(method, \"__self__\", None))\n return isinstance(mainobj, cls)\n"
}
] | 16 |
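
The `trim` helper in the cli/util.py entry above is the PEP 257 docstring-trimming recipe copied nearly verbatim. The standard library's `inspect.cleandoc` performs the same normalization, which makes for a compact way to see the intended behavior (stdlib equivalent shown for illustration; it is not part of the cli package):

```python
import inspect

raw = """First line is kept as written.

        The indented body loses its common leading whitespace,
        and leading and trailing blank lines are stripped,
        exactly as the PEP 257 recipe prescribes.
    """

# inspect.cleandoc performs the expandtabs / dedent / blank-line
# stripping steps that cli.util.trim implements by hand.
print(inspect.cleandoc(raw))
```
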
Enrrod/StatisticsPython | https://github.com/Enrrod/StatisticsPython | f24cbb7e1ae048aa18fc303a01c54ca62c49cc8e | ac1987b565cd4151ddb26cb021b7f70ee9989b70 | d734903372e9f8fac093bac5cd0ec0f13b8dae69 | refs/heads/master | 2020-03-19T03:23:04.353878 | 2018-06-21T06:57:09 | 2018-06-21T06:57:09 | 135,723,037 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8255813717842102,
"alphanum_fraction": 0.8255813717842102,
"avg_line_length": 42,
"blob_id": "ae08a1b223776847adcd933d021794d996abf13f",
"content_id": "67ca2dc461c49f381a4f7777e2249b34a83eca6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 4,
"path": "/README.md",
"repo_name": "Enrrod/StatisticsPython",
"src_encoding": "UTF-8",
"text": "# StatisticsPython\nData statistics analysis using python libraries.\n\nThe main idea is to cover the common options of SPSS software for computing statistics over some data.\n"
},
{
"alpha_fraction": 0.4936971068382263,
"alphanum_fraction": 0.5027564167976379,
"avg_line_length": 53.14258575439453,
"blob_id": "15f38cb2233f9360596515a43c61c783459f4d6c",
"content_id": "6950e3cb6b2a3769c0397f3a8ce67fefcf7cb96a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28479,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 526,
"path": "/StatisticsFunctions.py",
"repo_name": "Enrrod/StatisticsPython",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\n\nfrom xlrd import open_workbook\nfrom collections import OrderedDict, Counter, namedtuple\nfrom scipy import stats\nfrom prettytable import PrettyTable as PT\nimport unicodedata\nimport xlsxwriter as xls\nimport itertools\nimport pyvttbl as pt\n\n# -----DATA IMPORT AND EXPORT FUNCTIONS---------------------------------------------------------------------------------\n\n\ndef dataRead(file):\n '''This function reads an xls file and creates a dictionary containing the variable names and the\n data stored in each one.\n INPUT: Xls file route (string).\n OUTPUT: Excel data stored in a dictionary (dict).'''\n book = open_workbook(file)\n sheet = book.sheet_by_index(0)\n cols = sheet.ncols\n data = OrderedDict()\n headers = sheet.row(0)\n for h in range(len(headers)):\n if isinstance(headers[h].value, unicode):\n data[unicodedata.normalize('NFKD', headers[h].value).encode('ascii','ignore')] = []\n for column in range(cols):\n col = sheet.col(column)\n key = col[0].value\n if isinstance(key, unicode):\n key = unicodedata.normalize('NFKD', key).encode('ascii', 'ignore')\n for i in range(1,len(col)):\n value = col[i].value\n if isinstance(value, unicode):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n data[key].append(value)\n return data\n\n\ndef exportResult(table, path):\n '''This function exports a table obtained by an statistical test to an .xlsx file in the designed route.\n INPUT: Table to export (list) and the path of the file ending with file_name.xlsx (string).\n OUTPUT: Xlsx file saved in the designed route.'''\n workbook = xls.Workbook(path)\n worksheet = workbook.add_worksheet()\n row = 0\n for i in range(len(table)):\n data = table[i]\n for j in range(len(data)):\n worksheet.write(row, j, data[j])\n row = row + 1\n workbook.close()\n print \"Data saved\"\n\n# -----T-TEST FUNCTIONS-------------------------------------------------------------------------------------------------\n\n\ndef pairedTtest(data, printSig, *measures):\n '''This function computes the paired T-test for pairs of measures from data dictionary.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is\n a boolean variable, True: the function only prints the significative results, False:\n the function prints all the values (bool). *measures contain all the pairs of\n variables to compare (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. 
True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n if len(measures) % 2 == 0:\n results = OrderedDict()\n for i in range(0,len(measures), 2):\n testName = measures[i] + '/' + measures[i + 1]\n res = stats.ttest_rel(data[measures[i]], data[measures[i + 1]])\n results[testName] = res\n table_matrix = [['Paired T-test', 'Test Statistic', 'p-Value']]\n if printSig:\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1]\n if pVal < 0.05:\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n else:\n m = results.keys()\n for k in range(len(m)):\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n table = PT(table_matrix[0])\n for row in range(1,len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n else:\n print('Error: Measures must be paired two by two')\n return table_matrix\n\ndef indepTtest(data, printSig, groupBy, *measures):\n '''This function computes the independent T-test for measures grouped by groupBy from data dictionary.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is a boolean\n variable, True: the function only prints the significative results, False: the function\n prints all the values (bool). groupBy is a list that contains 3 values, the first is the\n grouping variable, the second and the third are the groups to differentiate (list). *measures\n contain all the pairs of variables to compare (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. 
True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n if not isinstance(groupBy, list) and len(groupBy) == 3:\n print('Error: groupBy must be a list with three elements, the first one is the variable of grouping,'\n ' the second and the third are the groups to compare.')\n else:\n indexG1 = []\n indexG2 = []\n results = OrderedDict()\n for i in range(len(data[groupBy[0]])):\n if data[groupBy[0]][i] == groupBy[1]:\n indexG1.append(i)\n elif data[groupBy[0]][i] == groupBy[2]:\n indexG2.append(i)\n for i in range(len(measures)):\n m1 = []\n m2 = []\n for g1 in range(len(indexG1)):\n m1.append(data[measures[i]][g1])\n for g2 in range(len(indexG2)):\n m2.append(data[measures[i]][g2])\n levene = stats.levene(m1, m2)\n if levene[1] > 0.05:\n testName = measures[i] + ' (' + groupBy[1] + '/' + groupBy[2] + ')'\n res = stats.ttest_ind(m1, m2, equal_var = True)\n results[testName] = [levene, res]\n elif levene[1] < 0.05:\n testName = measures[i] + ' (' + groupBy[1] + '/' + groupBy[2] + ')'\n res = stats.ttest_ind(m1, m2, equal_var=False)\n results[testName] = [levene, res]\n table_matrix = [['Independent T-test', 'Levene Statistic', 'Levene p-Value','Test Statistic',\n 'p-Value']]\n if printSig:\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1][1]\n if pVal < 0.05:\n table_matrix.append([m[k], results[m[k]][0][0], results[m[k]][0][1], results[m[k]][1][0],\n results[m[k]][1][1]])\n else:\n m = results.keys()\n for k in range(len(m)):\n table_matrix.append([m[k], results[m[k]][0][0], results[m[k]][0][1], results[m[k]][1][0],\n results[m[k]][1][1]])\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n\n# -----CORRELATION TEST FUNCTIONS---------------------------------------------------------------------------------------\n\n\ndef pearsonCorrel(data, printSig, *measures):\n '''This function computes the Pearson correlation over all the possible pairs of the variables included.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is a boolean\n variable, True: the function only prints the significative results, False: the function\n prints all the values (bool). *measures contain all the variables to compare (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n if not len(measures) >= 2:\n print('Error: At least two measures are necessary to compute correlation.')\n else:\n pairs = list(itertools.combinations(measures, 2))\n results = OrderedDict()\n for i in range(len(pairs)):\n testName = pairs[i][0] + '/' + pairs[i][1]\n res = stats.pearsonr(data[pairs[i][0]], data[pairs[i][1]])\n results[testName] = res\n table_matrix = [['Pearson correlation', 'Correl. 
coefficient', 'p-Value']]\n if printSig:\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1]\n if pVal < 0.05:\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n else:\n m = results.keys()\n for k in range(len(m)):\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n table = PT(table_matrix[0])\n for row in range(1,len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n\n# -----OTHER TEST FUNCTIONS---------------------------------------------------------------------------------------------\n\n\ndef normalityTest(data, printSig, *measures):\n '''This function computes the normality test for the variables included.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is\n a boolean variable, True: the function only prints the significative results, False:\n the function prints all the values (bool). *measures contain all the variables to\n compute the test over (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n results = OrderedDict()\n for i in range(len(measures)):\n testName = measures[i]\n res = stats.normaltest(data[measures[i]])\n results[testName] = res\n table_matrix = [['Normality test', 'Test Statistic', 'p-Value']]\n if printSig:\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1]\n if pVal < 0.05:\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n else:\n m = results.keys()\n for k in range(len(m)):\n table_matrix.append([m[k], results[m[k]][0], results[m[k]][1]])\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n\n# -----GROUPED T-TEST FUNCTIONS-----------------------------------------------------------------------------------------\n\n\ndef analyzeBy(data, sortBy):\n '''This function sorts a data dictionary in different dictionaries, one for each category in the grouping\n variable.\n INPUT: data is the dictionary containing the data names and values (dict). sortBy is the name of the\n grouping variable (string).\n OUTPUT: The output is a dictionary containing several dictionaries, one for each grouping category (dict).'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. 
Use dataRead function to import your excel data.')\n else:\n if not isinstance(sortBy, basestring):\n print('Error: sortBy must be a string with the name of the variable by wich you would want to group the'\n ' data.')\n else:\n tempData = data.copy()\n groupList = tempData[sortBy]\n del tempData[sortBy]\n cat = Counter(groupList)\n categories = cat.keys()\n sortedData = OrderedDict()\n for i in range(len(categories)):\n sortedData[categories[i]] = OrderedDict()\n for i in range(len(tempData.keys())):\n for j in range(len(sortedData.keys())):\n sortedData[sortedData.keys()[j]][tempData.keys()[i]] = []\n for i in range(len(groupList)):\n for j in range(len(tempData.keys())):\n sortedData[groupList[i]][tempData.keys()[j]].append(tempData[tempData.keys()[j]][i])\n return sortedData\n\n\ndef groupedPairedTtest(data, sortBy, printSig, *measures):\n '''This function computes the paired T-test for pairs of measures from data dictionary.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is\n a boolean variable, True: the function only prints the significative results, False:\n the function prints all the values (bool). *measures contain all the pairs of\n variables to compare (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n sortedData = analyzeBy(data, sortBy)\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n if len(measures) % 2 == 0:\n fullResults = OrderedDict()\n for i in range(len(sortedData.keys())):\n groupName = sortedData.keys()[i]\n tempData = sortedData[sortedData.keys()[i]]\n results = OrderedDict()\n for j in range(0, len(measures), 2):\n testName = measures[j] + '/' + measures[j + 1]\n res = stats.ttest_rel(tempData[measures[j]], tempData[measures[j + 1]])\n results[testName] = res\n fullResults[groupName] = results\n table_matrix = [['', 'Paired T-test', 'Test Statistic', 'p-Value']]\n for i in range(len(fullResults.keys())):\n if printSig:\n results = fullResults[fullResults.keys()[i]]\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1]\n if pVal < 0.05:\n if k == 0:\n table_matrix.append([fullResults.keys()[i], m[k], results[m[k]][0], results[m[k]][1]])\n else:\n table_matrix.append(['', m[k], results[m[k]][0], results[m[k]][1]])\n else:\n results = fullResults[fullResults.keys()[i]]\n m = results.keys()\n for k in range(len(m)):\n if k == 0:\n table_matrix.append([fullResults.keys()[i], m[k], results[m[k]][0], results[m[k]][1]])\n else:\n table_matrix.append(['', m[k], results[m[k]][0], results[m[k]][1]])\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n else:\n print('Error: Measures must be paired two by two')\n return table_matrix\n\n\ndef groupedIndepTtest(data, sortBy, printSig, groupBy, *measures):\n '''This function computes the paired T-test for pairs of measures from data dictionary.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is\n a boolean variable, True: the function only prints the significative results, False:\n the function prints all the values (bool). 
*measures contain all the pairs of\n variables to compare (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n sortedData = analyzeBy(data, sortBy)\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n if not isinstance(groupBy, list) and len(groupBy) == 3:\n print('Error: groupBy must be a list with three elements, the first one is the variable of grouping,'\n ' the second and the third are the groups to compare.')\n else:\n fullResults = OrderedDict()\n for i in range(len(sortedData.keys())):\n groupName = sortedData.keys()[i]\n tempData = sortedData[sortedData.keys()[i]]\n indexG1 = []\n indexG2 = []\n results = OrderedDict()\n for j in range(len(tempData[groupBy[0]])):\n if tempData[groupBy[0]][j] == groupBy[1]:\n indexG1.append(j)\n elif tempData[groupBy[0]][j] == groupBy[2]:\n indexG2.append(j)\n for j in range(len(measures)):\n m1 = []\n m2 = []\n for g1 in range(len(indexG1)):\n m1.append(tempData[measures[j]][g1])\n for g2 in range(len(indexG2)):\n m2.append(tempData[measures[j]][g2])\n levene = stats.levene(m1, m2)\n if levene[1] > 0.05:\n testName = measures[j] + ' (' + groupBy[1] + '/' + groupBy[2] + ')'\n res = stats.ttest_ind(m1, m2, equal_var=True)\n results[testName] = [levene, res]\n elif levene[1] < 0.05:\n testName = measures[j] + ' (' + groupBy[1] + '/' + groupBy[2] + ')'\n res = stats.ttest_ind(m1, m2, equal_var=False)\n results[testName] = [levene, res]\n fullResults[groupName] = results\n table_matrix = [['', 'Independent T-test', 'Levene Statistic', 'Levene p-Value', 'Test Statistic',\n 'p-Value']]\n for i in range(len(fullResults.keys())):\n if printSig:\n results = fullResults[fullResults.keys()[i]]\n m = results.keys()\n for k in range(len(m)):\n pVal = results[m[k]][1][1]\n if pVal < 0.05:\n if k == 0:\n table_matrix.append([fullResults.keys()[i], m[k], results[m[k]][0][0],\n results[m[k]][0][1], results[m[k]][1][0], results[m[k]][1][1]])\n else:\n table_matrix.append(['', m[k], results[m[k]][0][0],\n results[m[k]][0][1], results[m[k]][1][0], results[m[k]][1][1]])\n else:\n results = fullResults[fullResults.keys()[i]]\n m = results.keys()\n for k in range(len(m)):\n if k == 0:\n table_matrix.append([fullResults.keys()[i], m[k], results[m[k]][0][0],\n results[m[k]][0][1], results[m[k]][1][0], results[m[k]][1][1]])\n else:\n table_matrix.append(['', m[k], results[m[k]][0][0],\n results[m[k]][0][1], results[m[k]][1][0], results[m[k]][1][1]])\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n\n\n# -----ANOVA FUNCTIONS--------------------------------------------------------------------------------------------------\n\n\ndef repeatedMeasuresAnova(data, subID, conditionName, *measures):\n '''This function computes a ANOVA for repeated measures over the variables defined along\n with the condition factor.\n INPUT: data is the dictionary containing the data names and values (dict). subID is the name\n of the variable that codes the identifier of the subjects(string). conditionName is the\n name of the condition over you want to compute the ANOVA (string). 
*measures contain\n pairs of variable / condition over you want to compute the ANOVA.\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(subID, basestring):\n print ('Error: subID must be a string containing the name of the variable with the subjects ID.')\n else:\n if not isinstance(conditionName, basestring):\n print ('Error: conditionName must be a string containing the name of the condition analyzed in the'\n ' anova.')\n else:\n errorCount = 0\n for elem in range(len(measures)):\n if not isinstance(measures[elem], tuple):\n errorCount = errorCount + 1\n if errorCount != 0:\n print('Error: measures must contain tuples with a data variable and an associated condition.')\n else:\n Sub = namedtuple('Sub', ['Sub_id', 'measure', 'condition'])\n df = pt.DataFrame()\n for i in range(len(measures)):\n meas = data[measures[i][0]]\n for j in range(len(meas)):\n df.insert(Sub(data[subID][j], meas[j], measures[i][1])._asdict())\n aov = df.anova('measure', sub='Sub_id', wfactors=['condition'])\n table_matrix = [['Source', '', 'Type III SS', 'df', 'SM', 'F', '.Sig'],\n\n [conditionName, 'Sphericity Assumed', aov[('condition',)]['ss'], aov[('condition',)]['df'],\n aov[('condition',)]['mss'], aov[('condition',)]['F'], aov[('condition',)]['p']],\n\n ['', 'Greenhouse-Geiser', aov[('condition',)]['ss'], aov[('condition',)]['df_gg'],\n aov[('condition',)]['mss_gg'], aov[('condition',)]['F_gg'], aov[('condition',)]['p_gg']],\n\n ['', 'Hyunh-Feldt', aov[('condition',)]['ss'], aov[('condition',)]['df_hf'],\n aov[('condition',)]['mss_hf'], aov[('condition',)]['F_hf'], aov[('condition',)]['p_hf']],\n\n ['', 'Box', aov[('condition',)]['ss'], aov[('condition',)]['df_lb'], aov[('condition',)]['mss_lb'],\n aov[('condition',)]['F_lb'], aov[('condition',)]['p_lb']],\n\n ['Error(' + conditionName + ')', 'Sphericity Assumed', aov[('condition',)]['sse'], aov[('condition',)]['dfe'],\n aov[('condition',)]['mse'], '-', '--'],\n\n ['', 'Greenhouse-Geiser', aov[('condition',)]['sse'], aov[('condition',)]['dfe_gg'],\n aov[('condition',)]['mse_gg'], '-', '--'],\n\n ['', 'Hyunh-Feldt', aov[('condition',)]['sse'], aov[('condition',)]['dfe_hf'],\n aov[('condition',)]['mse_hf'], '-', '--'],\n\n ['', 'Box', aov[('condition',)]['sse'], aov[('condition',)]['dfe_lb'],\n aov[('condition',)]['mse_lb'], '-', '--']]\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n\n\n# -----POST HOC-TESTs---------------------------------------------------------------------------------------------------\n\n\ndef repMeasBonferroniCorrect(data, printSig, *measures):\n '''This function computes the Bonferroni correction for pairwise combination of measures\n from data dictionary.\n INPUT: data is the dictionary containing the data names and values (dict). printSig is\n a boolean variable, True: the function only prints the significative results, False:\n the function prints all the values (bool). *measures contain all the variables to\n compute the pairwise tests (strings).\n OUTPUT: The function prints a table in the terminal containing all the tests computed.'''\n if not isinstance(data, dict):\n print ('Error: data must be a dict. Use dataRead function to import your excel data.')\n else:\n if not isinstance(printSig, bool):\n print ('Error: printSig must be a bool. 
True: the function only prints the siginificative results/ False: '\n 'the function prints all the results.')\n else:\n n = len(measures)\n results = OrderedDict()\n for i in range(0, len(measures)):\n results[measures[i]] = []\n for j in range(0, len(measures)):\n if j != i:\n res = stats.ttest_rel(data[measures[i]], data[measures[j]])\n results[measures[i]].append([measures[j], res])\n table_matrix = [['Bonferroni correction', 'Pairwise T-test', 'Test Statistic', 'p-Value']]\n if printSig:\n m = results.keys()\n for k in range(len(m)):\n count = 0\n for t in range(len(results[m[k]])):\n pVal = results[m[k]][t][1][1]\n if pVal < (0.05 / n):\n count = count + 1\n if count == 1:\n table_matrix.append([m[k], results[m[k]][t][0], results[m[k]][t][1][0], results[m[k]][t][1][1]])\n else:\n table_matrix.append(['', results[m[k]][t][0], results[m[k]][t][1][0], results[m[k]][t][1][1]])\n table_matrix.append(['Sig if p-Value < ' + str(0.05 / n), '-', '--', '---'])\n else:\n m = results.keys()\n for k in range(len(m)):\n for t in range(len(results[m[k]])):\n if t == 0:\n table_matrix.append(\n [m[k], results[m[k]][t][0], results[m[k]][t][1][0], results[m[k]][t][1][1]])\n else:\n table_matrix.append(\n ['', results[m[k]][t][0], results[m[k]][t][1][0], results[m[k]][t][1][1]])\n table_matrix.append(['Sig if p-Value < ' + str(0.05 / n), '-', '--', '---'])\n table = PT(table_matrix[0])\n for row in range(1, len(table_matrix)):\n table.add_row(table_matrix[row])\n print table\n return table_matrix\n"
}
] | 2 |
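
StatisticsFunctions.py above is Python 2 code wrapping `scipy.stats` tests behind Excel input (`dataRead`) and PrettyTable output. A minimal sketch of the core call that `pairedTtest` issues per pair of measures, with invented sample data standing in for a `dataRead` result:

```python
from scipy import stats

# Hypothetical paired measurements (two columns dataRead() would
# normally pull from an .xls sheet, one pair per subject).
pre = [12.1, 11.8, 13.0, 12.4, 11.9, 12.7]
post = [12.9, 12.4, 13.6, 12.8, 12.5, 13.1]

# stats.ttest_rel is the same call pairedTtest() makes for each pair
# of measures; it returns the t statistic and the two-sided p-value.
statistic, pvalue = stats.ttest_rel(pre, post)
print("t = %.3f, p = %.4f" % (statistic, pvalue))

# With printSig=True the module only reports pairs where p < 0.05.
if pvalue < 0.05:
    print("significant at alpha = 0.05")
```
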
koegs30/python-challenge | https://github.com/koegs30/python-challenge | be1f339f83ec91b72c9111bdcd369fe4a4b4e726 | 34019263f789c4a1ae5024fefa2df7923016acf6 | da2ef2780b53a0c50175e2e7eaa6bcd14b50b1d1 | refs/heads/master | 2020-11-24T00:54:40.531956 | 2019-12-30T21:41:16 | 2019-12-30T21:41:16 | 227,891,774 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6269503831863403,
"alphanum_fraction": 0.6319149136543274,
"avg_line_length": 30.35555648803711,
"blob_id": "fb490fd4d672238335433761fac0f9e34bdd72b3",
"content_id": "480005f46c6008a55a3024e5ad892f419805a81b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1410,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 45,
"path": "/PyPoll/PyPoll.final.py",
"repo_name": "koegs30/python-challenge",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\n\npoll_csv = os.path.join('Resources','election_data.csv')\n\nwith open(poll_csv,newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n csv_header = next(csvreader)\n\n poll_list = list(csvreader)\n\n# split nested list into three lists\nvoterID_list, county_list, candidate_list = map(list, zip(*poll_list))\n\ntotal_votes = len(voterID_list)\nprint(\"Election Results\")\nprint(\"----------------------------\")\nprint(\"Total Votes: \"+ str(total_votes))\nprint(\"----------------------------\")\n\n# determine list of candidates who received votes\nseen = set()\nuniq = []\nfor x in candidate_list:\n if x not in seen:\n uniq.append(x)\n seen.add(x)\n\n# total number of votes each candidate received\ncandidate_count = [[candidate,candidate_list.count(candidate)] for candidate in set(candidate_list)]\n\n# percentage of total votes for each candidate\ncand_sorted = sorted(candidate_count, key = lambda x: x[1], reverse=True)\npercentage = float(0)\nfor (name,votes) in cand_sorted:\n percentage = (votes / total_votes)*100\n formatted_perc = '{:.3f}%'.format(percentage)\n print(str(name) + \": \" + str(formatted_perc) + \" (\" + str(votes) + \")\")\n\n# winner of election based on popular vote\nname_list, vote_list = map(list, zip(*cand_sorted))\nwinner = name_list[0]\nprint(\"----------------------------\")\nprint(\"Winner: \" + str(winner))\nprint(\"----------------------------\")"
},
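
PyPoll.final.py above tallies votes by hand with `set` membership and `list.count`; `collections.Counter` expresses the same computation in one pass. A sketch (Python 3, with hypothetical ballots standing in for the CSV's candidate column):

```python
from collections import Counter

# Hypothetical candidate column (PyPoll reads this from
# Resources/election_data.csv).
ballots = ["Khan", "Correy", "Khan", "Li", "Khan", "Correy"]

total_votes = len(ballots)
tally = Counter(ballots)  # one pass instead of list.count() per name

# most_common() already yields (name, votes) sorted descending,
# the same order PyPoll builds with sorted(..., reverse=True).
for name, votes in tally.most_common():
    print("%s: %.3f%% (%d)" % (name, votes / total_votes * 100, votes))

winner, _ = tally.most_common(1)[0]
print("Winner: " + winner)
```
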
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 10,
"blob_id": "131a2e79bd505dd12dbe7e1f4f889d833539ad3a",
"content_id": "e194f11b2e1bf943ab2b5f61750073df35da881c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 3,
"path": "/README.md",
"repo_name": "koegs30/python-challenge",
"src_encoding": "UTF-8",
"text": "# python-challenge\n\n# Homework 3\n"
},
{
"alpha_fraction": 0.6183544397354126,
"alphanum_fraction": 0.6227847933769226,
"avg_line_length": 31.26530647277832,
"blob_id": "ee1cf41cc3f261902cf3bbe4347ca4d7d59798ec",
"content_id": "1ab402d2b52e7e1b310810ce88c3b5ed61483cd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1580,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 49,
"path": "/PyBank/PyBank.final.py",
"repo_name": "koegs30/python-challenge",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\n\nbank_csv = os.path.join('Resources', \"budget_data.csv\")\n\nwith open(bank_csv,newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n csv_header = next(csvreader)\n \n print(\"Financial Analysis\")\n print(\"--------------------------------\") \n\n# find total number of months included in dataset\n bank_list = list(csvreader)\n # print(bank_list)\n total_months = len(bank_list)\n print(\"Total Months: \" + str(total_months)) \n \n# find the net total of \"Profits/Losses\" over the entire period \nwith open(bank_csv,newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n sum_total = 0\n for row in csvreader:\n sum_total = sum(int(row[1]) for row in csvreader)\n formatted_sum = '${}'.format(sum_total)\n print(\"Total: \" + str(formatted_sum))\n\ndate_list, PL_list = map(list, zip(*bank_list))\n\nPL_list = list(map(int, PL_list))\n\ndiff = [j - i for i, j in zip(PL_list[: -1], PL_list[1 :])] \n\n# def Average(diff): \navg_change = float((sum(diff) / len(diff)))\navg_formatted = '${:.2f}'.format(avg_change)\nprint(\"Average Change: \" + str(avg_formatted))\n\nmax_increase = (max(diff))\nmax_inc_formatted = '${}'.format(max_increase)\n\ndate_inc = (date_list[diff.index(max(diff))+1])\nprint(\"Greatest Increase in Profits: \" + date_inc + \" \" + \"(\" + str(max_inc_formatted) +\")\")\nmax_decrease = (min(diff))\nmax_dec_formatted = '${}'.format(max_decrease)\n\ndate_dec = (date_list[diff.index(min(diff))+1])\nprint(\"Greatest Decrease in Profits: \" + date_dec + \" \" + \"(\" + str(max_dec_formatted) + \")\")"
}
] | 3 |
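
PyBank.final.py above derives month-over-month changes with a pairwise `zip`, then offsets the date lookup by one because each change describes the move into the later month. A small self-contained sketch of that logic with invented figures:

```python
# Hypothetical rows standing in for Resources/budget_data.csv.
months = ["Jan-2010", "Feb-2010", "Mar-2010", "Apr-2010"]
profits = [867884, 984655, 322013, -69417]

# Pairwise differences, the same zip trick PyBank uses.
changes = [b - a for a, b in zip(profits[:-1], profits[1:])]

print("Total Months: %d" % len(months))
print("Total: $%d" % sum(profits))
print("Average Change: $%.2f" % (sum(changes) / len(changes)))

# changes[i] is the move from months[i] to months[i + 1], hence the +1.
inc = max(changes)
print("Greatest Increase: %s ($%d)" % (months[changes.index(inc) + 1], inc))
dec = min(changes)
print("Greatest Decrease: %s ($%d)" % (months[changes.index(dec) + 1], dec))
```
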
chrono-meter/katagami.py | https://github.com/chrono-meter/katagami.py | 99177d0a714c6e2ad590129f9088460a7709b64c | 1b1642729396ed94845bff88cabaad90cb49da2d | 656ef361224f8256afa02e2a844437b04097ca8e | refs/heads/master | 2020-06-07T12:58:45.455021 | 2014-09-24T05:20:25 | 2014-09-24T05:20:25 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.49779197573661804,
"alphanum_fraction": 0.5108403563499451,
"avg_line_length": 29.632902145385742,
"blob_id": "e41cd164958077e959e73bee6650ccfa1a5022e1",
"content_id": "c88c9460348023c539569d7ad21e1d604d6f4ad3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45063,
"license_type": "no_license",
"max_line_length": 251,
"num_lines": 1471,
"path": "/katagami.py",
"repo_name": "chrono-meter/katagami.py",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nr\"\"\"katagami: a simple xml/html template library\n============================================\n\nThis library is one of many `Python templating libraries\n<http://wiki.python.org/moin/Templating>`_.\n\n\nFeatures\n--------\n * Based on XML's Processing instructions (`<?...?>`)\n * Simple features\n * Python script inside XML/HTML with any level indentation\n * `Inline Python expression`_\n * `Embed Python script`_\n * `Block structure`_\n * `Encoding detection`_\n * `Iteratable rendering`_\n * Supports both of Python 2 and Python 3\n * As fast as `mako <http://www.makotemplates.org/>`_\n\n\nExample\n-------\n\nMake a HTML string with `inline Python expression`_ and Python's `for` (`Block\nstructure`_)::\n\n >>> from katagami import render_string, dprint as print\n >>> print(render_string('''<html>\n ... <body>\n ... <? for name in names: {?>\n ... <p>hello, <?=name?></p>\n ... <?}?>\n ... </body>\n ... </html>''', {'names': ['world', 'python']}))\n <html>\n <body>\n <BLANKLINE>\n <p>hello, world</p>\n <BLANKLINE>\n <p>hello, python</p>\n <BLANKLINE>\n </body>\n </html>\n\n\nInline Python expression\n------------------------\n\nThis feature evaluates your inline expression and output to result::\n\n >>> print(render_string('''<html><body>\n ... <?='hello, world'?>\n ... </body></html>'''))\n <html><body>\n hello, world\n </body></html>\n\nBy the default, this example raises an exception, evaluated expression must be\n`str` (`unicode` in Python 2)::\n\n >>> print(render_string('''<html><body>\n ... <?=1?>\n ... </body></html>''')) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: Can't convert 'int' object to str implicitly\n\nSet the `cast_string` feature::\n\n >>> print(render_string('''<?py\n ... from katagami import cast_string\n ... ?><html><body>\n ... <?=1?>\n ... </body></html>'''))\n <html><body>\n 1\n </body></html>\n\nAlso set the `except_hook` feature::\n\n >>> print(render_string('''<?py\n ... from katagami import except_hook\n ... ?><html><body>\n ... <?=1?>\n ... </body></html>'''))\n <html><body>\n Can't convert 'int' object to str implicitly\n </body></html>\n\n\nEmbed Python script\n-------------------\n\nAll indentation will be arranged automatically::\n\n >>> print(render_string('''<html>\n ... <?py\n ... # It is a top level here. This works fine.\n ... if 1:\n ... msg = 'message from indented script'\n ... ?>\n ... <body>\n ... <p><?=msg?></p>\n ... <?py msg = 'message from single line script' # This works fine too. ?>\n ... <p><?=msg?></p>\n ... <? if 1: {?>\n ... <?py\n ... # Is is nested here. This also works fine.\n ... msg = 'message from nested indented script'\n ... ?>\n ... <p><?=msg?></p>\n ... <?}?>\n ... </body>\n ... </html>'''))\n <html>\n <BLANKLINE>\n <body>\n <p>message from indented script</p>\n <BLANKLINE>\n <p>message from single line script</p>\n <BLANKLINE>\n <BLANKLINE>\n <p>message from nested indented script</p>\n <BLANKLINE>\n </body>\n </html>\n\n\nBlock structure\n---------------\n\nIndentation with C-style block structure::\n\n >>> print(render_string('''<html>\n ... <body>\n ... <p>hello, \n ... <? try: {?>\n ... <?=name?>\n ... <?} except NameError: {?>\n ... NameError\n ... <?} else: {?>\n ... never output here\n ... <?}?>\n ... </p>\n ... </body>\n ... </html>'''))\n <html>\n <body>\n <p>hello, \n <BLANKLINE>\n <BLANKLINE>\n NameError\n <BLANKLINE>\n </p>\n </body>\n </html>\n\nNote\n~~~~\n\n * '<? }' and '{ ?>' are wrong. Don't insert space. 
'<?}' and '{?>' are correct.\n * Ending colon (':') is required.\n * Block closing '<?}?>' is required.\n\n\nIteratable rendering\n--------------------\n\nRender with iteration::\n\n >>> renderer = render_string('''<html><body>\n ... <p>hello, <?= name ?></p>\n ... </body></html>''', {'name': 'world'}, flags=returns_iter)\n >>> print(list(renderer))\n ['<html><body>\\n <p>hello, ', 'world', '</p>\\n </body></html>']\n\n\nEncoding detection\n------------------\n\nEncoding will be detected automatically::\n\n >>> print(render_string(b'''<html>\n ... <head><meta charset=\"shift-jis\"></head>\n ... <body>\\x93\\xfa\\x96{\\x8c\\xea</body>\n ... </html>'''))\n <html>\n <head><meta charset=\"shift-jis\"></head>\n <body>\\u65e5\\u672c\\u8a9e</body>\n </html>\n\nSupported formats:\n\n * <?xml encoding=\"ENCODING\"?>\n * <meta charset=\"ENCODING\">\n * <meta http-equiv=\"Content-Type\" content=\"MIMETYPE; ENCODING\">\n\n\nTips\n----\n\nGet the translated result::\n\n >>> renderer = render_string('''<html><body>\n ... <p><?= name ?></p>\n ... </body></html>''',\n ... flags=returns_renderer)\n >>> print(renderer.script)\n __file__ = \"<template-script#0>\"\n __encoding__ = \"utf-8\"\n def __main__():\n yield \"<html><body>\\n <p>\"\n # -*- line 2, column 7 -*-\n yield name\n yield \"</p>\\n </body></html>\"\n\nHistory\n-------\n\n * 2.1.0 remove caching, remove i18n, fix bug, add `wheezy.web.templates` support\n * 2.0.2 fix empty template error, change behavior of feature flags import\n * 2.0.1 improve backward compatibility of the test\n * 2.0.0 change a lot and add some features\n * 1.1.0 change api, add except_handler, add shorthand of gettext (<?_message?>),\n some fixes\n * 1.0.3 fix ignoring `encoding` argument, fix indent bug, add `renderString`\n * 1.0.2 improve doctest compatibility, some fixes\n * 1.0.1 fix bugs, docs, speed\n * 1.0.0 remove backward compatibility\n\"\"\"\n\ffrom __future__ import print_function, unicode_literals, division\n\n__version__ = '2.1.0'\n__author__ = __author_email__ = '[email protected]'\n__license__ = 'PSF'\n__url__ = 'http://pypi.python.org/pypi/katagami'\n# http://pypi.python.org/pypi?%3Aaction=list_classifiers\n__classifiers__ = [i.strip() for i in '''\\\n Development Status :: 4 - Beta\n Intended Audience :: Developers\n License :: OSI Approved :: Python Software Foundation License\n Operating System :: OS Independent\n Programming Language :: Python :: 2.7\n Programming Language :: Python :: 3\n Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: Text Processing :: Markup :: HTML\n Topic :: Text Processing :: Markup :: XML\n '''.strip().splitlines()]\n #Development Status :: 5 - Production/Stable\n\nimport sys\nimport traceback\nimport os\nimport os.path\nimport re\nimport io\nimport tokenize\nimport pprint\nimport unicodedata\nimport logging; logger = logging.getLogger(__name__); del logging\nimport unittest\n\n\n__all__ = (\n 'render_file',\n 'render_string',\n 'render_resource',\n 'returns_bytes',\n 'returns_iter',\n 'returns_renderer',\n )\n\n\n\f#\n# constants\n#\nfeatures = (\n 'cast_string',\n 'except_hook',\n )\nTAB = ' '\nPREFIX, SUFFIX = '<?', '?>'\nreturns_bytes = 1\nreturns_iter = 2\nreturns_renderer = 4\ncast_string = 10\nexcept_hook = 20\nnotgiven = object()\n\n\n\f#\n# backward compatibility\n#\n# NOTE: PEP: 373, Extend Python 2.7 life till 2020.\nif sys.version < '2.7':\n raise RuntimeError('not supported version: %s' % sys.version)\nif sys.version 
< '3':\n # doctest compatibility with Python 2 and Python 3\n __doc__ = re.sub(\n \"Can't convert '(.*?)' object to str implicitly\",\n \"Can't convert '\\\\1' object to unicode implicitly\",\n __doc__)\n BytesType = str\n StringType = unicode\n def next(generator):\n return generator.next()\nelse:\n BytesType = bytes\n StringType = str\n\n\ndef Py_UNICODE_ISPRINTABLE(ch):\n \"\"\"Returns 1 for Unicode characters to be hex-escaped when repr()ed,\n 0 otherwise.\n All characters except those characters defined in the Unicode character\n database as following categories are considered printable.\n * Cc (Other, Control)\n * Cf (Other, Format)\n * Cs (Other, Surrogate)\n * Co (Other, Private Use)\n * Cn (Other, Not Assigned)\n * Zl Separator, Line ('\\u2028', LINE SEPARATOR)\n * Zp Separator, Paragraph ('\\u2029', PARAGRAPH SEPARATOR)\n * Zs (Separator, Space) other than ASCII space('\\x20').\n\n http://hg.python.org/releasing/3.4.1/file/ea310ca42bb2/Objects/unicodectype.c#l147\n \"\"\"\n return unicodedata.category(ch) not in ('Cc', 'Cf', 'Cs', 'Co', 'Cn', 'Zl',\n 'Zp', 'Zs')\n\n\ndef py3_repr_str(string):\n \"\"\"Python 3 str.__repr__\n http://hg.python.org/releasing/3.4.1/file/ea310ca42bb2/Objects/unicodeobject.c#l12289\n \"\"\"\n if sys.version >= '3':\n return repr(string)\n\n quote = '\\''\n if '\\'' in string:\n if '\"' in string:\n pass\n else:\n quote = '\"'\n\n result = quote\n\n for c in string:\n if c in (quote, '\\\\'):\n result += '\\\\' + c\n elif c == '\\t':\n result += '\\\\t'\n elif c == '\\n':\n result += '\\\\n'\n elif c == '\\r':\n result += '\\\\r'\n elif ord(c) < ord(' ') or ord(c) == 0x7f:\n result += '\\\\x%2.2x' % ord(c)\n elif ord(c) < 0x7f:\n result += c\n elif Py_UNICODE_ISPRINTABLE(c):\n result += c\n else:\n if ord(c) <= 0xff:\n result += '\\\\x%2.2x' % ord(c)\n elif ord(c) <= 0xffff:\n result += '\\\\u%4.4x' % ord(c)\n else:\n result += '\\\\U00%6.6x' % ord(c)\n\n result += quote\n\n return result\n\n\ndef py3_repr_bytes(bytes):\n \"\"\"Python 3 bytes.__repr__\n http://hg.python.org/releasing/3.4.1/file/ea310ca42bb2/Objects/bytesobject.c#l593\n \"\"\"\n if sys.version >= '3':\n return repr(bytes)\n\n # quote = '\"' if '\\'' in object else '\\''\n quote = b'\\''\n\n result = 'b' + quote\n\n for c in bytes:\n if c in (quote, b'\\\\'):\n result += '\\\\' + c\n elif c == b'\\t':\n result += '\\\\t'\n elif c == b'\\n':\n result += '\\\\n'\n elif c == b'\\r':\n result += '\\\\r'\n elif ord(c) < ord(b' ') or ord(c) >= 0x7f:\n result += '\\\\x%2.2x' % ord(c)\n else:\n result += c\n\n result += quote\n\n return result\n\n\nclass PrettyPrinter(pprint.PrettyPrinter):\n\n def format(self, object, context, maxlevels, level):\n if isinstance(object, StringType):\n return py3_repr_str(object), True, False\n elif isinstance(object, BytesType):\n return py3_repr_bytes(object), True, False\n\n return pprint._safe_repr(object, context, maxlevels, level)\n\n\ndef dprint(object):\n \"\"\"Print raw if object is string, else print repr-ed object with pprint.\n \"\"\"\n if isinstance(object, StringType):\n print(object)\n else:\n PrettyPrinter().pprint(object)\n\n\n\f#\n# utility functions\n#\n\ndef execcode(code, globals):\n \"\"\"This function resolves this problem in Python 2::\n SyntaxError: unqualified exec is not allowed in function it is a nested function\n \"\"\"\n exec(code, globals)\n\n\ndef literalize(s):\n # `unicode_escape` escapes '\\'' and '\\t' and '\\n' and '\\r' and '\\\\'.\n return '\"%s\"' % s.encode('unicode_escape').decode().replace('\"', '\\\\\"')\n\n\ndef 
decorate_attributes(**kwargs):\n \"\"\"attributes decorator\"\"\"\n def result(function):\n for i in kwargs.items():\n setattr(function, *i)\n return function\n return result\n\n\nclass PythonTokens(list):\n\n @classmethod\n def tokenize(cls, readline):\n return cls(tokenize.generate_tokens(readline))\n\n def untokenize(self, ignore_position=True):\n tokens = self\n if sys.version < '3':\n # NOTE: Is this bug of tokenize.untokenize() ?\n # http://bugs.python.org/issue?%40columns=id%2Cactivity%2Ctitle%2Ccreator%2Cassignee%2Cstatus%2Ctype&%40sort=-activity&%40filter=status&%40action=searchid&ignore=file%3Acontent&%40search_text=untokenize+&submit=search&status=-1%2C1%2C2%2C3\n tokens = [(tokenize.NL, '\\n', (1, 0), (1, 1), '\\n')] + tokens\n if ignore_position:\n tokens = (token[:2] for token in tokens)\n return tokenize.untokenize(tokens)\n\n @classmethod\n def from_string(cls, string):\n assert isinstance(string, StringType)\n\n return cls.tokenize(io.StringIO(string).readline)\n\n @classmethod\n def from_file(cls, file):\n try:\n import pathlib\n except ImportError:\n Path = type(None)\n else:\n Path = pathlib.Path\n\n if isinstance(file, Path):\n with file.open() as fp:\n return cls.tokenize(fp.readline)\n else:\n with open(file) as fp:\n return cls.tokenize(fp.readline)\n\n @property\n def linetokens(self):\n chunk = []\n for token in self:\n chunk.append(token)\n if token[0] in (tokenize.NEWLINE, tokenize.NL):\n yield chunk\n chunk = []\n\n if chunk:\n yield chunk\n\n def strip_comments(self):\n for token in self[:]:\n if token[0] == tokenize.COMMENT:\n self.remove(token)\n\n def set_indent(self, indent=''):\n r\"\"\"Justify indentation.\n\n >>> tokens = PythonTokens.from_string('''\n ... # comment\n ... a\n ... if 1:\n ... b\n ... ''')\n >>> tokens.set_indent()\n >>> dprint(tokens.untokenize())\n <BLANKLINE>\n # comment\n a \n if 1 :\n b \n <BLANKLINE>\n\n >>> tokens = PythonTokens.from_string('''\n ... # comment\n ... a\n ... if 1:\n ... b\n ... ''')\n >>> tokens.set_indent(TAB * 2)\n >>> dprint(tokens.untokenize())\n <BLANKLINE>\n # comment\n a \n if 1 :\n b \n <BLANKLINE>\n \"\"\"\n TokenInfo = getattr(tokenize, 'TokenInfo', lambda *a: a)\n first_indent = None\n\n for i, token in enumerate(self):\n # found firstmost indent\n if first_indent is None:\n if token[0] == tokenize.INDENT:\n first_indent = token[1]\n # NOTE: no tokenize.ENCODING in python2\n elif token[0] in (tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT):\n pass\n else:\n first_indent = ''\n\n # string = indent string starts from beggining of the line\n if token[0] == tokenize.INDENT:\n if first_indent:\n assert token[1].startswith(first_indent)\n token = TokenInfo(token[0],\n indent + token[1][len(first_indent):],\n *token[2:])\n else:\n token = TokenInfo(token[0], indent + token[1], *token[2:])\n\n self[i] = token\n\n if not first_indent:\n self.insert(0, (tokenize.INDENT, indent))\n\n def get_first_tokens(self):\n r\"\"\"Get the firstline tokens.\n\n >>> tokens = PythonTokens.from_string('''\n ... # comment\n ... \"docstring\"\n ... some_statement\n ... 
''')\n >>> dprint(' '.join(token[1] for token in tokens.get_first_tokens()))\n some_statement\n \"\"\"\n found = False\n for token in self:\n # skip comment and docstring\n if token[0] in (tokenize.NL, tokenize.INDENT, tokenize.COMMENT,\n tokenize.STRING, ):\n pass\n elif token[0] == tokenize.NEWLINE:\n if found:\n break\n else:\n found = True\n yield token\n\n def get_encoding(self):\n raise NotImplementedError()\n\n\ndef get_encodings_from_content(bytes):\n r\"\"\"Search xml/html encoding and return it.\n\n >>> dprint(get_encodings_from_content(\n ... b'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'))\n UTF-8\n >>> dprint(get_encodings_from_content(\n ... b\"<?xml version='1.0' encoding='UTF-8'?>\"))\n UTF-8\n >>> dprint(get_encodings_from_content(\n ... b'<?xml version=1.0 encoding=UTF-8?>'))\n UTF-8\n\n >>> dprint(get_encodings_from_content(b'<meta charset=\"UTF-8\">'))\n UTF-8\n >>> dprint(get_encodings_from_content(b\"<meta charset='UTF-8'>\"))\n UTF-8\n >>> dprint(get_encodings_from_content(b'<meta charset=UTF-8>'))\n UTF-8\n\n >>> dprint(get_encodings_from_content(\n ... b'<meta http-equiv=\"Content-Type\" content=\"text/html; ' \\\n ... b'charset=UTF-8\">'))\n UTF-8\n >>> dprint(get_encodings_from_content(\n ... b\"<meta http-equiv='Content-Type' content='text/html; \" \\\n ... b\"charset=UTF-8'>\"))\n UTF-8\n \"\"\"\n # TODO: see requests.utils.get_encodings_from_content\n encoding_patterns = (\n # <?xml encoding=\"utf-8\"?>\n (b'<\\?xml\\\\s+.*?encoding=[\"\\']?([^\\\\s\"\\']+)[\"\\']?.*?\\?>', ),\n # <meta charset=\"UTF-8\">\n # <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n (b'<meta\\\\s+.*?charset=[\"\\']?([^\\\\s\"\\']+)[\"\\']?.*?>', ),\n )\n\n if not isinstance(bytes, BytesType):\n bytes = bytes.encode('ascii', 'ignore')\n\n for pattern in encoding_patterns:\n s = bytes\n match = None\n for i in pattern:\n match = re.search(i, s, re.DOTALL | re.IGNORECASE)\n if not match:\n break\n s = match.group(0)\n if match and match.group(1):\n return match.group(1).decode()\n\n return 'utf-8'\n\n\n\f#\n# module functions\n#\n\nclass Translator(object):\n # TODO: subclass or wrap or extend or inherit template...\n _name_counter = 0\n\n def __init__(self, file):\n self._makescript(file)\n\n try:\n self.code = compile(self.script, self.name, 'exec')\n except SyntaxError as e:\n lineno, offset = self._find_original_pos(e.lineno, e.offset)\n new_exception = SyntaxError(\n '%s near the line' % e.msg, (\n e.filename,\n lineno,\n offset,\n self._template_body.splitlines()[lineno - 1],\n ))\n new_exception.__cause__ = e\n raise new_exception\n\n def __call__(self, context, flags=0):\n \"\"\"Execute the template script.\n\n * `context` -- dict. Execution namespace. Note that this argument is\n changed on rendering.\n * `flags` -- Change output behavior. This value is combination of\n returns_bytes or returns_iter.\n * `return` -- str or bytes or generator. 
See `flags`.\n \"\"\"\n result = self._exectamplate(context, flags)\n if flags & returns_iter:\n return result\n elif flags & returns_bytes:\n return BytesType().join(result)\n else:\n return StringType().join(result)\n\n def _makescript(self, file):\n \"\"\"make a script string from a template file\n \n * `file` -- file-like object\n \"\"\"\n # check argument\n if sys.version < '3':\n if not hasattr(file, 'read'):\n raise TypeError('%r is not supported type' % file)\n else:\n if not isinstance(file, io.IOBase):\n raise TypeError('%r is not supported type' % file)\n \n # read all\n template_body = file.read()\n\n # detect encoding\n encoding = getattr(file, 'encoding', '')\n if not encoding:\n try:\n encoding = get_encodings_from_content(template_body)\n except Exception:\n logger.debug('encoding detection error', exc_info=True)\n # check encoding registered in Python\n try:\n b'test string'.decode(encoding)\n except LookupError:\n encoding = ''\n if not encoding:\n encoding = sys.getdefaultencoding()\n if encoding == 'ascii':\n encoding = 'utf-8'\n\n # cast string\n if isinstance(template_body, BytesType):\n template_body = template_body.decode(encoding)\n\n # save variables\n if hasattr(file, 'name'):\n self.name = file.name\n else:\n self.name = '<template-script#%d>' % self._name_counter\n self._name_counter += 1\n self.encoding = encoding\n self.features = 0\n self._template_body = template_body\n\n # loop vars\n self._lines = []\n self._indent = []\n self._current_position = (1, 0)\n self._firstmost_executable = True\n last = 0\n pattern = re.compile(\n re.escape(PREFIX) + '(?P<body>.*?)' + re.escape(SUFFIX), re.DOTALL)\n\n for match in pattern.finditer(template_body):\n start, end = match.span()\n\n # get pos\n _ = template_body[:start].splitlines()\n self._current_position = (len(_), len(_[-1])) if _ else (1, 0)\n del _\n\n # leading chunk\n chunk = template_body[last:start]\n if chunk:\n self._appendline('yield ' + literalize(chunk))\n last = end\n\n # insert marker\n self._appendline(\n '# -*- line %d, column %d -*-' % self._current_position)\n\n # process PI\n chunk = match.group('body')\n\n for i in sorted(i for i in dir(self) if i.startswith('_handle_')):\n handler = getattr(self, i)\n if re.match(handler.pattern, chunk):\n handler(chunk)\n if getattr(handler, 'executable', True):\n self._firstmost_executable = False\n break\n\n # not supported <?...?>\n else:\n chunk = PREFIX + chunk + SUFFIX\n self._appendline('yield ' + literalize(chunk))\n\n # trailing chunk\n chunk = template_body[last:]\n if chunk:\n self._appendline('yield ' + literalize(chunk))\n\n # check remaining indentation\n if self._indent:\n lineno, offset = self._indent[-1]\n raise IndentationError(\n 'brace is not closed', (\n self.name,\n lineno,\n offset,\n self._template_body.splitlines()[lineno - 1],\n ))\n\n # make a script\n prefix = [\n '__file__ = %s' % literalize(self.name),\n # '__name__ = \"__main__\"',\n '__encoding__ = %s' % literalize(self.encoding),\n # make a code as function for `yield` and `return`\n 'def __main__():',\n ]\n if not self._lines:\n self._lines.insert(0, 'pass')\n self.script = '\\n'.join(prefix) + '\\n' \\\n + '\\n'.join(TAB + i for i in self._lines)\n\n # cleanup\n del self._lines\n assert not self._indent\n del self._indent\n del self._current_position\n del self._firstmost_executable\n\n def _exectamplate(self, context, flags=0):\n # see https://github.com/mitsuhiko/jinja2/blob/master/jinja2/debug.py\n # `Python 3.4.1 
<http://hg.python.org/releasing/3.4/file/8671f89107c8/Python/traceback.c>`\n # _Py_DisplaySourceLine -> io.open\n # `Python 2.7.7 <http://hg.python.org/releasing/2.7.7/file/4b38a5a36536/Python/traceback.c>`\n # _Py_DisplaySourceLine -> fopen\n def _fix_error_pos(e):\n filename, lineno, funcname, _ \\\n = traceback.extract_tb(sys.exc_info()[2])[-1]\n\n if filename == self.name and funcname == '__main__':\n lineno, offset = self._find_original_pos(lineno)\n new_exception = type(e)(*e.args)\n new_exception.__cause__ = e\n code = compile('\\n' * (lineno - 1) + 'raise new_exception',\n self.name, 'exec')\n execcode(code, locals())\n\n # python2 doesn't allow using return and yield in same function\n execcode(self.code, context)\n\n executor = context['__main__']()\n # TODO: module['__main__'](**context) ?\n if executor is None: # The template is empty or that has only scripts.\n raise StopIteration\n\n # run (iterate) template code and fetch string chunks\n try:\n value = notgiven\n while 1:\n if value is notgiven:\n try:\n value = next(executor)\n except Exception as e:\n _fix_error_pos(e)\n raise\n\n # TODO: handle generator type\n if not isinstance(value, StringType):\n if self.features & cast_string:\n f = executor.gi_frame\n if f and '__cast_string__' in f.f_locals:\n value = f.f_locals['__cast_string__'](value)\n elif f and '__cast_string__' in f.f_globals:\n value = f.f_globals['__cast_string__'](value)\n else:\n value = StringType(value)\n else:\n continue\n else:\n try:\n value = executor.throw(\n TypeError,\n 'Can\\'t convert \\'%s\\' object to %s implicitly' % (\n type(value).__name__, StringType.__name__))\n except Exception as e:\n _fix_error_pos(e)\n raise\n\n if flags & returns_bytes:\n value = value.encode(self.encoding)\n yield value\n\n value = notgiven\n\n except StopIteration:\n pass\n\n finally:\n executor.close()\n\n def _appendline(self, line):\n self._lines.append(TAB * len(self._indent) + line)\n\n def _embedscript(self, script, posmarker=True):\n tokens = PythonTokens.from_string(script)\n\n if posmarker:\n _tokens = tokens\n tokens = PythonTokens()\n\n for line in _tokens.linetokens:\n pos = line[0][2]\n if pos[0] == 1:\n pos = self._current_position[0] + pos[0] - 1, \\\n self._current_position[1] + pos[1]\n else:\n pos = self._current_position[0] + pos[0] - 1, pos[1]\n tokens.append((tokenize.COMMENT,\n '# -*- line %d, column %d -*-' % pos))\n tokens.append((tokenize.NL, '\\n'))\n tokens.extend(line)\n\n tokens.set_indent(TAB * len(self._indent))\n self._lines.extend(tokens.untokenize().splitlines())\n\n def _find_original_pos(self, lineno, column=0):\n # find markers\n pos = (lineno, 0)\n pattern = re.compile(\n '\\s*# -\\*- line (?P<line>\\d+), column (?P<column>\\d+) -\\*-\\s*')\n\n for currrent_lineno, line in enumerate(self.script.splitlines()):\n currrent_lineno += 1 # lineno starts from 1\n matched = pattern.match(line)\n if matched:\n pos = (int(matched.group('line')),\n int(matched.group('column')))\n if currrent_lineno >= lineno:\n return pos\n\n raise ValueError()\n\n # <?=...?>\n @decorate_attributes(pattern='^=')\n def _handle_inline_expression(self, chunk):\n r\"\"\"Inline Python expression.\n\n >>> dprint(render_string('hello, <?=name?>', {'name': 'world'}))\n hello, world\n\n >>> dprint(render_string('hello, <?=name # hello ?>', {'name': 'world'}))\n hello, world\n\n >>> dprint(render_string('hello, <?= # comment\\nname # hello ?>', {'name': 'world'}))\n hello, world\n\n\n Without cast_string, except_hook:\n >>> dprint(render_string('''\n ... 
hello, <?=name?>\n ... ''', {'name': 1}).strip()) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: ...\n\n With cast_string:\n >>> dprint(render_string('''<?py\n ... from katagami import cast_string\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip())\n hello, 1\n\n With except_hook:\n >>> dprint(render_string('''<?py\n ... from katagami import except_hook\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip()) # doctest:+ELLIPSIS\n hello, Can't convert 'int' object to ... implicitly\n\n\n Customize cast_string behavior:\n >>> dprint(render_string('''<?py\n ... from katagami import cast_string\n ... def __cast_string__(o):\n ... \"__cast_string__ must be return str (or unicode in Python 2)\"\n ... return '[%s]' % o\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip())\n hello, [1]\n\n Give __cast_string__ via context:\n >>> dprint(render_string('''<?py\n ... from katagami import cast_string\n ... ?>\n ... hello, <?=name?>\n ... ''',\n ... {'name': 1, '__cast_string__': lambda o: '<%s>' % o}).strip())\n hello, <1>\n\n Give __cast_string__ via default_context:\n >>> default_context['__cast_string__'] = lambda o: '(%s)' % o\n >>> dprint(render_string('''<?py\n ... from katagami import cast_string\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip())\n hello, (1)\n >>> del default_context['__cast_string__']\n\n\n Curtomize except_hook behavior:\n >>> dprint(render_string('''<?py\n ... from katagami import except_hook\n ... def __except_hook__(typ, val, tb):\n ... \"__except_hook__ must be return str (or unicode in Python 2)\"\n ... return '%s catched' % typ.__name__\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip())\n hello, TypeError catched\n\n Give __except_hook__ via context:\n >>> dprint(render_string('''<?py\n ... from katagami import except_hook\n ... ?>\n ... hello, <?=name?>\n ... ''',\n ... {'name': 1, '__except_hook__': lambda t, v, tb: str(v)}).strip()) # doctest:+ELLIPSIS\n hello, Can't convert 'int' object to ... implicitly\n\n Give __except_hook__ via default_context:\n >>> default_context['__except_hook__'] = lambda t, v, tb: str(v)\n >>> dprint(render_string('''<?py\n ... from katagami import except_hook\n ... ?>\n ... hello, <?=name?>\n ... ''', {'name': 1}).strip()) # doctest:+ELLIPSIS\n hello, Can't convert 'int' object to ... implicitly\n >>> del default_context['__except_hook__']\n \"\"\"\n # sanitize expression\n tokens = PythonTokens.from_string(chunk[1:])\n tokens.strip_comments()\n expr = tokens.untokenize().strip()\n\n # except_hook is enabled\n if self.features & except_hook:\n self._embedscript('''\n try:\n yield %s\n except:\n if '__except_hook__' in locals():\n yield locals()['__except_hook__'](\n *__import__('sys').exc_info())\n elif '__except_hook__' in globals():\n yield globals()['__except_hook__'](\n *__import__('sys').exc_info())\n else:\n yield %s(__import__('sys').exc_info()[1])\n ''' % (expr, StringType.__name__), posmarker=False)\n\n # normal mode, except_hook is disabled\n else:\n self._appendline('yield ' + expr)\n\n # <?py...?>\n @decorate_attributes(pattern='^py')\n def _handle_embed_script(self, chunk):\n r\"\"\"Embed Python script.\n\n Simple:\n >>> dprint(render_string('''\n ... <?py\n ... name = 'world'\n ... ?>\n ... hello, <?=name?>\n ... ''').strip())\n hello, world\n\n Simple, different level indentation:\n >>> dprint(render_string('''\n ... <?py\n ... name = 'joe'\n ... ?>\n ... <?py\n ... name = 'world'\n ... ?>\n ... hello, <?=name?>\n ... 
''').strip())\n hello, world\n\n Nested with statement:\n >>> dprint(render_string('''\n ... <? if 1: {?>\n ... <?py\n ... name = 'world'\n ... ?>\n ... <?}?>\n ... hello, <?=name?>\n ... ''').strip())\n hello, world\n\n Get feature flags from top of the embedded script:\n >>> render_string('''<?py\n ... # comment\n ... \"docstring\"\n ... from katagami import cast_string, except_hook\n ... ?>\n ... ''', flags=returns_renderer).features\n 30\n >>> render_string('''<html><?py\n ... # comment\n ... \"docstring\"\n ... from katagami import cast_string, except_hook\n ... ?></html>\n ... ''', flags=returns_renderer).features\n 30\n \"\"\"\n # top of the template and first-line is `from katagami import ***`\n if self._firstmost_executable:\n firstline = list(\n PythonTokens.from_string(chunk[2:]).get_first_tokens())\n prefix = ' '.join(i[1] for i in firstline[:3])\n if prefix == 'from %s import' % __name__:\n for token in firstline[3:]:\n if token[0] == tokenize.NAME and token[1] in self.features:\n self.features |= globals()[token[1]]\n\n self._embedscript(chunk[2:])\n\n # <?}...{?>\n @decorate_attributes(pattern='(^}|.*{$)')\n def _handle_block(self, chunk):\n r\"\"\"Bridge Python and XML by brace.\n\n Simple, if-else:\n >>> dprint(render_string('''\n ... <? if 1: {?>\n ... hello, world\n ... <?} else: {?>\n ... hidden area\n ... <?}?>\n ... ''').strip())\n hello, world\n\n Nested:\n >>> dprint(render_string('''\n ... <? if 1: {?>\n ... <? if 1: {?>\n ... hello, world\n ... <?}?>\n ... <?}?>''').strip())\n hello, world\n\n Simple, try-except-finally:\n >>> dprint(re.sub('\\s+', ' ', render_string('''\n ... hello,\n ... <? try: {?>\n ... <?=name?>\n ... <?} except NameError: {?>\n ... unknown\n ... <?} finally: {?>\n ... !\n ... <?}?>\n ... ''').strip()))\n hello, unknown !\n \"\"\"\n if chunk.startswith('}'):\n chunk = chunk[1:]\n try:\n self._indent.pop()\n except LookupError:\n lineno, offset = self._current_position\n raise IndentationError(\n 'brace is not started', (\n self.name,\n lineno,\n offset,\n self._template_body.splitlines()[lineno - 1],\n ))\n\n indent = None\n if chunk.endswith('{'):\n chunk = chunk[:-1]\n indent = self._current_position\n\n chunk = chunk.strip()\n if chunk:\n assert chunk.split()[0] not in ('def', 'class')\n self._appendline(chunk)\n\n if indent:\n self._indent.append(indent)\n\n # <?\\...?>\n @decorate_attributes(pattern='^\\\\\\\\', executable=False)\n def _handle_escape(self, chunk):\n r\"\"\"Escape XML PIs.\n\n >>> dprint(render_string('<?\\py \"hello, world\"?>'))\n <?py \"hello, world\"?>\n \"\"\"\n self._appendline('yield ' + literalize(PREFIX + chunk[1:] + SUFFIX))\n\n\n\f#\n# module globals\n#\ndefault_translator = Translator\ndefault_context = {\n # '__except_hook__': function(type, value, traceback) -> 'repr-ed error',\n # '__cast_string__': function(any_object) -> 'repr-ed object',\n # 'escape': xml.sax.saxutils.escape,\n # 'quoteattr': xml.sax.saxutils.quoteattr,\n }\n\n\ndef render_file(file_or_filename, context={}, flags=0):\n r\"\"\"Render a file-like object or a file.\n\n * `file_or_filename` -- file-like object or filename\n * `context` -- variables for template execution context\n * `flags` -- Combination of these values: returns_bytes, returns_iter,\n returns_renderer.\n \"\"\"\n if isinstance(file_or_filename, StringType):\n with open(file_or_filename, 'rb') as fp:\n template = default_translator(fp)\n else:\n template = default_translator(file_or_filename)\n\n if flags & returns_renderer:\n assert not context\n return template\n\n return 
template(dict(default_context, **context), flags)\n\n\ndef render_string(string_or_bytes, context={}, flags=0):\n r\"\"\"Render a string or a bytes.\n\n\n >>> tmpl = '<?=\"\\u3053\\u3093\\u306b\\u3061\\u306f\"?>'\n\n String-in, string-out:\n >>> dprint(render_string(tmpl))\n \\u3053\\u3093\\u306b\\u3061\\u306f\n\n Bytes-in, string-out:\n >>> dprint(render_string(tmpl.encode('utf-8')))\n \\u3053\\u3093\\u306b\\u3061\\u306f\n\n\n String-in, Bytes-out:\n >>> dprint(render_string(tmpl, flags=returns_bytes))\n b'\\xe3\\x81\\x93\\xe3\\x82\\x93\\xe3\\x81\\xab\\xe3\\x81\\xa1\\xe3\\x81\\xaf'\n\n Bytes-in, bytes-out:\n >>> dprint(render_string(tmpl.encode('utf-8'), flags=returns_bytes))\n b'\\xe3\\x81\\x93\\xe3\\x82\\x93\\xe3\\x81\\xab\\xe3\\x81\\xa1\\xe3\\x81\\xaf'\n\n String-in, string-iterator-out:\n >>> dprint(list(render_string(tmpl, flags=returns_iter)))\n ['\\u3053\\u3093\\u306b\\u3061\\u306f']\n\n String-in, bytes-iterator-out:\n >>> dprint(list(render_string(tmpl, flags=returns_bytes | returns_iter)))\n [b'\\xe3\\x81\\x93\\xe3\\x82\\x93\\xe3\\x81\\xab\\xe3\\x81\\xa1\\xe3\\x81\\xaf']\n\n Bytes-in, string-iterator-out:\n >>> dprint(list(render_string(tmpl.encode('utf-8'), flags=returns_iter)))\n ['\\u3053\\u3093\\u306b\\u3061\\u306f']\n\n Bytes-in, bytes-iterator-out:\n >>> dprint(list(render_string(tmpl.encode('utf-8'),\n ... flags=returns_bytes | returns_iter)))\n [b'\\xe3\\x81\\x93\\xe3\\x82\\x93\\xe3\\x81\\xab\\xe3\\x81\\xa1\\xe3\\x81\\xaf']\n\n\n >>> renderer = render_string(tmpl, flags=returns_renderer)\n >>> isinstance(renderer, (StringType, BytesType))\n False\n >>> context = {}\n >>> bool(context)\n False\n >>> dprint(renderer(context))\n \\u3053\\u3093\\u306b\\u3061\\u306f\n >>> bool(context)\n True\n \"\"\"\n if isinstance(string_or_bytes, StringType):\n string_or_bytes = io.StringIO(string_or_bytes)\n elif isinstance(string_or_bytes, BytesType):\n string_or_bytes = io.BytesIO(string_or_bytes)\n else:\n raise TypeError(string_or_bytes)\n\n template = default_translator(string_or_bytes)\n\n if flags & returns_renderer:\n assert not context\n return template\n\n return template(dict(default_context, **context), flags)\n\n\ndef render_resource(package_or_requirement, resource_name, context={}, flags=0):\n r\"\"\"Render a package resource via `pkg_resources.resource_stream()`.\n \"\"\"\n import pkg_resources\n\n template = default_translator(\n pkg_resources.resource_stream(package_or_requirement, resource_name))\n\n if flags & returns_renderer:\n assert not context\n return template\n\n return template(dict(default_context, **context), flags)\n\n\n# TODO: webob.dec.wsgify(TemplateApp(filename, **response_kwargs))\n\n\n\fclass KatagamiTemplate(object):\n\n def __init__(self, path=None, suffix='.html', flags=0,\n default_context=default_context, cache=None,\n update_on_modified=True):\n assert not (flags & returns_renderer)\n self.path = path\n self.suffix = suffix\n self.flags = flags\n self.default_context = default_context\n self.cache = cache\n self.update_on_modified = update_on_modified\n\n def _get_template_filename(self, template_name):\n return os.path.join(self.path, template_name + self.suffix)\n\n def _create_template(self, template_name):\n import katagami\n\n filename = self._get_template_filename(template_name)\n mtime = os.stat(filename).st_mtime \\\n if self.update_on_modified else -1\n\n if self.cache is not None and template_name in self.cache \\\n and self.cache[template_name].mtime >= mtime:\n result = self.cache[template_name]\n\n else:\n with open(filename, 'rb') 
as fp:\n result = katagami.Translator(fp)\n\n if self.cache is not None:\n result.mtime = mtime\n self.cache[template_name] = result\n\n return result\n\n def __call__(self, template_name, kwargs):\n \"\"\"Template contract is any callable of the following form:\n\n def render_template(self, template_name, **kwargs):\n return 'unicode string'\n\n https://pythonhosted.org/wheezy.web/userguide.html#contract\n \"\"\"\n\n template = self._create_template(template_name)\n\n context = {}\n context.update(self.default_context)\n context.update(kwargs)\n\n # get wheezy.web.handlers.base.BaseHandler.render_template\n # if 'render_template' not in context:\n # try:\n # context['render_template'] \\\n # = context['path_for'].__self__.render_template\n # except (LookupError, AttributeError):\n # pass\n\n return template(context, self.flags)\n\n\n# try:\n# import wheezy.web.templates\n# except ImportError:\n# pass\n# else:\n# wheezy.web.templates.KatagamiTemplate = KatagamiTemplate\n\n\nclass Test(unittest.TestCase):\n\n def render(self, chunk, lines=1, columns=0):\n return render_string(('\\n' * (lines - 1)) + (' ' * columns) + chunk)\n\n def test_empty_template(self):\n self.assertEqual(render_string(''), '')\n\n def test_syntax_error_position_mod(self):\n with self.assertRaises(SyntaxError) as cx:\n self.render('<?py syntax error ?>', 10, 10)\n self.assertEqual(cx.exception.lineno, 10)\n self.assertEqual(cx.exception.offset, 10)\n\n # missing brace closing\n with self.assertRaises(IndentationError) as cx:\n self.render('<? if 1: {?>', 5, 9)\n self.assertEqual(cx.exception.lineno, 5)\n self.assertEqual(cx.exception.offset, 9)\n\n # brace closing without opening\n with self.assertRaises(IndentationError) as cx:\n self.render('<?} elif 1: {?>', 11, 3)\n self.assertEqual(cx.exception.lineno, 11)\n self.assertEqual(cx.exception.offset, 3)\n\n def test_error_position_mod(self):\n try:\n self.render('<?= 1 ?>', 3, 7)\n except TypeError:\n filename, lineno, funcname, _ \\\n = traceback.extract_tb(sys.exc_info()[2])[-1]\n self.assertEqual(lineno, 3)\n\n\n\fif __name__ == '__main__':\n # register: setup.py check sdist register upload\n # upload: setup.py check sdist upload\n import __main__\n import os.path\n import doctest\n import distutils.core\n\n __main__.__name__ = os.path.splitext(os.path.basename(__file__))[0]\n sys.modules[__main__.__name__] = __main__\n target = __main__\n\n if 'check' in sys.argv:\n unittest.main(argv=sys.argv[:1], exit=False)\n doctest.testmod()\n try:\n import docutils.core\n except ImportError:\n pass\n else:\n s = docutils.core.publish_string(target.__doc__, writer_name='html')\n with open(os.path.splitext(__file__)[0] + '.html', 'wb') as fp:\n fp.write(s)\n\n # http://docs.python.org/3/distutils/apiref.html?highlight=setup#distutils.core.setup\n distutils.core.setup(\n name=target.__name__,\n version=target.__version__,\n description=target.__doc__.splitlines()[0],\n long_description=target.__doc__,\n author=target.__author__,\n author_email=target.__author_email__,\n url=target.__url__,\n classifiers=target.__classifiers__,\n license=target.__license__,\n py_modules=[target.__name__, ],\n )\n\n\n"
}
] | 1 |
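A note on the traceback-mapping trick in the katagami source above: when the template is translated, every embedded script line is preceded by a "# -*- line L, column C -*-" comment, and _find_original_pos later regex-scans the generated source so a runtime error can be reported at its original template position. A minimal, self-contained sketch of that mapping (the sample generated source and function name here are illustrative, not katagami's API):

import re

# Hypothetical generated source: marker comments record template positions.
GENERATED = (
    "# -*- line 3, column 5 -*-\n"
    "yield name\n"
    "# -*- line 7, column 1 -*-\n"
    "yield greeting\n"
)

MARKER = re.compile(r"\s*# -\*- line (?P<line>\d+), column (?P<column>\d+) -\*-\s*")

def find_original_pos(source, lineno):
    # Remember the last marker seen at or before the requested line.
    pos = (lineno, 0)
    for current, line in enumerate(source.splitlines(), start=1):
        matched = MARKER.match(line)
        if matched:
            pos = (int(matched.group("line")), int(matched.group("column")))
        if current >= lineno:
            return pos
    raise ValueError(lineno)

print(find_original_pos(GENERATED, 4))  # (7, 1): template position of 'yield greeting'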
JfGitHub31/dev-tool
|
https://github.com/JfGitHub31/dev-tool
|
73956bbffa8d545ff547596eeacdb5af7e50580d
|
0fef1fd57b5ab68a302758c141969cbf7e1238d6
|
9551a33ee97e31c97ebd1f59d0c007dad6717409
|
refs/heads/master
| 2020-06-03T06:54:45.784657 | 2019-06-12T09:42:23 | 2019-06-12T09:42:23 | 191,485,574 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4739130437374115,
"alphanum_fraction": 0.4763975143432617,
"avg_line_length": 22.769229888916016,
"blob_id": "406d3829dc13f6daa6d2755eb6afcf82e9efc025",
"content_id": "9177743561f17ca09916008fd5cd83cf3d74d4ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1644,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 65,
"path": "/svnrightscan/file3/srs-test.py",
"repo_name": "JfGitHub31/dev-tool",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport xlwt\r\nimport time\r\nimport configparser\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\n\r\n\r\ndef transform(arg):\r\n sec_ls = []\r\n opt_ls = []\r\n values_ls = []\r\n sec_a_ls = []\r\n\r\n conf = configparser.ConfigParser()\r\n conf.read(\"%s\" % arg, encoding=\"utf8\")\r\n sections = conf.sections()\r\n\r\n for sec in sections:\r\n _sec = \"[\" + sec + \"]\"\r\n sec_a_ls.append(sec)\r\n sec_ls.append(_sec)\r\n a = conf.options(sec)\r\n opt_ls.extend(a)\r\n opt_ls = list(set(opt_ls))\r\n\r\n for sec in sec_a_ls:\r\n for opt in opt_ls:\r\n try:\r\n values = conf.get(sec, opt)\r\n if values == \" \":\r\n values_ls.append(\" \")\r\n values_ls.append(values)\r\n except:\r\n values_ls.append(\"无\")\r\n\r\n ll = [values_ls[i:i + len(opt_ls)] for i in range(0, len(values_ls), len(opt_ls))]\r\n\r\n def collect():\r\n time_str = time.strftime(\"%Y/%m/%d\") + \" \" + time.strftime(\"%I:%M\")\r\n writer = pd.ExcelWriter(\"%s.xls\" % (arg + \"-report\"))\r\n df = DataFrame({\"扫描日期: %s\" % time_str: opt_ls})\r\n\r\n for i in range(len(sec_ls)):\r\n sec = sec_ls[i]\r\n df[sec] = ll[i]\r\n print(df)\r\n df.to_excel(writer, sheet_name='sheet1')\r\n writer.save()\r\n return collect\r\n\r\n\r\ndef main():\r\n try:\r\n arg = sys.argv[1]\r\n print(\"开始扫描.\")\r\n target = transform(arg)\r\n target()\r\n print(\"扫描完成.\")\r\n except:\r\n print(\"输入提示: srs [infile] [outfile]\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
},
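The core pattern in srs-test.py above (take the union of option names across all INI sections, filling a placeholder where a section lacks an option) can also be expressed directly as a dict of dicts, letting pandas align the index. A sketch under the same assumptions (configparser input, Excel-ready DataFrame output; the sample INI text is made up):

import configparser
import pandas as pd

SAMPLE = "[repo_a]\nread = user1\nwrite = user2\n\n[repo_b]\nread = user3\n"

conf = configparser.ConfigParser()
conf.read_string(SAMPLE)

# One column per section, one row per option name; pandas takes the union
# of all option names and leaves NaN where a section lacks an option.
table = pd.DataFrame({sec: dict(conf.items(sec)) for sec in conf.sections()})
print(table.fillna("N/A"))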
{
"alpha_fraction": 0.5035461187362671,
"alphanum_fraction": 0.5106382966041565,
"avg_line_length": 10.75,
"blob_id": "fa8672c81a7ab6b1e2ec19082ac5dcff193aa629",
"content_id": "59c433aa57ea25557b4ce5585dc8b8c3a42865af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 12,
"path": "/svnrightscan/file1/test.py",
"repo_name": "JfGitHub31/dev-tool",
"src_encoding": "UTF-8",
"text": "import re\n\n\n# with open(\"VisualSVN-SvnAuthz.ini\") as f:\n# a = f.read()\n#\n# aa = re.findall(r\"\\[.*?\\]\", a)\n# print(len(aa))\n\nb = 4\n\na = b\n"
},
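The commented-out lines in test.py above sketch counting [section] headers with a regex; a runnable version of that idea on an in-memory string (the sample text is made up, the pattern is the one from the comment):

import re

ini_text = "[groups]\nteam = a, b\n\n[repo:/]\n@team = rw\n"

# Every bracketed header counts as one section, as in the commented code.
sections = re.findall(r"\[.*?\]", ini_text)
print(len(sections), sections)  # 2 ['[groups]', '[repo:/]']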
{
"alpha_fraction": 0.5069337487220764,
"alphanum_fraction": 0.5115562677383423,
"avg_line_length": 22.037036895751953,
"blob_id": "c3fe6c53a2f4fc0ff3045e044bc419d0ae1bf9fb",
"content_id": "dea0326bf7b66f8277f5de83730fe6d8c68703b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1298,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 54,
"path": "/svnrightscan/file1/src-test.py",
"repo_name": "JfGitHub31/dev-tool",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport configparser\r\nimport pandas as pd\r\nimport xlwt\r\nfrom pandas import DataFrame\r\n\r\n\r\ndef transform():\r\n sec_ls = []\r\n opt_ls = []\r\n values_ls = []\r\n sec_a_ls = []\r\n conf = configparser.ConfigParser()\r\n conf.read(\"VisualSVN-SvnAuthz.ini\", encoding=\"utf8\")\r\n sections = conf.sections()\r\n\r\n for sec in sections:\r\n\r\n _sec = \"[\" + sec + \"]\"\r\n sec_a_ls.append(sec)\r\n sec_ls.append(_sec)\r\n a = conf.options(sec)\r\n opt_ls.extend(a)\r\n opt_ls = list(set(opt_ls))\r\n # print(sec_ls, len(sec_ls))\r\n # print(opt_ls,len(opt_ls))\r\n\r\n for sec in sec_a_ls:\r\n for opt in opt_ls:\r\n try:\r\n values = conf.get(sec, opt)\r\n values_ls.append(values)\r\n except:\r\n values_ls.append(\"None\")\r\n\r\n ll = [values_ls[i:i + len(opt_ls)] for i in range(0, len(values_ls), len(opt_ls))]\r\n # print(ll,len(ll))\r\n\r\n def collect():\r\n writer = pd.ExcelWriter(\"123.xls\")\r\n df = DataFrame({\"opt_name\": opt_ls})\r\n\r\n for i in range(len(sec_ls)):\r\n sec = sec_ls[i]\r\n df[sec] = ll[i]\r\n print(df)\r\n\r\n df.to_excel(writer, sheet_name='sheet1')\r\n writer.save()\r\n return collect\r\n\r\n\r\ntarget = transform()\r\ntarget()\r\n"
},
{
"alpha_fraction": 0.4858548641204834,
"alphanum_fraction": 0.4975399672985077,
"avg_line_length": 22.22857093811035,
"blob_id": "e35d215bbc76c2f5ee409806954c6733cf0083b2",
"content_id": "e32aae2a9efa2407d6d0f184c8d12b2dfbae0567",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1698,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 70,
"path": "/svnrightscan/file2/srs.py",
"repo_name": "JfGitHub31/dev-tool",
"src_encoding": "UTF-8",
"text": "import sys,os\nimport configparser\nimport pandas as pd\nimport xlwt\nfrom pandas import DataFrame\n\n\n\"\"\"\n脚本运行\npython3 srs.py 参数, 结果生成一个excel表格\n注意: 脚本和文件在同一目录下\n\"\"\"\n\n\nclass SRS:\n sec_ls = []\n opt_ls = []\n values_ls = []\n\n def fun(self):\n arg = sys.argv[1]\n print(arg,type(arg))\n conf = configparser.ConfigParser()\n file_path = conf.get\n conf.read(\"%s\" % arg, encoding=\"utf8\")\n sections = conf.sections()\n\n for sec in sections:\n sec1 = \"[\" + sec + \"]\"\n self.sec_ls.append(sec1)\n a = conf.options(sec)\n self.opt_ls.extend(a)\n\n # print(sec_ls,len(sec_ls)) # 73个首选项\n opt1_ls = list(set(self.opt_ls))\n # print(opt1_ls,len(opt1_ls)) # 12 个配置参数\n\n for index in range(len(opt1_ls)):\n for sec in sections:\n try:\n values = conf.get(sec,opt1_ls[index])\n self.values_ls.append(values)\n except:\n self.values_ls.append(\"None\")\n ll = [self.values_ls[i:i + len(self.sec_ls)] for i in range(0, len(self.values_ls), len(self.sec_ls))]\n # print(ll)\n\n def run():\n writer = pd.ExcelWriter(\"2.xls\")\n df = DataFrame({\"sections_name\": self.sec_ls})\n for i in range(len(opt1_ls)):\n opt = opt1_ls[i]\n df[opt] = ll[i]\n print(df)\n\n df.to_excel(writer, sheet_name='sheet1')\n writer.save()\n return run\n\n\ndef main():\n srs = SRS()\n S = srs.fun()\n S()\n\nmain()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5580110549926758,
"alphanum_fraction": 0.5635359287261963,
"avg_line_length": 19.22222137451172,
"blob_id": "05e427460c457c0f829dfe15305df09720860875",
"content_id": "03012f4f363076a39edf4c876d6a632e62004052",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 9,
"path": "/svnrightscan/file3/test.py",
"repo_name": "JfGitHub31/dev-tool",
"src_encoding": "UTF-8",
"text": "# ecoding = utf8\nimport re\n\nimport time\n## dd/mm/yyyy格式\nprint (time.strftime(\"%Y/%m/%d\"))\nprint (time.strftime(\"%I:%M\"))\nprint(\"\", type(\"\"), len(\"\"))\nprint(\" \", type(\" \"), len(\" \"))"
}
] | 5 |
jlents/discover-flask
|
https://github.com/jlents/discover-flask
|
8b58aecf8b46b97329241975c537027ac5b542be
|
b7217189f57a104fbbc6caf058ccadf9ea7a0596
|
bb47cc44e311bc040dfeec9f81bec7ac2ed4d3c3
|
refs/heads/master
| 2018-01-09T15:31:50.067292 | 2017-01-14T17:09:38 | 2017-01-14T17:09:38 | 50,389,593 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7197309136390686,
"alphanum_fraction": 0.7197309136390686,
"avg_line_length": 26.875,
"blob_id": "fdbed25c79343f66cfb02e22542ccc394f912444",
"content_id": "3bae963bd7ac723b9703d81684085cf24deda2e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 16,
"path": "/db_create.py",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "from project import db\nfrom project.moels import BlogPost\n\n\n# create the db and the db tables\n# inits db based on schema defined in the models.py file\ndb.create_all()\n\n# insert\n# db.session.add(BlogPost(\"Good\", \"I'm good.\"))\n# db.session.add(BlogPost(\"Well\", \"I'm well.\"))\n# db.session.add(BlogPost(\"Flask\", \"discoverflask.com\"))\n# db.session.add(BlogPost(\"postgres\", \"we setup a local postgres instance.\"))\n\n# commit changes\ndb.session.commit()\n"
},
{
"alpha_fraction": 0.77173912525177,
"alphanum_fraction": 0.77173912525177,
"avg_line_length": 60.33333206176758,
"blob_id": "93a8a9058a940ce670e2280da9bd8a935b4e9026",
"content_id": "e146a5c3092a1af05e801daefe2ce4fdd0c32635",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 3,
"path": "/readme.md",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.org/jlents/discover-flask)\n\n#Just adding something for the sake of adding something.\n"
},
{
"alpha_fraction": 0.6963562965393066,
"alphanum_fraction": 0.7004048824310303,
"avg_line_length": 48.400001525878906,
"blob_id": "cbdcd08726715013d620b07ba5d3b51c41b103ef",
"content_id": "bc7d4fe03cf751e94cd104601fe93e9e71af676e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 10,
"path": "/sql.py",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\nwith sqlite3.connect(\"sample.db\") as connection:\n my_connection = connection.cursor()\n my_connection.execute(\"DROP TABLE posts\")\n # Only need \"\"\" for multi-line commands.\n # leaving them below for sake of this comment to make sense\n my_connection.execute(\"\"\"CREATE TABLE posts(title TEXT, description TEXT)\"\"\")\n my_connection.execute('INSERT INTO posts VALUES (\"Good\", \"I\\'m good\")')\n my_connection.execute('INSERT INTO posts VALUES (\"Well\", \"I\\'m well\")')\n"
},
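sql.py above escapes the apostrophes in its INSERT statements by hand; sqlite3's ? placeholders do that quoting automatically, which is the usual way to write it. A sketch of the same two inserts with parameters (in-memory database so it runs standalone):

import sqlite3

with sqlite3.connect(":memory:") as connection:
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE posts(title TEXT, description TEXT)")
    # Placeholders handle quoting, so the apostrophes need no manual escaping.
    rows = [("Good", "I'm good"), ("Well", "I'm well")]
    cursor.executemany("INSERT INTO posts VALUES (?, ?)", rows)
    print(cursor.execute("SELECT * FROM posts").fetchall())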
{
"alpha_fraction": 0.5198675394058228,
"alphanum_fraction": 0.7019867300987244,
"avg_line_length": 15.777777671813965,
"blob_id": "45da542d0980b04d4e5057dbb10bc5e74f5b64c9",
"content_id": "d81e6a2a5d63bbbeefc85a3b3305a14b086f4961",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 36,
"path": "/requirements.txt",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "alembic==0.8.4\nappnope==0.1.0\nbcrypt==2.0.0\ncffi==1.5.0\ncoverage==4.0.3\ndecorator==4.0.6\nFlask==0.10.1\nFlask-Bcrypt==0.7.1\nFlask-Login==0.3.2\nFlask-Migrate==1.7.0\nFlask-Script==2.0.5\nFlask-SQLAlchemy==2.1\nFlask-Testing==0.4.2\nFlask-WTF==0.12\ngnureadline==6.3.3\ngunicorn==19.4.5\nipython==4.0.2\nipython-genutils==0.1.0\nitsdangerous==0.24\nJinja2==2.8\nMako==1.0.3\nMarkupSafe==0.23\npath.py==8.1.2\npexpect==4.0.1\npickleshare==0.5\npsycopg2==2.6.1\nptyprocess==0.5\npycparser==2.14\npython-editor==0.5\nsimplegeneric==0.8.1\nsix==1.10.0\nSQLAlchemy==1.0.11\ntraitlets==4.1.0\nWerkzeug==0.11.3\nwheel==0.24.0\nWTForms==2.1\n"
},
{
"alpha_fraction": 0.6470264196395874,
"alphanum_fraction": 0.6481277346611023,
"avg_line_length": 38.912086486816406,
"blob_id": "57ec35e588b533f2a3e8c2e1c019e9fa75408250",
"content_id": "ae2194676cb46e8f9508b3bf1f2f66c7404586f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3632,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 91,
"path": "/test.py",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom flask.ext.testing import TestCase\nfrom flask.ext.login import current_user\nfrom project import app, db\nfrom project.models import User, BlogPost\n# Here's a comment as well\n\nclass BaseTestCase(TestCase):\n \"\"\"A base test case.\"\"\"\n\n def create_app(self):\n app.config.from_object('config.TestConfig')\n return app\n\n def setUp(self):\n db.create_all()\n db.session.add(User(\"admin\", \"[email protected]\", \"admin\"))\n db.session.add(BlogPost(\"Test post\", \"This is a test. Only a test.\", 1))\n db.session.commit()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n\nclass FlaskTestCase(BaseTestCase):\n\n # Ensure that flask was set up correctly\n def test___index(self):\n response = self.client.get('/login', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n\n # Ensure that login is required to view the welcome page\n def test___login_protected_welcome_page(self):\n response = self.client.get('/', follow_redirects=True)\n self.assertIn(b'Please log in to access this page.', response.data)\n\n # Ensure that login is required in order to logout\n def test___logout_requires_login(self):\n response = self.client.get('/logout', follow_redirects=True)\n self.assertIn(b'Please log in to access this page.', response.data)\n\n # Ensure that posts show up on the main page\n def test___posts_show_up_on_main_page(self):\n response = self.client.post(\n '/login',\n data=dict(username=\"admin\", password=\"admin\"),\n follow_redirects=True\n )\n self.assertIn(b'This is a test. Only a test.', response.data)\n\n\nclass UsersViewsTests(BaseTestCase):\n # Ensure that login page loads correctly\n def test___login_page_loads(self):\n response = self.client.get('/login', content_type='html/text')\n self.assertTrue(b'Please login' in response.data)\n\n # Ensure that login behaves as expected with proper credentials\n def test___correct_login(self):\n with self.client:\n response = self.client.post('/login', data=dict(username=\"admin\", password=\"admin\"), follow_redirects=True)\n self.assertIn(b'You were logged in', response.data)\n self.assertTrue(current_user.name == \"admin\")\n self.assertTrue(current_user.is_active)\n\n # Ensure that login behaves as expected with improper credentials\n def test___incorrect_login(self):\n response = self.client.post('/login', data=dict(username=\"ping\", password=\"pow\"), follow_redirects=True)\n self.assertIn(b'Invalid Credentials. Please try again', response.data)\n\n # Ensure that logout behaves as expected\n def test___correct_logout(self):\n # first logging in\n with self.client:\n self.client.post('/login', data=dict(username=\"admin\", password=\"admin\"), follow_redirects=True)\n response = self.client.get('/logout', follow_redirects=True)\n self.assertIn(b'You were logged out', response.data)\n self.assertFalse(current_user.is_active)\n\n # Test user registration\n def test___user_registration(self):\n with self.client:\n response = self.client.post('/register', data=dict(username=\"josh\", email=\"[email protected]\", password=\"joshua\", confirm=\"joshua\"), follow_redirects=True)\n self.assertIn(b'Welcome to Flask!', response.data)\n self.assertTrue(current_user.is_active)\n self.assertTrue(current_user.name == \"josh\")\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.7284595370292664,
"alphanum_fraction": 0.7310705184936523,
"avg_line_length": 24.53333282470703,
"blob_id": "df8539cd8a2855b97a8b468f32f42388ce401cbb",
"content_id": "7338e33dc7b02f2d5b79e5958661ea8beb3bcdb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 766,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 30,
"path": "/config.py",
"repo_name": "jlents/discover-flask",
"src_encoding": "UTF-8",
"text": "# default config\nimport os\n\n# Checkout the flask documentation for better practices with config\n\n\n# Ran this to set environment variable on heroku\n# heroku config:set APP_SETTINGS=config.ProductionConfig --remote heroku\n# We don't want our config settings in our repo for general security purposes\nclass BaseConfig(object):\n DEBUG = False\n # os.urandom(24) used to gen the key\n SECRET_KEY = ':M\\xe4\"2\\x8a\\x9dnWK\\x05D?z\\xde@+\\xcb\\x0f\\xcaF\\x1a\\x9f\\xc3'\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n\n\nclass TestConfig(BaseConfig):\n DEBUG = True\n TESTING = True\n WTF_CSRF_ENABLED = False\n SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'\n\n\nclass DevelopmentConfig(BaseConfig):\n DEBUG = True\n\n\nclass ProductionConfig(BaseConfig):\n DEBUG = False\n"
}
] | 6 |
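config.py above notes that APP_SETTINGS was set on Heroku to pick a config class; the matching application-side step is to hand that variable to app.config.from_object. A minimal sketch, assuming the config module from the record is importable and DATABASE_URL is set in the environment:

import os
from flask import Flask

app = Flask(__name__)
# e.g. heroku config:set APP_SETTINGS=config.ProductionConfig
# Fall back to the development class when the variable is unset.
app.config.from_object(os.environ.get("APP_SETTINGS", "config.DevelopmentConfig"))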
paolopoli1980/missp
|
https://github.com/paolopoli1980/missp
|
e2121f2d03a91cec842f36ebcb85a5303ee9bf04
|
c786ac318327ca17fd338a36dc0abaeaff44721d
|
162e13955b7d1a730d898c35b9bee1547fb80178
|
refs/heads/master
| 2022-11-02T14:42:56.180060 | 2022-10-14T15:59:17 | 2022-10-14T15:59:17 | 71,396,684 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47362250089645386,
"alphanum_fraction": 0.5392731428146362,
"avg_line_length": 16.17021369934082,
"blob_id": "e9c45b9f07478fe5b06a6ad8126a7af166d4038b",
"content_id": "c71fbd80c84729d4cd0adb9af11e4350397700c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 47,
"path": "/v2in2017/ex10/make.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\nf1=open(\"pos.aut\",\"w\")\r\nk=0\r\n\r\nfor i in range(20):\r\n\tx=random.uniform(-3+k,-1)\r\n\ty=random.uniform(1,3)\r\n\tz=random.uniform(0,0.5)\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\tf1.write(str(z)+str(\"\\n\"))\r\n\r\nfor i in range(20):\r\n\tx=random.uniform(1+k,3)\r\n\ty=random.uniform(1,3)\r\n\tz=random.uniform(0,0.5)\r\n\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\tf1.write(str(z)+str(\"\\n\"))\r\n \r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t\r\nf1=open(\"vinc.aut\",\"w\")\r\nfor i in range(30):\r\n\tz=i*0.1\t\r\n\tfor j in range(30):\r\n\t\ty=j*0.1\r\n\t\tx=0\r\n\t\tf1.write(str(x)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\nfor i in range(30):\r\n\ty=i*0.1\t\r\n\tfor j in range(60):\r\n\t\tx=j*0.1\r\n\t\tz=0\r\n\t\tf1.write(str(x-3)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\nf1.write(\"*\\n\")\r\nf1.close() "
},
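make.py above writes pos.aut and vinc.aut as one coordinate per line, three lines per point, with a * line as terminator; reading such a file back is the mirror image of that loop. A sketch of a reader for the format (the function name is mine, not from this repo):

def read_points(path, dim=3):
    # One float per line until the '*' terminator, then group into dim-tuples.
    coords = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line == "*":
                break
            coords.append(float(line))
    return [tuple(coords[i:i + dim]) for i in range(0, len(coords), dim)]

# read_points("pos.aut") -> [(x0, y0, z0), (x1, y1, z1), ...]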
{
"alpha_fraction": 0.3667426109313965,
"alphanum_fraction": 0.4145785868167877,
"avg_line_length": 14.115385055541992,
"blob_id": "5adcde6763b9bc0f5ad93917ef2f55c59b0a56e0",
"content_id": "b8bac4cecb3341dd09741470f149f3f82729101e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 26,
"path": "/v2in2017/wheelcollapse/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\n\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\n\r\nfor j in range(1):\r\n for i in range(10):\r\n \r\n x=4*math.cos(angle)\r\n y=4*math.sin(angle)\r\n #z=0\r\n angle+=math.pi/10.0\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n #f1.write(str(z)+str(\"\\n\"))\r\n \r\n\r\n\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.5718469023704529,
"alphanum_fraction": 0.596318781375885,
"avg_line_length": 26.951515197753906,
"blob_id": "3f54235955e4fb895bf691d23cb07c35c79c41e8",
"content_id": "a8493043dddd438bb2342752f61a947ea4b965e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4781,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 165,
"path": "/show.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "from visual import *\r\nfrom time import sleep\r\n\r\ngruppi=input('numero di gruppi=')\r\ndim=input('inserisci la dimensione:=')\r\nframe=input('rate:=')\r\nraggio=[]\r\ncolore=[]\r\n\r\nfor i in range(gruppi):\r\n\tr=input(\"Inserisci raggio:=\")\r\n\tc=raw_input(\"inserisci colore:=\")\r\n\tt=input(\"inserisci il range del gruppo n.\"+str(i)+\":=\")\r\n\tfor j in range(t):\r\n\t\traggio.append(r)\r\n\t\tcolore.append(c)\r\n\t\t\r\n\t\r\n\r\n\r\nf1=open('pos.aut','r')\r\nstringa='aaa'\r\nr=0.5\r\n \r\n\r\n\r\npoint=[]\r\npointgraf=[]\r\ncontatore=-1\r\nif dim==1:\r\n\twhile (stringa!='*\\n'):\r\n\t\tcontatore+=1\r\n\t\tstringa=f1.readline()\r\n\t\tif (stringa!='*\\n'):\r\n\t\t\tprint (stringa)\r\n\t\t\tif (colore[contatore]==\"red\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n\t\t\tif (colore[contatore]==\"green\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n\t\t\tif (colore[contatore]==\"yellow\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n\t\t\tif (colore[contatore]==\"white\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n \r\n\r\n\r\n\tf1.close()\t\r\n\tf1=open('posres.aut','r')\r\n\twhile (stringa!='e\\n'):\r\n\t\tcont=0;\r\n\t\tstringa='a'\r\n\t\twhile (stringa!='*\\n'):\r\n\t\t\trate(frame)\r\n\t\t\tstringa=f1.readline()\r\n\t\t\tif (stringa!='*\\n' and stringa!='e\\n'):\r\n\t\t\t\t#print stringa\r\n\t\t\t\tpoint[cont].pos.x=float(stringa)\r\n\t\t\t\t#sleep(0.01)\r\n\t\t\tcont=cont+1;\t\r\n\r\nif dim==2:\r\n\tf2=open(\"vinc.aut\",\"r\")\r\n\tvx=\"xx\"\r\n\tvy=\"yy\"\r\n\twhile vx!=\"*\\n\":\r\n\t\tvx=f2.readline()\r\n\t\tif vx!='*\\n':\r\n\t\t\tvy=f2.readline()\r\n\t\t\tpointgraf.append(sphere(pos=(float(vx),float(vy),0), radius=0.05, color=color.white))\r\n\r\n\t\r\n\tball = sphere(pos=(0,0,0), radius=0.01)\r\n\t#cr = shapes.circle(radius=1, np=64)\r\n\tprint dim\r\n\twhile (stringa!='*\\n'):\r\n\t\tcontatore+=1\r\n\t\tstringa=f1.readline()\r\n\t\tstringa2=f1.readline()\r\n\r\n\t\tif (stringa!='*\\n'):\r\n\t\t\t#print (stringa)\r\n\t\t\t#print (stringa2)\r\n \t\t\tif (colore[contatore]==\"red\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n\t\t\tif (colore[contatore]==\"green\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n\t\t\tif (colore[contatore]==\"yellow\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n\t\t\tif (colore[contatore]==\"white\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n\r\n\r\n\tf1.close()\t\r\n\tf1=open('posres.aut','r')\r\n\twhile ((stringa!='e\\n') and (stringa2!='e\\n')):\r\n\t\tcont=0;\r\n\t\t\r\n\t\t#print cont\r\n\t\tstringa='a'\r\n\t\tstringa2='a'\r\n\t\twhile ((stringa!='*\\n') and (stringa2!='*\\n')):\r\n\t\t\tprint cont\r\n\t\t\trate(frame)\r\n\t\t\t\r\n\t\t\tstringa=f1.readline()\r\n\t\t\tif (stringa!='*\\n'):\r\n\t\t\t\tstringa2=f1.readline()\r\n\t\t\tif (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n'):\r\n\t\t\t\tprint stringa\r\n\t\t\t\tprint 
stringa2\r\n\t\t\t\tpoint[cont].pos.x=float(stringa)\r\n\t\t\t\tpoint[cont].pos.y=float(stringa2)\r\n\t\t\t\t#sleep(0.05)\r\n\t\t\tcont=cont+1;\t\t\t\t\r\nif dim==3:\r\n\tprint dim\r\n\twhile (stringa!='*\\n'):\r\n\t\tcontatore+=1\r\n\t\tstringa=f1.readline()\r\n\t\tstringa2=f1.readline()\r\n\t\tstringa3=f1.readline()\r\n\t\tif (stringa!='*\\n'):\r\n\t\t\tprint (stringa)\r\n\t\t\tprint (stringa2)\r\n\t\t\tprint (stringa3)\r\n\t\t\tif (colore[contatore]==\"red\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n\t\t\tif (colore[contatore]==\"green\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n\t\t\tif (colore[contatore]==\"yellow\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n\t\t\tif (colore[contatore]==\"white\"):\r\n\t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n \r\n\r\n\tf1.close()\t\r\n\tf1=open('posres.aut','r')\r\n\twhile ((stringa!='e\\n') and (stringa2!='e\\n') and (stringa3!='e\\n')):\r\n\t\tcont=0;\r\n\t\t#print cont\r\n\t\tstringa='a'\r\n\t\tstringa2='a'\r\n\t\tstringa3='a'\r\n\t\twhile ((stringa!='*\\n') and (stringa2!='*\\n') and (stringa3!='*\\n')):\r\n\t\t\t#print cont\r\n\t\t\trate(frame)\r\n\t\t\tstringa=f1.readline()\r\n\t\t\tif stringa=='*\\n':\r\n\t\t\t\tprint stringa\r\n\t\t\t\tcont=0\r\n\t\t\t\tstringa=f1.readline()\r\n\t\t\tif stringa!='*\\n':\r\n\t\t\t\tstringa2=f1.readline()\r\n\t\t\tif stringa2!='*\\n':\r\n\t\t\t\tstringa3=f1.readline()\r\n\t\t\t#print stringa2\r\n\t\t\tif (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n' and stringa3!='*\\n' and stringa3!='e\\n'):\r\n\t\t\t\tprint stringa\r\n\t\t\t\tprint stringa2\r\n\t\t\t\tprint stringa3\r\n\t\t\t\tpoint[cont].pos.x=float(stringa)\r\n\t\t\t\tpoint[cont].pos.y=float(stringa2)\r\n\t\t\t\tpoint[cont].pos.z=float(stringa3)\r\n\t\t\t\t#sleep(0.01)\r\n\t\t\tcont=cont+1;\t\t\t\t\r\n"
},
{
"alpha_fraction": 0.45178720355033875,
"alphanum_fraction": 0.5103906989097595,
"avg_line_length": 21.028846740722656,
"blob_id": "0e8413447a02a4ef9018ac1d5e57584e92ffd0ca",
"content_id": "a04c09c5d7e7601a67af89944819c6cf3f733165",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2406,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 104,
"path": "/staticstructurefinder1.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 06 22:11:24 2017\r\n\r\n@author: paolo\r\n\"\"\"\r\n\r\nimport random\r\nimport os\r\n\r\ndef buildpotmemfile(l1,l2,p1,p3):\r\n f2=open(\"potmem10.aut\",\"w\")\r\n \r\n for el in pot:\r\n \r\n f2.write(\"POT\\n\") \r\n f2.write(str(el)+\"\\n\")\r\n value=random.uniform(l1,l2)\r\n f2.write(str(value)+\"\\n\")\r\n \r\n for i in range(dim):\r\n value=random.uniform(p1,p3)\r\n f2.write(str(value)+str(\"\\n\"))\r\n f2.write(\"1000000\\n\")\r\n f2.write(\"*\\n\")\r\n f2.close()\r\n \r\n pass\r\ndef dynamics(n):\r\n f1.write(\"DYNAMICS\\n\")\r\n for i in range(n):\r\n f1.write(\"FOR\\n\")\r\n f1.write(\"(\"+str(i+1)+\",\"+str(i+1)+\")\\n\")\r\n f1.write(\"POT\\n\")\r\n f1.write(\"-\"+str(pot[i])+\"\\n\")\r\n f1.write(\"(\"+str(1)+\",\"+str(n)+\")\\n\")\r\n f1.write(\"(1,100000)\\n\")\r\n f1.write(\"ENDFOR\\n\")\r\n f1.write(\"ENDDYNAMICS\\n\") \r\n \r\n \r\n \r\ndef putbody(n,r1,r2,dim):\r\n f3=open(\"pos.aut\",\"w\")\r\n for j in range(n):\r\n for i in range(dim):\r\n casual=random.uniform(r1,r2)\r\n f3.write(str(casual)+\"\\n\")\r\n f3.write(\"*\\n\") \r\n \r\n \r\ndef build_main_static_part(nel,dim,maxstep,minstep,typesim,typemove,nsteps,chosen):\r\n global f1\r\n f1=open(\"main.aut\",\"w\")\r\n f1.write(\"SETTING\\n\")\r\n f1.write(\"NEL\\n\")\r\n f1.write(str(nel)+\"\\n\");\r\n f1.write(\"NSTEPS\\n\")\r\n f1.write(str(nsteps)+\"\\n\")\r\n f1.write(\"DIM\\n\")\r\n f1.write(str(dim)+\"\\n\")\r\n f1.write(\"MAXSTEP\\n\")\r\n f1.write(str(maxstep)+str(\"\\n\"))\r\n f1.write(\"MINSTEP\\n\")\r\n f1.write(str(minstep)+str(\"\\n\"))\r\n f1.write(\"TYEPSIM\\n\")\r\n f1.write(str(typesim)+str(\"\\n\"))\r\n f1.write(\"TYPEMOVE\\n\")\r\n f1.write(\"1\\n\")\r\n f1.write(\"PUSHUP\\n\")\r\n f1.write(\"1\\n\")\r\n f1.write(\"CHOSEN\\n\")\r\n f1.write(str(chosen)+\"\\n\")\r\n f1.write(\"MULTIFILE\\n\")\r\n f1.write(\"1\\n\")\r\n f1.write(\"TYPESTEP\\n\")\r\n f1.write(\"1\\n\")\r\n f1.write(\"MAXPOTAB\\n\")\r\n f1.write(\"5\\n\")\r\n\r\n f1.write(\"ENDSETTING\\n\")\r\n\r\nn=40\r\nnsteps=200\r\nmaxstep=0.05\r\nminstep=0.02\r\ndim=3\r\ntypesim=3\r\ntypemove=1\r\nl1=0.5\r\nl2=1.5\r\np1=0.2\r\nP2=1.8\r\np3=1.8\r\nchosen=5\r\nr1=-2\r\nr2=2\r\npot=[i+901 for i in range(n)]\r\n\r\nputbody(n,r1,r2,dim)\r\nbuild_main_static_part(n,dim,maxstep,minstep,typesim,typemove,nsteps,chosen)\r\nbuildpotmemfile(l1,l2,p1,p3)\r\ndynamics(n)\r\nf1.close()\r\n \r\n\r\n\r\n "
},
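staticstructurefinder1.py above emits main.aut as keyword lines each followed by a value line, between SETTING and ENDSETTING. The C reader in this repo matches those keywords character by character; purely as an illustration of the file format, a Python sketch that reads the block back into a dict:

def read_settings(path):
    # Collect keyword/value line pairs between SETTING and ENDSETTING.
    settings = {}
    with open(path) as f:
        lines = iter(line.strip() for line in f)
        for line in lines:
            if line == "SETTING":
                break
        for line in lines:
            if line == "ENDSETTING":
                break
            settings[line] = float(next(lines))
    return settings

# read_settings("main.aut") -> {'NEL': 40.0, 'NSTEPS': 200.0, ...}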
{
"alpha_fraction": 0.3482517600059509,
"alphanum_fraction": 0.39300698041915894,
"avg_line_length": 15.820512771606445,
"blob_id": "fdee6290918112821a1f93bcd6ec531c51fe1982",
"content_id": "95b5325d50cf09f21c37ce92ea58f4f384b342d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 39,
"path": "/v2in2017/wheelandworms/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\n\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\n\r\nfor j in range(1):\r\n for i in range(8):\r\n \r\n x=2*math.cos(angle)\r\n y=2*math.sin(angle)\r\n z=0\r\n angle+=math.pi/4\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n \r\n\r\n\r\nfor j in range(2):\r\n for i in range(4):\r\n \r\n x=(2+0.5*i)*(-1)**j\r\n z=(-1)**j\r\n y=0\r\n angle+=math.pi/4\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\n\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.35100287199020386,
"alphanum_fraction": 0.4169054329395294,
"avg_line_length": 16.83333396911621,
"blob_id": "503ffec240eceb436eab7c486095e07ec125bec7",
"content_id": "ed8d0999380f288fa2dc72899aebb6c04429f205",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 698,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 36,
"path": "/v2in2017/oredered structure/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\nimport random\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\n\r\nfor j in range(3):\r\n for i in range(10):\r\n casual=random.uniform(-0.15,0.15) \r\n x=casual\r\n casual=random.uniform(-0.15,0.15) \r\n y=casual\r\n if j==1:\r\n \r\n x+=2\r\n if j==2:\r\n y+=2\t\t\r\n z=0\r\n \r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n \r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"2\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"2\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.4661654233932495,
"alphanum_fraction": 0.5097744464874268,
"avg_line_length": 13.090909004211426,
"blob_id": "caafcd1e2349c0ec7e33189894b417143a83c915",
"content_id": "494db572cb1c1e41838ce6b4b69deb25b06955b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 665,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 44,
"path": "/v2in2017/bee1/make.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\nf1=open(\"pos.aut\",\"w\")\r\nk=0\r\n\r\nfor i in range(20):\r\n x=random.uniform(0,0)\r\n y=random.uniform(2,3)\r\n z=0\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n\r\n\r\n#f1.write(\"*\\n\")\r\n\r\n#f1.close()\r\n\r\n\t\r\n#f1=open(\"vinc.aut\",\"w\")\r\ncont=0\r\ninc=0.25\r\nr=4\r\n\r\nwhile cont<2*math.pi:\r\n\r\n\tx=r*math.cos(cont)\r\n\ty=r*math.sin(cont)\r\n\tz=math.cos(4*cont)\r\n\t\r\n\tcont+=inc\r\n\t#z=random.uniform(0,2)\r\n\t#z=math.sin(cont)\r\n\tprint (str(x))\r\n\tprint (str(y))\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(z)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\n\r\n \r\n\r\nf1.write(\"*\\n\")\r\nf1.close() \r\n"
},
{
"alpha_fraction": 0.4382978677749634,
"alphanum_fraction": 0.5080851316452026,
"avg_line_length": 13.038461685180664,
"blob_id": "4b1d9c1cafc67c430f5faabae1f95fc46c6282bb",
"content_id": "bafaa84a4a4390332e6d97f9885f33abf55f4c02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1175,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 78,
"path": "/v2in2017/apipittrici/casualscript.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "\r\nimport random\r\nf1=open('pos.aut','w')\r\n\r\nx=-2.5\r\ny=0\r\nz=0\r\n#x=0\r\n#y=0\r\nf1.write(str(x)+str('\\n'))\r\nf1.write(str(y)+str('\\n'))\r\n#f1.write(str(z)+str('\\n'))\r\n\r\nfor i in range(10):\r\n\t\r\n\tx=random.uniform(-3,-2)\r\n\ty=random.uniform(-1,1)\r\n\t#x=random.uniform(-1,1)\r\n\t#y=random.uniform(-1,1)\r\n\tz=0\r\n\t#x=0\r\n \t#y=0\r\n\t#z=0\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\r\n\t#f1.write(str(z)+str('\\n'))\r\n\r\nx=2.5\r\ny=0\r\nz=0\r\n#x=0\r\n#y=0\r\n\r\nf1.write(str(x)+str('\\n'))\r\nf1.write(str(y)+str('\\n'))\r\n#f1.write(str(z)+str('\\n'))\r\n\r\nfor i in range(10):\r\n\t\r\n\tx=random.uniform(2,3)\r\n\ty=random.uniform(-1,1)\r\n\t#x=random.uniform(-1,1)\r\n\t#y=random.uniform(-1,1)\r\n\r\n\tz=0\r\n\t#x=0\r\n \t#y=0\r\n\t#z=0\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\r\n\t#f1.write(str(z)+str('\\n'))\r\n \r\n#x=0\r\n#y=0\r\n#z=0\r\n#f1.write(str(x)+str('\\n'))\r\n#f1.write(str(y)+str('\\n'))\r\n#f1.write(str(z)+str('\\n'))\r\n\r\n#for i in range(10):\r\n\t\r\n#\tx=random.uniform(-0.5,0.5)\r\n#\ty=random.uniform(-0.5,0.5)\r\n#\tx=random.uniform(-1,1)\r\n#\ty=random.uniform(-1,1)\r\n\r\n#\tz=0\r\n\t#x=0\r\n \t#y=0\r\n\t#z=0\r\n#\tf1.write(str(x)+str('\\n'))\r\n#\tf1.write(str(y)+str('\\n'))\r\n\t#f1.write(str(z)+str('\\n'))\r\n \r\n\r\nf1.write('*\\n')\r\n\r\n\r\nf1.close()\r\n"
},
{
"alpha_fraction": 0.38353413343429565,
"alphanum_fraction": 0.43574297428131104,
"avg_line_length": 16.961538314819336,
"blob_id": "ef45f2bf63b91184047d42d268888d2b546558ae",
"content_id": "107ac660d9faa5aae5b5dfbc847bb7839bd3a555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 26,
"path": "/v2in2017/bee2/cerchio.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\nimport random \r\nf1=open(\"pos.aut\",\"w\")\r\n\r\nteta=0\r\nwhile teta<6.28:\r\n teta=teta+6.28/20\r\n x=2*math.cos(teta)\r\n y=2*math.sin(teta)\r\n z=0\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n\r\n \r\nfor i in range(20):\r\n x=0\r\n y=-2\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n z=0\r\n f1.write(str(z)+str(\"\\n\"))\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t\r\n"
},
{
"alpha_fraction": 0.46897661685943604,
"alphanum_fraction": 0.49266719818115234,
"avg_line_length": 25.66964340209961,
"blob_id": "c3136550d567a89327b2d3cfa527503cbe1a3ba1",
"content_id": "1387d07a2ff3afe26fd0785401563ebc6bf703bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6205,
"license_type": "no_license",
"max_line_length": 333,
"num_lines": 224,
"path": "/revisionedversion/automata.c",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "//********Name program: Probability interaction********************************\r\n\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include<time.h>\r\n#include \"reader.h\" //********it reads the script wrote in a main.aut file **************************\r\n#include \"reader.c\" \r\n#include \"calculate.h\" //*********In this file there is the procedure to calculate the results of the systems**********************\r\n#include \"calculate.c\"\r\n \r\n//void readfile(int*,float[], float[],float[],int[][10],int[][10],int[][10],int[][10]);\r\n//void calculate(int*,float[], float[],float[],int[][10],int[][10],int[][10],int[][10]);\r\n\r\n//******************With this function the setting table has written on the screen*******************************\r\n\r\nvoid table(int np,int n,float settings[],int typpot[][np],int whoactionbeg[][np],int whoactionend[][np],int begintime[][np],int endtime[][np],int fixed[],double zeroset[],int begif[][np], int endif[][np], int begintif[][np], int endintif[][np], double distif[][np],int **connectactive,double **lengthconnectactive,int preypredator[])\r\n{\r\nint i,j;\r\nprintf(\"POTENZIALE\"); \r\n\r\nfor (i=1;i<=n;i++)\r\n{\r\n\tprintf(\"potenziale %d\\n\",n);\r\n \r\n\tfor (j=0;j<np;j++)\r\n\t{\t\r\n\t\tprintf(\" %d \",typpot[i][j]);\r\n\t\t \r\n\t}\r\n\tprintf(\"action beg\\n\");\r\n\r\n\tfor (j=0;j<np;j++)\r\n\t{\r\n\t\tprintf(\" %d \",whoactionbeg[i][j]);\r\n\t\t \r\n\t}\r\n\r\n\tprintf(\"action end\\n\");\r\n\tfor (j=0;j<np;j++)\r\n\t{\r\n\t\tprintf(\" %d \",whoactionend[i][j]);\r\n\t\t \r\n\t}\r\n\r\n\tprintf(\"begin time\\n\");\r\n\tfor (j=0;j<np;j++)\r\n\t{\r\n\t\tprintf(\" %d \",begintime[i][j]);\r\n\t\t \r\n\t}\r\n\r\n\tprintf(\" end time\\n\");\r\n\tfor (j=0;j<np;j++)\r\n\t{\r\n\t\tprintf(\" %d \",endtime[i][j]);\r\n\t\t \r\n\t}\r\n \r\n} \r\nprintf(\"the settings \\n\");\r\n\r\nfor (i=0;i<=20;i++)\r\n{\r\n\tprintf(\"%f\\n\",settings[i]);\t\r\n}\r\n\r\nfor (i=1;i<=n;i++)\r\n{\r\n\tprintf(\"%d\",fixed[i]);\r\n}\r\nfor (i=1;i<=n;i++)\r\n{\r\n\tprintf(\"prey %d\",preypredator[i]);\r\n}\r\n\r\nprintf(\"\\n\");\r\nfor (i=1;i<=n;i++)\r\n{\r\n\tprintf(\"%f\",zeroset[i]);\r\n}\r\nfor (i=1;i<=n;i++)\r\n{\r\n printf(\"****\\n\");\r\n for (j=1;j<=n;j++)\r\n {\r\n\t printf(\"%d \",connectactive[i][j]);\r\n }\r\n} \r\nfor (i=1;i<=n;i++)\r\n{\r\n printf(\"****\\n\");\r\n for (j=1;j<=n;j++)\r\n {\r\n\t printf(\"%f \",lengthconnectactive[i][j]);\r\n }\r\n} \r\n\r\n}\r\nint main()\r\n{\r\nint n,np;\r\nchar riga[40];\r\n\r\n//************************************************************************************************************************\r\n//******* Two rows of the scripts is read, to undestand that are the numbers of the agents in the system******************\r\n//************************************************************************************************************************ \r\ntime_t m;\r\ntime_t now = time(NULL);\r\n\r\n\r\n\r\n\r\nFILE *f101 = fopen(\"posprob.aut\",\"w\");\r\nfclose(f101);\r\nFILE *f110 = fopen(\"movtable.aut\",\"w\");\r\nfclose(f110);\r\nFILE *f120 = fopen(\"tableprob.aut\",\"w\");\r\nfclose(f120);\r\n\r\nFILE *f100 = fopen(\"main.aut\",\"r\");\r\nwhile ((riga[0]!='N') || (riga[1]!='E') || (riga[2]!='L'))\r\n{\r\nfscanf(f100,\"%s\",riga);\t\r\n}\r\nfscanf(f100,\"%s\",riga);\t\r\nn=atoi(riga);\r\n\r\nfclose(f100);\r\nFILE *f200 = fopen(\"main.aut\",\"r\");\r\nwhile ((riga[0]!='M') || (riga[1]!='A') || (riga[2]!='X') || 
(riga[3]!='P'))\r\n{\r\nfscanf(f200,\"%s\",riga);\t\r\n}\r\nfscanf(f200,\"%s\",riga);\t\r\nnp=atoi(riga);\r\n\r\nfclose(f200);\r\nprintf(\"N %d\",n);\r\nprintf(\"Np %d\",np);\r\n\r\n//********************************************************************************************************************\r\n//******************************* variable to describe every elements that there are******************************\r\n\r\n \t\tdouble x[n+1][10];\r\n\t\tint whoactionbeg[n+1][np];\r\n\t\tint whoactionend[n+1][np]; \r\n\t\tint typpot[n+1][np];\r\n\t\tint begintime[n+1][np];\r\n\t\tint endtime[n+1][np];\r\n\t\tint fixed[n+1];\r\n double zeroset[n+1];\r\n\t\tint begif[n+1][np];\r\n\t\tint endif[n+1][np]; \r\n\t\tint begintif[n+1][np];\r\n\t\tint endintif[n+1][np]; \r\n double distif[n+1][np];\r\n double distprey;\r\n int **connectactive;\r\n double **lengthconnectactive;\r\n int preypredator[n+1];\r\n connectactive=(int **)malloc(sizeof(int *)*(n+2));\r\n int i,j;\r\n for(i=0; i<n+2; i++)\r\n connectactive[i]=(int *)malloc(sizeof(int)*(n+2));\r\n lengthconnectactive=(double **)malloc(sizeof(double *)*(n+2));\r\n for(i=0; i<n+2; i++)\r\n lengthconnectactive[i]=(double *)malloc(sizeof(double)*(n+2));\r\n\r\n \r\n//***********************************************************************************************************************\r\n//*********************************************************************************************************************** \t \r\n\r\nfloat settings[20]; //******in this variable there are very settings parameter of the system***************************\r\ndouble minimi[n+1];\r\n \r\nfor (i=0;i<=20;i++)\r\n{\r\n\r\n\tsettings[i]=0;\r\n}\r\nfor (i=0;i<=n;i++)\r\n{\r\n\tfixed[i]=0;\r\n\tminimi[i]=100000000000; \r\n zeroset[i]=0;\r\n preypredator[i]=-1;\r\n}\r\nprintf(\"minimi begin %f\",minimi[0]); \r\n\r\nfor (i=0;i<=n;i++)\r\n{\r\n for(j=0;j<=n;j++)\r\n {\r\n connectactive[i][j]=-1; \r\n lengthconnectactive[i][j]=0; \r\n }\r\n} \r\n\r\nfor (i=0;i<=n;i++)\r\n{\r\n\t\r\n\tfor (j=0;j<=np;j++)\r\n\t{\r\n\t\ttyppot[i][j]=0;\r\n\t\twhoactionbeg[i][j]=0;\r\n\t \twhoactionend[i][j]=0;\r\n\t\tbegintime[i][j]=0;\r\n\t\tendtime[i][j]=0;\r\n\t\tbegif[i][j]=0;\r\n\t\tendif[i][j]=0;\r\n\t\tbegintif[i][j]=0;\r\n\t\tendintif[i][j]=0;\r\n\t\tdistif[i][j]=0;\r\n \t\t\r\n\t}\r\n}\r\nreadfile(np,n,settings,x,whoactionbeg,whoactionend,typpot,begintime,endtime,fixed,zeroset,begif,endif,begintif,endintif,distif,connectactive, lengthconnectactive,preypredator,distprey); \r\ntable(np,n,settings,typpot,whoactionbeg,whoactionend,begintime,endtime,fixed,zeroset,begif,endif,begintif,endintif,distif,connectactive,lengthconnectactive,preypredator); \r\ncalculate(np,n,settings,x,typpot,whoactionbeg,whoactionend,begintime,endtime,fixed,zeroset,minimi,begif,endif,begintif,endintif,distif,connectactive,lengthconnectactive,preypredator,distprey);\r\ntable(np,n,settings,typpot,whoactionbeg,whoactionend,begintime,endtime,fixed,zeroset,begif,endif,begintif,endintif,distif,connectactive,lengthconnectactive,preypredator);\r\nm = difftime(time(NULL), now);\r\nprintf(\"tempo totale in secondi: %ld\\n\",m);\r\n\r\n}\r\n\r\n\t\r\n\r\n"
},
{
"alpha_fraction": 0.38305166363716125,
"alphanum_fraction": 0.401252806186676,
"avg_line_length": 42.35789489746094,
"blob_id": "3f6cbb6938089c2d16d32a89c3b0ce7d5622edfd",
"content_id": "df63cdd2e129d39b40182748e0983b85f7d9ee55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8461,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 190,
"path": "/v2in2017/show.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "\r\ngruppi=input('numero di gruppi=')\r\ndim=input('inserisci la dimensione:=')\r\n#frame=input('rate:=')\r\n#frame=int(frame)\r\nraggio=[]\r\ncolore=[]\r\ndim=int(dim)\r\nfor i in range(int(gruppi)):\r\n r=input(\"Inserisci raggio:=\")\r\n c=input(\"inserisci colore:=\")\r\n t=input(\"inserisci il range del gruppo n.\"+str(i)+\":=\")\r\n for j in range(int(t)):\r\n raggio.append(r)\r\n colore.append(c)\r\n \r\n \r\n\r\nfrom vpython import *\r\nfrom time import sleep\r\nscene = canvas(title='Missp',\r\n x=0, y=0, width=1366, height=768,\r\ncenter=vector(0,0,0), background=vector(1,1,1)) \r\nf1=open('pos.aut','r')\r\nstringa='aaa'\r\nr=float(0.5)\r\n \r\n\r\n\r\npoint=[]\r\npointgraf=[]\r\ncontatore=-1\r\nif dim==1:\r\n while (stringa!='*\\n'):\r\n contatore+=1\r\n stringa=f1.readline()\r\n if (stringa!='*\\n'):\r\n print (stringa)\r\n if (colore[contatore]==\"red\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.red)) \r\n if (colore[contatore]==\"green\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.green)) \r\n if (colore[contatore]==\"yellow\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.yellow)) \r\n if (colore[contatore]==\"white\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.white)) \r\n \r\n\r\n\r\n f1.close() \r\n f1=open('posres.aut','r')\r\n while (stringa!='e\\n'):\r\n cont=0;\r\n stringa='a'\r\n while (stringa!='*\\n'):\r\n rate(500)\r\n stringa=f1.readline()\r\n if (stringa!='*\\n' and stringa!='e\\n'):\r\n #print stringa\r\n point[cont].pos.x=float(stringa)\r\n #sleep(0.01)\r\n cont=cont+1; \r\n\r\nif dim==2:\r\n f2=open(\"vinc.aut\",\"r\")\r\n vx=\"xx\"\r\n vy=\"yy\"\r\n while vx!=\"*\\n\":\r\n vx=f2.readline()\r\n if vx!='*\\n':\r\n vy=f2.readline()\r\n pointgraf.append(sphere(pos=vec(float(vx),float(vy),0), radius=0.05, color=color.black))\r\n\r\n \r\n ball = sphere(pos=vec(0,0,0), radius=0.01)\r\n #cr = shapes.circle(radius=1, np=64)\r\n print (dim)\r\n\t\r\n while (stringa!='*\\n'):\r\n contatore+=1\r\n stringa=f1.readline()\r\n stringa2=f1.readline()\r\n\t\t\r\n if (stringa!='*\\n'):\r\n #print (stringa)\r\n #print (stringa2)\r\n if (colore[contatore]==\"red\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.red,make_trail=True))\r\n if (colore[contatore]==\"green\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.green,make_trail=True)) \r\n if (colore[contatore]==\"yellow\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.yellow,make_trail=True)) \r\n if (colore[contatore]==\"white\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.white,make_trail=True)) \r\n\r\n\r\n f1.close() \r\n f1=open('posres.aut','r')\r\n while ((stringa!='e\\n') and (stringa2!='e\\n')):\r\n cont=0;\r\n \r\n #print cont\r\n stringa='a'\r\n stringa2='a'\r\n while ((stringa!='*\\n') and (stringa2!='*\\n')):\r\n print (cont)\r\n rate(500)\r\n \r\n stringa=f1.readline()\r\n if (stringa!='*\\n'):\r\n stringa2=f1.readline()\r\n if (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n'):\r\n print (stringa)\r\n print (stringa2)\r\n point[cont].pos.x=float(stringa)\r\n point[cont].pos.y=float(stringa2)\r\n #sleep(0.05)\r\n cont=cont+1; \r\nif dim==3:\r\n 
f2=open(\"vinc.aut\",\"r\")\r\n vx=\"xx\"\r\n vy=\"yy\"\r\n vz=\"zz\"\r\n while vx!=\"*\\n\":\r\n vx=f2.readline()\r\n if vx!='*\\n':\r\n vy=f2.readline()\r\n vz=f2.readline()\r\n pointgraf.append(sphere(pos=vec(float(vx),float(vy),float(vz)), radius=0.05, color=color.black))\r\n\r\n \r\n ball = sphere(pos=vec(0,0,0), radius=0.01)\r\n\r\n\r\n print (dim)\r\n while (stringa!='*\\n'):\r\n contatore+=1\r\n stringa=f1.readline()\r\n stringa2=f1.readline()\r\n stringa3=f1.readline()\r\n if (stringa!='*\\n'):\r\n print (stringa)\r\n print (stringa2)\r\n print (stringa3)\r\n if (colore[contatore]==\"red\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.red,make_trail=True)) \r\n if (colore[contatore]==\"green\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.green,make_trail=True)) \r\n if (colore[contatore]==\"yellow\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.yellow,make_trail=True)) \r\n if (colore[contatore]==\"white\"):\r\n point.append(sphere(pos=vec(float(stringa),0,0), radius=float(raggio[contatore]), color=color.white,make_trail=True)) \r\n \r\n\r\n f1.close() \r\n f1=open('posres.aut','r')\r\n while ((stringa!='e\\n') and (stringa2!='e\\n') and (stringa3!='e\\n')):\r\n cont=0;\r\n #print cont\r\n stringa='a'\r\n stringa2='a'\r\n stringa3='a'\r\n while ((stringa!='*\\n') and (stringa2!='*\\n') and (stringa3!='*\\n')):\r\n #print cont\r\n rate(500)\r\n stringa=f1.readline()\r\n if stringa=='**\\n':\r\n exit()\r\n if stringa=='*\\n':\r\n print (stringa)\r\n cont=0\r\n stringa=f1.readline()\r\n if stringa!='*\\n':\r\n stringa2=f1.readline()\r\n if stringa2!='*\\n':\r\n stringa3=f1.readline()\r\n if stringa=='**\\n' or stringa=='**\\n':\r\n exit()\r\n\r\n #print stringa2\r\n if (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n' and stringa3!='*\\n' and stringa3!='e\\n'):\r\n print (stringa)\r\n print (stringa2)\r\n print (stringa3)\r\n point[cont].pos.x=float(stringa)\r\n point[cont].pos.y=float(stringa2)\r\n point[cont].pos.z=float(stringa3)\r\n print (point[cont].pos.z)\r\n print (\"***************\")\r\n #sleep(0.01)\r\n cont=cont+1; \r\n \r\n"
},
{
"alpha_fraction": 0.41907235980033875,
"alphanum_fraction": 0.44993355870246887,
"avg_line_length": 21.255617141723633,
"blob_id": "7b64d5daa741b93deb9899b728bea8effb1013b8",
"content_id": "b44f79e7c7a6f51ddb3a8139e63c2470f89b6d32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 16558,
"license_type": "no_license",
"max_line_length": 363,
"num_lines": 712,
"path": "/reader.c",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\r\n#include <stdlib.h>\r\n\r\nvoid readfile(int np,int n,float settings[],double x[][10],int whoactionbeg[][np],int whoactionend[][np],int typpot[][np],int begintime[][np],int endtime[][np],int fixed[],double zeroset[],int begif[][np],int endif[][np],int begintif[][np],int endintif[][np],double distif[][np],int **connectactive,double **lengthconnectactive,int preypredator[],double distprey)\r\n{ \r\n \r\n \r\n int nsost=n;\r\n \t\t\t\t\r\n\r\n char look;\r\n char stringa[40];\r\n FILE *f1 = fopen(\"main.aut\",\"r\");\r\n float number,number2;\r\n int contatore,i,k,w,j;\r\n int contatore2,numero;\r\n int ii=0;\r\n int contatore3[nsost+1];\r\n int contatore4[nsost+1];\r\n //printf(\"*************controllo n %d************\",*n);\t\t\t\r\n if( f1==NULL ) {\r\n \t printf(\"Si e' verificato un errore in apertura del file\\n\");\r\n \t exit(1);\r\n }\r\n look='F';\r\n\r\n\r\n while ((stringa[0]!='E') || (stringa[1]!='N') || (stringa[2]!='D') || (stringa[8]!='N') || (stringa[9]!='G'))\r\n { \t \r\n\t fscanf(f1,\"%s\",stringa);\r\n\t//serie di cicli if per riempire il vettore settings\r\n\t if ((stringa[0]=='S') &&(stringa[1]=='E') &&(stringa[2]=='T'))\r\n\t\t{ \r\n\t\t\tlook='V';\r\n\t\t\t//printf(\"ciao\");\r\n\t\t}\r\n\r\n\t if ((stringa[0]=='D') && (stringa[1]=='I') && (stringa[2]=='M') && (look=='V'))\r\n\t\t{\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[0]=number;\r\n \t\t\t \t\t\t\t\r\n\r\n\t\t}\r\n\r\n\r\n\t if ((stringa[0]=='T') && (stringa[1]=='Y') && (stringa[2]=='P') && (stringa[6]=='P') && (look=='V'))\r\n\t\t{\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[1]=number;\r\n \r\n\t\t}\r\n\r\n\t if ((stringa[0]=='N') && (stringa[1]=='S') && (stringa[2]=='T') && (stringa[4]=='P') && (look=='V'))\r\n\t\t{\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[2]=number;\r\n \r\n\t\t}\r\n\t \r\n\t if ((stringa[0]=='M') && (stringa[1]=='I') && (stringa[2]=='N') && (stringa[6]=='P') && (look=='V'))\r\n\t\t{\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[3]=number;\r\n \t\t}\r\n\t \t\r\n\t if ((stringa[0]=='S') && (stringa[1]=='T') && (stringa[2]=='O') && (stringa[8]=='S') && (look=='V'))\r\n\t\t{\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[4]=number;\r\n \t\t}\r\n\t if ((stringa[0]=='M') && (stringa[1]=='A') && (stringa[6]=='P'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[5]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='M') && (stringa[1]=='E') && (stringa[2]=='M') && (stringa[3]=='F'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[6]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='N') && (stringa[1]=='E') && (stringa[2]=='L'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[7]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='M') && (stringa[1]=='U') && (stringa[2]=='L'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[8]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='C') && (stringa[1]=='H') && (stringa[2]=='O'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[9]=number;\r\n \r\n\r\n }\t \t\r\n \r\n\r\n\t if ((stringa[0]=='M') && (stringa[1]=='I') && (stringa[6]=='P'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[10]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='M') && (stringa[1]=='A') && (stringa[4]=='I') &&(stringa[8]=='P'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[11]=number;\r\n \r\n\r\n }\r\n\r\n\t if ((stringa[0]=='T') && (stringa[1]=='Y') && (stringa[4]=='M') &&(stringa[7]=='E'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[12]=number;\r\n 
\r\n\r\n }\r\n\r\n\t if ((stringa[0]=='T') && (stringa[1]=='Y') && (stringa[2]=='P') && (stringa[3]=='E') && (stringa[4]=='S') && (stringa[5]=='I') && (stringa[6]=='M'))\r\n\t {\r\n fscanf(f1,\"%f\",&number);\r\n\t\t\tsettings[13]=number;\r\n \r\n\r\n }\r\n\r\n\r\n if ((stringa[0]=='N') && (stringa[1]=='O') && (stringa[2]=='F') && (stringa[3]=='R') && (stringa[4]=='E') && (stringa[5]=='E'))\r\n\t {\r\n\r\n\t\t fscanf(f1,\"%f\",&number);\r\n\t \t\tsettings[14]=number;\r\n\t\t fscanf(f1,\"%f\",&number);\r\n \t \t\tsettings[17]=number;\r\n\r\n\r\n }\r\n \r\n if ((stringa[0]=='N') && (stringa[1]=='O') && (stringa[2]=='S') && (stringa[3]=='E') && (stringa[4]=='L') && (stringa[5]=='F'))\r\n\t {\r\n\r\n\t\t fscanf(f1,\"%f\",&number);\r\n\t \t\tsettings[15]=number;\r\n \r\n\r\n }\r\n if ((stringa[0]=='P') && (stringa[1]=='U') && (stringa[2]=='S') && (stringa[3]=='H') && (stringa[4]=='U') && (stringa[5]=='P'))\r\n\t {\r\n\r\n\t\t fscanf(f1,\"%f\",&number);\r\n\t \t\tsettings[16]=number;\r\n \r\n\r\n }\r\n \r\n \r\n\r\n\r\n\t if ((stringa[0]=='F') && (stringa[1]=='I') && (stringa[2]=='X'))\r\n \t\t{\r\n\r\n\t while ((stringa[0]!='E'))\r\n {\r\n \r\n fscanf(f1,\"%s\",stringa);\r\n if (stringa[0]!='E')\r\n\t {\t\r\n\t contatore=0;\r\n\t contatore2=0;\t\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\r\n\t char cpy5[contatore-1];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpy5[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero5= atoi(cpy5);\r\n\t printf(\"numero %d\\n\",numero5);\r\n\r\n\t ii=0;\r\n\t char cpy6[contatore2-contatore-1];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy6[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero6= atoi(cpy6);\r\n\t printf(\"numero %d\\n\",numero6);\r\n for (k=numero5;k<=numero6;k++)\r\n\t {\r\n\t\tfixed[k]=1;\t\r\n\t }\r\n\r\n\t}\r\n\r\n\t}\r\n\r\n }\r\n\r\n\r\n\t if ((stringa[0]=='Z') && (stringa[1]=='E') && (stringa[2]=='R'))\r\n \t\t{\r\n\r\n\t while ((stringa[0]!='E'))\r\n {\r\n \r\n fscanf(f1,\"%s\",stringa);\r\n if (stringa[0]!='E')\r\n\t {\t\r\n\t contatore=0;\r\n\t contatore2=0;\t\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\r\n\t char cpy5[contatore-1];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpy5[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero5= atoi(cpy5);\r\n\t printf(\"numero %d\\n\",numero5);\r\n\r\n\t ii=0;\r\n\t char cpy6[contatore2-contatore-1];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy6[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero6= atoi(cpy6);\r\n\t printf(\"numero %d\\n\",numero6);\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t number=atof(stringa); \r\n for (k=numero5;k<=numero6;k++)\r\n\t {\r\n\t\tzeroset[k]=number;\t\r\n\t\tprintf(\"ZEROSETTTTTTTTTTTTT %f\",zeroset);\r\n\t }\r\n\r\n\t}\r\n\r\n\t}\r\n\r\n }\r\n\r\n\r\n\tif ((stringa[0]=='P') && (stringa[1]=='R') && (stringa[2]=='E') && (stringa[3]=='Y'))\r\n\t{\r\n\t look='V';\r\n\t printf(\"IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII\");\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\t\r\n\t char cpy[contatore];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t 
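/* copy the token between '(' and ',' (the first automaton id of the range); atoi() below stops at the trailing comma */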
\t\r\n\t\tcpy[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero10= atoi(cpy);\r\n\t printf(\"ela peppa !!!!!! numero10 %d\\n\",numero10);\r\n\t ii=0; \r\n\t char cpy2[contatore2-1-contatore];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy2[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero11= atoi(cpy2);\r\n\t printf(\"numero11 %d\\n\",numero11);\r\n\t fscanf(f1,\"%s\",stringa); //numero di volte essere preso\r\n\t look='V';\r\n\t int numero12=atoi(stringa); \r\n\t for (i=numero10;i<=numero11;i++)\r\n\t {\r\n\t\t preypredator[i]=numero12;\r\n\t\t\r\n\t }\r\n\t printf(\"blammmm\");\t\r\n\t fscanf(f1,\"%s\",stringa); //distanza minima to catch\r\n\t double numero13=atof(stringa); \r\n\t distprey=numero13;\r\n\t \r\n\t \t\r\n\t}\r\n\r\n\r\n} \r\n // printf(\"ciao\");\r\n // printf(\"ciao %s\",stringa); \r\n \r\n\r\n\r\n\r\n\r\nfclose(f1);\r\nlook='F';\r\n \r\nfopen(\"main.aut\",\"r\");\r\n while ((stringa[0]!='E') || (stringa[1]!='N') || (stringa[2]!='D') || (stringa[4]!='Y'))\r\n {\r\n \tfscanf(f1,\"%s\",stringa);\r\n\tprintf(\"%s\",stringa);\t \r\n\r\n\tif ((stringa[0]=='F') && (stringa[1]=='O') && (stringa[2]=='R'))\r\n\t{ \r\n\t look='V';\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\t\r\n\t char cpy[contatore];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpy[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero1= atoi(cpy);\r\n\t printf(\"numero1 %d\\n\",numero1);\r\n\t ii=0; \r\n\t char cpy2[contatore2-1-contatore];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy2[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero2= atoi(cpy2);\r\n\t printf(\"numero2 %d\\n\",numero2);\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t look='V';\r\n\t printf(\"blammmm\");\t\r\n//**********************************************************************************************\r\n\t if ((stringa[0]=='P') && (stringa[1]=='O') && (stringa[2]=='T'))\r\n\t {\t\r\n\t printf(\"blavvv\");\t\r\n while (look=='V')\r\n {\r\n \r\n\t\tfscanf(f1,\"%s\",stringa);\r\n\t\tnumero=atoi(stringa);\r\n\t\tprintf(\"potenziale %d\",numero);\t\r\n\t\tfor (i=numero1;i<=numero2;i++)\r\n\t\t{\r\n\t\t\tcontatore3[i]=0;\r\n\t\t while (typpot[i][contatore3[i]]!=0)\r\n\t\t\t{\r\n\t\t\t\tcontatore3[i]++;\r\n }\r\n\t\t\ttyppot[i][contatore3[i]]=numero;\r\n\t\t}\t\t\r\n\t \r\n \r\n fscanf(f1,\"%s\",stringa);\r\n\t printf(\"str %s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\t\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore2 %d\\n\",contatore2);\t\r\n\t char cpy3[contatore-1];\t \r\n\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t//printf(\"blo\"); \t\r\n\t\tcpy3[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero3=atoi(cpy3);\r\n\t printf(\"numero3 %d\\n\",numero3);\r\n\t ii=0;\r\n\t char cpy4[contatore2-contatore-1];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy4[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero4= atoi(cpy4);\r\n\t printf(\"numero4 %d\\n\",numero4);\r\n\r\n\t\tfor (i=numero1;i<=numero2;i++)\r\n\t\t{\r\n \t\t whoactionbeg[i][contatore3[i]]=numero3;\r\n\t\t whoactionend[i][contatore3[i]]=numero4;\t \r\n\r\n\r\n\t\t}\t\t\r\n\r\n \t\r\n \t\t\t \r\n\r\n\r\n//codice.............................\r\n fscanf(f1,\"%s\",stringa);\r\n\r\n\t contatore=0;\r\n\t contatore2=0;\t\r\n\t while 
(stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\r\n\t char cpy5[contatore-1];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpy5[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero5= atoi(cpy5);\r\n\t printf(\"numero5 %d\\n\",numero5);\r\n\r\n\t ii=0;\r\n\t char cpy6[contatore2-contatore-1];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy6[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero6= atoi(cpy6);\r\n\t printf(\"numero6 %d\\n\",numero6);\r\n\tfor (i=numero1;i<=numero2;i++)\r\n\t{\r\n \r\n\t begintime[i][contatore3[i]]=numero5;\r\n\t endtime[i][contatore3[i]]=numero6;\t \r\n\t}\r\n\r\n fscanf(f1,\"%s\",stringa);\r\n\t printf(\"blaxx %s\",stringa);\r\n\t if ((stringa[0]=='E') && (stringa[5]=='R')) \r\n\t {\r\n\t \tlook='F';\r\n // fscanf(f1,\"%s\",stringa);\r\n\t\t\r\n\t \t\r\n\t\t}\r\n\t // if (stringa[0]!='E') fscanf(f1,\"%s\",stringa);\r\n \r\n\t printf(\"look %c\",look);\t\r\n\t \t\r\n\t\t\t\r\n\t\r\n//codice.................................\r\n }\r\n\t }\t \t\r\n\t \r\n // printf(\"stringa %s\",cpy);\r\n \r\n \t\r\n\t \r\n\t\t \t\t\t\r\n\t\t\r\n\r\n\t}\r\n\t\r\n//********************* to put IDFOR here ***************************//\r\n//*********************************************************************//\t\r\n\tif ((stringa[0]=='I') && (stringa[1]=='D') && (stringa[2]=='F') && (stringa[5]=='A'))\r\n\t{ \r\n \t fscanf(f1,\"%s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\r\n\t printf(\"stringa %s\",stringa);\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\t\r\n\t char cpyb[contatore];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpyb[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero1= atoi(cpyb);\r\n\t printf(\"numero1 %d\\n\",numero1);\r\n\t ii=0; \r\n\t char cpy2b[contatore2-1-contatore];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy2b[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero2= atoi(cpy2b);\r\n printf(\"numero2 %d\\n\",numero2);\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t int numero3 = atoi(stringa);\r\n\t fscanf(f1,\"%s\",stringa); \r\n\t double numero6 = atof(stringa);\r\n\r\n \t fscanf(f1,\"%s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\t\r\n \t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpyb[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero4= atoi(cpyb);\r\n\t printf(\"numero1 %d\\n\",numero1);\r\n\t ii=0; \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy2b[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t \r\n\t int numero5=atoi(cpy2b);\r\n\t for (i=numero1;i<=numero2;i++)\r\n\t {\r\n\t for (j=numero4;j<=numero5;j++)\r\n\t {\r\n printf(\"modifico %d\",i);\r\n connectactive[i][j]=numero3;\r\n lengthconnectactive[i][j]=numero6;\r\n \r\n//compilare connectionactive\r\n\r\n }\r\n }\r\n\t \r\n\r\n} //chiude IDF\r\n \r\n\t \r\n\r\n//**********************************************************************************************\r\n\r\n//If***********************************************\r\n//***************************************************\r\n\r\n\tif ((stringa[0]=='I') && (stringa[1]=='F'))\r\n\t{ \r\n\t look='V';\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\r\n\t while (stringa[contatore]!=',') 
contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore %d\\n\",contatore);\t\r\n\t char cpy[contatore];\t\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t \t\r\n\t\tcpy[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero1= atoi(cpy);\r\n\t printf(\"numero1 %d\\n\",numero1);\r\n\t ii=0; \r\n\t char cpy2[contatore2-1-contatore];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy2[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero2= atoi(cpy2);\r\n\t printf(\"numero2 %d\\n\",numero2);\r\n\t fscanf(f1,\"%s\",stringa);\r\n\t look='V';\r\n\t printf(\"blammmm\");\t\r\n//**********************************************************************************************\r\n\t if ((stringa[0]=='M') && (stringa[1]=='I') && (stringa[2]=='N') && (stringa[3]=='D'))\r\n\t {\t\r\n\t printf(\"blavvv\");\t\r\n while (look=='V')\r\n {\r\n \r\n\t\tfscanf(f1,\"%s\",stringa);\r\n\t\tprintf(\"STR %s\",stringa);\r\n\t\tnumber2=atof(stringa);\r\n\t\tprintf(\"potenziale %f\",number2);\t\r\n\t\tfor (i=numero1;i<=numero2;i++)\r\n\t\t{\r\n\t\t\tcontatore4[i]=0;\r\n\t\t while (distif[i][contatore4[i]]!=0)\r\n\t\t\t{\r\n\t\t\t\tcontatore4[i]++;\r\n }\r\n distif[i][contatore4[i]]=number2;\r\n // printf(\"sioioioioioioioioioioioioioioio\");\r\n \r\n \r\n \t\t}\t\t\r\n\t \r\n \r\n fscanf(f1,\"%s\",stringa);\r\n\t printf(\"str %s\",stringa);\r\n\t contatore=0;\r\n\t contatore2=0;\t\r\n\t while (stringa[contatore]!=',') contatore++;\r\n\t while (stringa[contatore2]!=')') contatore2++;\r\n\t printf(\"contatore2 %d\\n\",contatore2);\t\r\n\t char cpy3[contatore-1];\t \r\n\r\n\t for (i=1;i<=contatore;i++) \r\n\t {\t\r\n\t\t//printf(\"blo\"); \t\r\n\t\tcpy3[i-1]=stringa[i];\r\n\t }\r\n\r\n\t int numero3=atoi(cpy3);\r\n\t printf(\"numero3 %d\\n\",numero3);\r\n\t ii=0;\r\n\t char cpy4[contatore2-contatore-1];\t \r\n\t for (i=contatore+1;i<=contatore2;i++)\r\n\t {\t\r\n\t\tcpy4[ii]=stringa[i];\r\n\t\tii++;\t\r\n\t\t}\r\n\t int numero4= atoi(cpy4);\r\n\t printf(\"numero4 %d\\n\",numero4);\r\n\r\n\t\tfor (i=numero1;i<=numero2;i++)\r\n\t\t{\r\n \t\t begintif[i][contatore4[i]]=numero3;\r\n\t\t endintif[i][contatore4[i]]=numero4;\t \r\n\r\n\r\n\t\t}\t\t\r\n//codice.............................\r\n fscanf(f1,\"%s\",stringa);\r\n\t printf(\"blaxx %s\",stringa);\r\n\t if ((stringa[0]=='E') && (stringa[4]=='F')) \r\n\t {\r\n\t \tlook='F';\r\n // fscanf(f1,\"%s\",stringa);\r\n\t\t\r\n\t \t\r\n\t\t}\r\n\t // if (stringa[0]!='E') fscanf(f1,\"%s\",stringa);\r\n \r\n\t printf(\"look %c\",look);\t\r\n\t \t\r\n\t\t\t\r\n\t\r\n//codice.................................\r\n }\r\n\t }\t \t\r\n\t \r\n // printf(\"stringa %s\",cpy);\r\n \r\n \t\r\n\t \r\n\t\t \t\t\t\r\n\t\t\r\n\r\n\t}\r\n\r\n\r\n\r\n\r\n\r\n\t}\r\nfclose(f1);\r\nstringa[0]='x';\r\n\r\nFILE *f10 = fopen(\"pos.aut\",\"r\");\r\ncontatore=0;\r\n\r\n\r\nwhile (stringa[0]!='*')\r\n{\r\n\tcontatore++;\r\n \r\nfor (w=1;w<=settings[0];w++)\r\n{\r\n\tfscanf(f10,\"%s\",stringa);\r\n\tx[contatore][w]=atof(stringa);\r\n\r\n}\r\n \r\n\r\n}\r\n// parte lettura codice \r\n\r\n// fclose(f2);\r\n// fclose(f3);\r\n// fclose(f4);\t\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.3901098966598511,
"alphanum_fraction": 0.4432234466075897,
"avg_line_length": 15.533333778381348,
"blob_id": "0a13dfa89bac860a9b60976ebe00640a62438235",
"content_id": "dc86dd8f110018b99cbf9140dad385642d708e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 30,
"path": "/v2in2017/helicopters/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\n\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\n\r\nfor j in range(3):\r\n for i in range(4):\r\n \r\n x=math.cos(angle)+j\r\n y=math.sin(angle)+j\r\n z=j\r\n angle+=math.pi/2\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n f1.write(str(z)+str(\"\\n\"))\r\n \r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"1\\n\")\r\nf1.write(\"1\\n\")\r\nf1.write(\"1\\n\")\r\nf1.write(\"2\\n\")\r\nf1.write(\"2\\n\")\r\nf1.write(\"2\\n\")\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.40220049023628235,
"alphanum_fraction": 0.4621026813983917,
"avg_line_length": 17.463415145874023,
"blob_id": "b920e5a6bfb598add26e4a84efcff0ee806e17b9",
"content_id": "bbddbb463b24a672bf3da9809fc9aa37bd904bd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 41,
"path": "/v2in2017/cellformation/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\nimport random\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\ncont=0\r\nf1.write(\"0\\n\")\r\nf1.write(\"0\\n\")\r\nfor i in range(19):\r\n x=random.uniform(-1,1)\r\n y=random.uniform(-1,1)\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n\r\nfor j in range(2):\r\n for i in range(4):\r\n \r\n x=cont*math.cos(angle)\r\n y=cont*math.sin(angle)\r\n #z=cont*0.25\r\n #z=j\r\n angle+=math.pi/4.0\r\n f1.write(str(x)+str(\"\\n\"))\r\n f1.write(str(y)+str(\"\\n\"))\r\n #f1.write(str(z)+str(\"\\n\"))\r\n cont+=0.5\r\n\r\ncont=0\r\n#f1.write(\"0\\n\")\r\n#f1.write(\"0\\n\")\r\n#f1.write(\"0\\n\")\r\n#f1.write(\"1\\n\")\r\n#f1.write(\"1\\n\")\r\n#f1.write(\"1\\n\")\r\n#f1.write(\"2\\n\")\r\n#f1.write(\"2\\n\")\r\n#f1.write(\"2\\n\")\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.4901960790157318,
"alphanum_fraction": 0.5424836874008179,
"avg_line_length": 10.777777671813965,
"blob_id": "7a6da0521efea8aad26e70a56330448f1a8a8422",
"content_id": "7023f0390fe3f30a5c8e83925daa336c474ec7c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 36,
"path": "/v2in2017/sheepanddogs/make.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\nf1=open(\"pos.aut\",\"w\")\r\nk=0\r\n\r\nfor i in range(20):\r\n\tx=random.uniform(-2,2)\r\n\ty=random.uniform(-2,2)\r\n\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n \r\n \r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t\r\nf1=open(\"vinc.aut\",\"w\")\r\ncont=0\r\ninc=0.025\r\nr=4\r\nwhile cont<2*math.pi:\r\n\t\r\n\tx=r*math.cos(cont)\r\n\ty=r*math.sin(cont)\r\n\tcont+=inc\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\n\r\n \r\n\r\nf1.write(\"*\\n\")\r\nf1.close() "
},
{
"alpha_fraction": 0.49210527539253235,
"alphanum_fraction": 0.5657894611358643,
"avg_line_length": 15.952381134033203,
"blob_id": "dde97394e493afb0be1be94e928e59b8e1bd2cd4",
"content_id": "4f2e62fa5ec4bffbfc1f44d4720a81d0d7fe83d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 21,
"path": "/v2in2017/ex2/cerchio.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\nimport random \r\nf1=open(\"pos.aut\",\"w\")\r\n\r\nteta=0\r\nwhile teta<6.28:\r\n\tteta=teta+6.28/20\r\n\tx=2*math.cos(teta)\r\n\ty=2*math.sin(teta)\r\n \tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n \r\nfor i in range(20):\r\n\tx=random.uniform(-0.5,0.5)\r\n\ty=random.uniform(-0.5,0.5)\r\n \tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t"
},
{
"alpha_fraction": 0.42435774207115173,
"alphanum_fraction": 0.5080875158309937,
"avg_line_length": 13.188405990600586,
"blob_id": "8670645c491ed98422743d53284cfbc862b36479",
"content_id": "188891ede57a7793f2ef4c0511b082a5467bc507",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1051,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 69,
"path": "/v2in2017/ex8/make.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\nf1=open(\"pos.aut\",\"w\")\r\n \r\nk=-4.5\r\nfor i in range(20):\r\n\tx=random.uniform(-2+k,-1)\r\n\ty=random.uniform(3,5)\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\nf1.write(\"-3\\n\")\r\nf1.write(\"0.5\\n\")\r\nf1.write(\"0\\n\")\r\nf1.write(\"0.5\\n\")\r\nf1.write(\"3\\n\")\r\nf1.write(\"0.5\\n\")\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t\r\nf1=open(\"vinc.aut\",\"w\")\r\nalfa=0\r\n\r\n \r\n \r\nfor i in range(30):\r\n\ty=0\r\n\tx=(i)/10.0+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n \r\nfor i in range(30):\r\n\ty=0+(i+1)/10.0\r\n\tx=3+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\nfor i in range(30):\r\n\ty=0\r\n\tx=3+(i)/10.0+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\nfor i in range(30):\r\n\ty=0+(i+1)/10.0\r\n\tx=6+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\nfor i in range(60):\r\n\ty=0\r\n\tx=6+(i)/10.0+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\nfor i in range(30):\r\n\ty=0+(i+1)/10.0\r\n\tx=9+k\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\r\n\r\n\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\t\r\n"
},
{
"alpha_fraction": 0.49166667461395264,
"alphanum_fraction": 0.5458333492279053,
"avg_line_length": 12.117647171020508,
"blob_id": "e87b644ebfebb462689d72ebbbee2efc87675a25",
"content_id": "873382f5e59071bfbee154dfde6e3ceed3e38659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 17,
"path": "/v2in2017/ex4/casualscript.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "\r\nimport random\r\nf1=open('pos.aut','w')\r\n\r\nfor i in range(20):\r\n\t\r\n\tx=random.uniform(-1,1)\r\n\ty=random.uniform(-1,1)\r\n\tz=random.uniform(-1,1)\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\r\n \r\n \r\n\r\nf1.write('*\\n')\r\n\r\n\r\nf1.close()"
},
{
"alpha_fraction": 0.5605095624923706,
"alphanum_fraction": 0.6114649772644043,
"avg_line_length": 13.5,
"blob_id": "e040c4fd4a2d405adf2b8fb1456ad0dd78cd9258",
"content_id": "e7b81a16963b30ba3ece3567e87c3ff9ecd9b147",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 10,
"path": "/v3in2017/automata.h",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "struct automa\r\n{\r\n\t\tfloat x,y,z;\r\n\t\tfloat maxstep;\r\n\t\tint typpot[20];\r\n\t\tfloat begintime[20];\r\n\t\tfloat endtime[20];\r\n}; \r\n\r\nextern struct automa aut[10]; \r\n "
},
{
"alpha_fraction": 0.39559870958328247,
"alphanum_fraction": 0.4437730610370636,
"avg_line_length": 25.2822322845459,
"blob_id": "be1cbd59914837d0416a9916d7ea644f228fb0c4",
"content_id": "952bac75f8c67ef75ea8c2afb138e885c0a15beb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 42535,
"license_type": "no_license",
"max_line_length": 424,
"num_lines": 1559,
"path": "/v3in2017/calculate.c",
"repo_name": "paolopoli1980/missp",
"src_encoding": "MacCentralEurope",
"text": "#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <math.h>\r\n#include <string.h>\r\n\r\n\r\n\r\nvoid seekprob(int np,int n,float dim,double x[][10],double xmov[][10],double prob[],int typpot[][np],int whoactionbeg[][np],int whoactionend[][np],int begintime[][np],int endtime[][np],int t,int v,float settings[],int z,double zeroset[],double minimi[],int begif[][np],int endif[][np],int begintif[][np],int endintif[][np],double distif[][np],int fixed[],int ** connectactive,double **lengthconnectactive,int preypredator[])\r\n{\r\n \r\n \r\n \tint h,contatore,casual,number,w,contaprob,dd,o,ff,contatoreif,metriconif,ibeg,entif,contdim,nnn,l,kk,nomove,ii,memgredyent;\t\r\n\tchar row[40],row2[40],namefile[40],stringnumb[40],stringa[40];\r\n\tfloat distx,distmem,distx2,distmem2,term,metricx,metricx2,vincdist,vincpar,incvinc,numrow,minimal2;\r\n double somprob,probtot,probsce,probnow,probafter,flotcasual,risnow,risafter,distvirt,cutoff,distcut,distcut2,num[20],param[(int)dim+1],memvinc[(int)dim+1],coef[20],vinc[10],xold[n+1][10],minimal,vincminimal,metricaconif,adj,distconnectactive,geodeticvers[20],divisorgeodetic,ptot;\r\n\r\n FILE *f20 = fopen(\"posprob.aut\",\"a\");\r\n\tFILE *f30 = fopen(\"movtable.aut\",\"a\");\r\n\tFILE *f35 = fopen(\"tableprob.aut\",\"a\");\r\n\r\n \th=1;\r\n\tsomprob=0;\r\n\tprobafter=0;\r\n\tprobnow=0;\t\r\n\trisnow=0;\r\n\trisafter=0;\r\n\tmetricx=0;\r\n\tmetricx2=0;\r\n\tint contsom=0;\t\r\n\t\r\n\tfor (h=1;h<=n;h++)\r\n\t{\r\n \trisnow=0;\r\n\trisafter=0;\r\n \tcontatore=0;\r\n\tif (h!=v)\r\n\t{\r\n\twhile ((typpot[h][contatore])!=0)\r\n\t{\r\n\tdistmem=1000000000000;\r\n\tdistmem2=100000000000;\r\n \r\n \r\n//\tprintf(\"nelemento %d\",v); \r\n//******************************************************************************************\r\n//************************inizio la scelta dei potenziali***********************************\r\n//******************************************************************************************\r\n\tif ((v>=whoactionbeg[h][contatore]) && (v<=whoactionend[h][contatore]) && (t>=begintime[h][contatore]) && (t<=endtime[h][contatore]))\r\n\t{\r\n \tif (typpot[h][contatore]>0)\t\r\n\t\t{\r\n\t\tif (settings[8]==0) strcpy(namefile,\"potential.aut\");\r\n\t\tif (settings[8]>0) strcpy(namefile,\"pot\");\r\n \t\tsprintf(stringnumb, \"%d\", typpot[h][contatore]);\r\n\t\tif (settings[8]>0) strcat(namefile,stringnumb);\r\n\t\tif (settings[8]>0) strcat(namefile,\".aut\");\r\n\t\t}\r\n //******************************************************************************************************\r\n //*************************potenziale gaussiano*********************************************************\r\n//********************************************************************************************************\r\n\tif ((typpot[h][contatore]<=-1) && (typpot[h][contatore]>-100)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n \t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0;\r\n \t\t//\tprintf(\"cutoff %f\",cutoff);\r\n \t\tfor 
(w=1;w<=dim;w++)\r\n\t\t{\r\n\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \r\n \r\n\t\t}\r\n \t\tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*exp(risnow);\r\n\t\tprobafter=param[0]*exp(risafter);\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*exp(risafter);\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n } \r\n \t}\r\n \tfclose(f40);\t\r\n\t}// fine potmem1 (guassian pot)\r\n//*************************************************************************************************\r\n//*********************************potenziale arctan centrale*********************************************************************\r\n//*************************************************************************************************\t\r\n if ((typpot[h][contatore]<=-101) && (typpot[h][contatore]>-200)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n \twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n distcut2=0;\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \t\t}\r\n \tdistcut=sqrt(distcut);\r\n \tdistcut2=sqrt(distcut2);\r\n \t\tprobnow=param[0]*atan(sqrt(risnow));\r\n\t\tprobafter=param[0]*atan(sqrt(risafter));\r\n\t\tif (probnow<0) probnow=-probnow;\r\n\t\tif (probafter<0) probafter=-probafter;\r\n if (cutoff<distcut) probafter=0.000000000000001; \r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*atan(sqrt(risafter));\r\n\t\tif (probafter<0) probafter=-probafter;\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif 
(prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t }\r\n\r\n \t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n\t}// fine potmem2 (arctang pot)\r\n//************************************************************************\r\n//********************potenziale quadrico********************************\r\n//************************************************************************\r\n\tif ((typpot[h][contatore]<=-201) && (typpot[h][contatore]>-300)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n \t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n distcut2=0;\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \t}\r\n\r\n \tdistcut=sqrt(distcut);\r\n \tdistcut2=sqrt(distcut2);\r\n\t\tprobnow=(risnow);\r\n\t\tprobafter=(risafter);\r\n if (cutoff<distcut) probafter=0;\r\n if (cutoff<distcut2) probnow=0;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=risafter;\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t\t\t\t\t\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n\r\n\t }\r\n\r\n \t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n\t}// fine potmem3 (quadrica pot)\r\n//*********************************************************************\r\n//********************potenziale sinusoidale***************************\r\n//**********************************************************************\r\n\tif ((typpot[h][contatore]<=-301) && (typpot[h][contatore]>-400)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n distcut2=0;\r\n \t\tfor 
(w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \t}\r\n \tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2); \r\n \tprobnow=param[0]*sin(sqrt(risnow));\r\n\t\tprobafter=param[0]*sin(sqrt(risafter));\r\n\t\tif (probnow<0) probnow=-probnow;\r\n\t\tif (probafter<0) probafter=-probafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*sin(sqrt(risafter));\r\n\t\tif (probafter<0) probafter=-probafter;\r\n }\r\n\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n\r\n \t }\r\n\r\n \t\t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n \t}\r\n//****************** fine potmem4 (sin)**********************\r\n//********************************************************\r\n//**********************potenziale sinc*******************\r\n//*********************************************************\r\n\tif ((typpot[h][contatore]<=-401) && (typpot[h][contatore]>-500)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0; \r\n\t\tfor (w=1;w<=dim;w++)\r\n {\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \t\t}\r\n \tdistcut=sqrt(distcut);\r\n \tdistcut2=sqrt(distcut2);\r\n \tprobnow=param[0]*sin(sqrt(risnow))/sqrt(risnow);\r\n\t\tprobafter=param[0]*sin(sqrt(risafter))/sqrt(risafter);\r\n\t\tif (probnow<0) probnow=-probnow;\r\n\t\tif (probafter<0) probafter=-probafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;;\r\n if (cutoff<distcut2) probnow=0.000000000000001;;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) \r\n {\r\n probafter=param[0]*sin(sqrt(risafter))/sqrt(risafter);\r\n\t\t if (probafter<0) probafter=-probafter;\r\n } \r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif 
(settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n \t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n\r\n \t }\r\n\r\n\t\t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n \t}// *******fine potmem4 (sinc)*******************\r\n //*********************potmem******************************\r\n //**********************1/(x**2+1)**************************\r\n //************************************************************\r\n\tif ((typpot[h][contatore]<=-501) && (typpot[h][contatore]>-600)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n \t\tfor(w=0;w<=dim;w++)\r\n \t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n distcut2=0;\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t}\r\n \tdistcut=sqrt(distcut);\r\n \tdistcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]/(risnow+1);\r\n\t\tprobafter=param[0]/(risafter+1);\r\n\t\tif (probnow<0) probnow=-probnow;\r\n\t\tif (probafter<0) probafter=-probafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) \r\n {\r\n \t\tprobafter=param[0]/(risafter+1);\r\n \tif (probafter<0) probafter=-probafter;\r\n } \r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n\r\n \t\t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n\t}// fine potmem4 (1/x**2+1)\r\n //******************************************************************************************************\r\n //*************************potenziale exp(ax+by+cz+......)*********************************************************\r\n//********************************************************************************************************\r\n\tif ((typpot[h][contatore]<=-601) && (typpot[h][contatore]>-700)) \r\n\t{\r\n \t double mod[20];\r\n \tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n \t\trow[0]='q';\r\n \trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \t\tnumber=atoi(row)*(-1);\r\n 
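/* proceed only if the potential id just read from potmem.aut matches the one assigned to this (h, contatore) slot */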
\t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n \t}\t\t\t\t\t\r\n\t\tfor(w=1;w<=dim;w++) //da sistemare e vedere per i moduli\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tmod[w]=atof(row);\r\n \t}\t\t\t\t\t\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0;\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n if (mod[w]==0)\r\n { \r\n \t risnow=risnow+param[w]*(x[v][w]-x[h][w]);\r\n\t\t risafter=risafter+param[w]*(xmov[z][w]-x[h][w]);\r\n }\r\n if (mod[w]==1)\r\n { \r\n \t risnow=risnow+param[w]*abs((x[v][w]-x[h][w]));\r\n\t\t risafter=risafter+param[w]*abs((xmov[z][w]-x[h][w]));\r\n }\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n }\r\n \t\tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*exp(risnow);\r\n\t\tprobafter=param[0]*exp(risafter);\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*exp(risafter);\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n\r\n } \r\n \t}\r\n \tfclose(f40);\t\r\n\t}// fine potmem7 (exp potential)\r\n\tif ((typpot[h][contatore]<=-701) && (typpot[h][contatore]>-800)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n\t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n \t\tfor(w=0;w<=dim;w++)\r\n \t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n \t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n distcut2=0;\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t}\r\n \tdistcut=sqrt(distcut);\r\n \tdistcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*sqrt(risnow)/(risnow+1);\r\n\t\tprobafter=param[0]*sqrt(risafter)/(risafter+1);\r\n\t\tif (probnow<0) probnow=-probnow;\r\n\t\tif (probafter<0) probafter=-probafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) \r\n {\r\n \t\tprobafter=param[0]*sqrt(risafter)/(risafter+1);\r\n \tif (probafter<0) probafter=-probafter;\r\n } \r\n }\r\n if 
(preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n\r\n \t\t}\t\r\n }\r\n\t\tfclose(f40);\t\t\r\n\t}// fine potmem4 (x/x**2+1)\t\r\n\r\n\tif ((typpot[h][contatore]<=-801) && (typpot[h][contatore]>-900)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n \t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0;\r\n \t\t//\tprintf(\"cutoff %f\",cutoff);\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \r\n \r\n\t\t}\r\n \t\tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*exp((risnow-(int)risafter)*2*3.1416)*sin(risnow);\r\n\t\tprobafter=param[0]*exp((risafter-(int)risafter)*2*3.1416)*sin(risafter);\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*exp((risafter-(int)risafter)*2*3.1416)*sin(risafter);\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n } \r\n \t}\r\n \tfclose(f40);\t\r\n\t}// fine potmem1 (guassian wave)\r\n\r\n\tif ((typpot[h][contatore]<=-901) && (typpot[h][contatore]>-1000)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n \t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0;\r\n \t\t//\tprintf(\"cutoff %f\",cutoff);\r\n \t\tfor 
(w=1;w<=dim;w++)\r\n\t\t{\r\n\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \r\n \r\n\t\t}\r\n \t\tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*exp(-risnow)*risnow;\r\n\t\tprobafter=param[0]*exp(-risafter)*risafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*exp(-risafter)*risafter;\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n } \r\n \t}\r\n \tfclose(f40);\t\r\n\t}// fine potmem1 (x2e-x2)\r\n\tif ((typpot[h][contatore]<=-1001) && (typpot[h][contatore]>-1100)) \r\n\t{\r\n \t\tFILE *f40 = fopen(\"potmem.aut\",\"r\");\r\n \t\trow[0]='q';\r\n\t\trisnow=0;\r\n\t\trisafter=0;\r\n\t\twhile (row[0]!='*')\r\n\t\t{\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\twhile ((row[0]!='P') && (row[0]!='*'))\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t}\r\n\t\tfscanf(f40,\"%s\",row);\r\n \r\n\t\tnumber=atoi(row)*(-1);\r\n \t\tif (number==typpot[h][contatore])\r\n\t\t{\r\n\t\tfor(w=0;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n\t\t\tparam[w]=atof(row);\r\n\t\t}\t\t\t\t\t\r\n\t\t\tfscanf(f40,\"%s\",row);\r\n \t\t\tcutoff=atof(row);\r\n \t\t\tdistcut=0;\r\n \t\t\tdistcut2=0;\r\n \t\t//\tprintf(\"cutoff %f\",cutoff);\r\n \t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\r\n\t\t\trisnow=risnow+param[w]*(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n\t\t\trisafter=risafter+param[w]*(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\t\t\t\r\n distcut=distcut+(xmov[z][w]-x[h][w])*(xmov[z][w]-x[h][w]);\r\n distcut2=distcut2+(x[v][w]-x[h][w])*(x[v][w]-x[h][w]);\r\n \r\n \r\n\t\t}\r\n \t\tdistcut=sqrt(distcut);\r\n distcut2=sqrt(distcut2);\r\n\t\tprobnow=param[0]*exp(-risnow)*risnow*risnow;\r\n\t\tprobafter=param[0]*exp(-risafter)*risafter*risafter;\r\n if (cutoff<distcut) probafter=0.000000000000001;\r\n if (cutoff<distcut2) probnow=0.000000000000001;\r\n if (settings[16]>0)\r\n {\r\n if ((distcut2>cutoff) && (distcut<cutoff)) probafter=0.000000000000001;\r\n if ((distcut2<cutoff) && (distcut>cutoff)) probafter=param[0]*exp(-risafter)*risafter*risafter;\r\n }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t \t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n } \r\n \t}\r\n \tfclose(f40);\t\r\n\t}// fine potmem1 
(x4e-x2)\r\n\r\n\r\n\r\n//*****************************************************************************\r\n//******************potenziale da caricare tramite file************************\r\n//*****************************************************************************\r\n//********spiegazione del cut off per i potenziali impostati***********************\r\n//---------------------------------------------------------------------------------\r\n\tif (typpot[h][contatore]>0)\r\n\t{\r\n\tFILE *f3 = fopen(namefile,\"r\");\r\n row[0]='q';\r\n\twhile ((row[0]!='e'))\r\n\t{\r\n \t\tfscanf(f3,\"%s\",row);\r\n\t\tif (row[0]=='P')\r\n\t\t{\r\n \t\t\tfscanf(f3,\"%s\",row);\r\n\t\t\tnumber=atoi(row);\r\n \t\t\tfscanf(f3,\"%s\",row);\r\n\t\t\tcutoff=atoi(row);\r\n\t\t\tif (number==typpot[h][contatore])\r\n\t\t\t{\t \r\n //******************************implemento per dimensione generica************************************************\r\n\t\t\t\t \twhile(row[0]!='*')\r\n\t\t\t\t\t{\r\n \t \t\t\t\tfor (w=1;w<=dim;w++)\r\n\t\t\t\t {\r\n \t\t\t\t\t\tif (row[0]!='*')\r\n\t\t\t\t\t\t\t{\t\t\r\n\t\t\t\t\t\t\tfscanf(f3,\"%s\",row);\r\n\t\t\t\t\t\t\tif (row[0]!='*')\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tnum[w]=atof(row);\r\n\t\t\t\t\t\t\tnum[w]=num[w]+x[h][w];\r\n \t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\t\r\n\t\t\t\t\t\tdistx=0;\r\n\t\t\t\t\t\tdistx2=0;\r\n\t\t\t\t\t\tmetricx=0;\r\n\t\t\t\t\t\tmetricx2=0;\r\n\t\t\t\t\t\tdistcut=0;\r\n\t\t\t\t\t\tdistcut2=0;\r\n\t\t\t\t\t\tif (row[0]!='*') \r\n \t\t\t {\r\n \t\t\t\t fscanf(f3,\"%s\",row);\r\n\t\t\t\t\t\t fscanf(f3,\"%s\",row2);\r\n\t\t\t\t\t\t cutoff=atof(row2);\r\n \t\t\t\t }\r\n \t\t \t\t\t\tfor (w=1;w<=dim;w++)\r\n\t\t {\r\n \t\t\t\t\tmetricx=metricx+(num[w]-x[v][w])*(num[w]-x[v][w]);\r\n\t\t\t\t\t\tmetricx2=metricx2+(num[w]-xmov[z][w])*(num[w]-xmov[z][w]);\r\n \t\t\t\t\t\t}\r\n \t\t\t\t\tdistx=sqrt(metricx);\r\n\t\t\t\t\t\tdistx2=sqrt(metricx2);\r\n//*****************prende il punto piu vicino alla posizione presente****************\r\n \t\t \t\t\tif (distx<distmem)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tdistmem=distx;\r\n\t\t\t\t\t\t\tprobnow=atof(row);\r\n \t\t\t\t\t\t}\r\n//*****************prende la posiziome piu vicina al posizione scelta*****************************\r\n\t\t\t\t\t\tif (distx2<distmem2)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tdistmem2=distx2;\r\n \t\t\t\t\t\t\tprobafter=atof(row);\r\n \t\t\t\t\t}\r\n \t\t\t\t\t\t}//chiudo while\r\n \t\t\t\t\t\tcontsom++; \r\n \t\t\t\t\t\tdouble pbaftmem=probafter;\r\n\t\t\t\t\t\tdouble pbnowmem=probnow;\t\r\n//*****************per entrambi se esce dal raggio di azione non considera la funzione***********************\r\n\t\t\t\t\t if (cutoff<distmem2) probafter=0.000000000000001;;\r\n \t\t\t\t\tif (cutoff<distmem) probnow=0.000000000000001;;\r\n//**********setta l'opzione \"spingi fuori o lascia fuori************************\r\n//***********se distmem ovvero posizione presente e' ancora dentro il raggio e distmem2 ovvero posizione futura************\r\n//**********e fouri forza fuori con probabilita uguale a quella calcolata in quel punto*************************\r\n\t\t\t\t\t if (settings[16]>0)\r\n \t\t\t\t\t{\r\n \t\t\t\t\tif ((distmem>cutoff) && (distmem2<cutoff)) probafter=0.000000000000001;\r\n \t\t\t\t\tif ((distmem<cutoff) && (distmem2>cutoff)) \r\n \t\t\t\t\t\t{\r\n \t\t\t\t\t\t\tprobafter=pbaftmem; //da capire meglio\r\n\t\t\t\t\t } \r\n \t\t\t }\r\n if (preypredator[h]!=0)\r\n\t\t{\r\n\t\t if (settings[13]!=1000)\r\n\t\t {\r\n\t\t\t\r\n\t \t\tprob[0]=prob[0]+probnow;\r\n\t 
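/* accumulate: prob[0] scores staying at the current position, prob[z] scores candidate move z */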
\t\tprob[z]=prob[z]+probafter;\r\n\t \t }\r\n\t \t\tif (settings[13]==4)\r\n\t \t\t{\r\n\t\t\t\tif (prob[0]==0)\r\n\t\t\t\t{\r\n\t\t\t\t\tprob[0]=probnow;\r\n\t\t\t\t}\r\n\t\t\t\t\t\r\n\t \t\t\tif ((prob[0]!=0) && (probnow<prob[0]))\r\n\t\t\t\t {\r\n\t\t\t\t \r\n\t\t\t\t memgredyent=h;\r\n\t\t\t\t prob[0]=probnow;\r\n\t \t\t }\r\n \t \t\t\t\r\n\t\t\t }\r\n\t\t\r\n \t }\r\n\r\n \t\t\t}\r\n \t\t\t}//chiudo pot\r\n\t\t}//chiudo row 2* \r\n\t\tfclose(f3);\r\n\t}//chiudo if typpot\r\n \t\t//anche qui nello spostamento cambierai le dimensioni e scriverai sui diversi files\r\n\t}\r\n contatore++;\r\n// printf (\"connectactive %d\",connectactive[v][h]);\r\n \r\n if (connectactive[v][h]==0)\r\n {\r\n \r\n prob[0]=prob[0]-probnow+0.0000000000000001;\r\n prob[z]=prob[z]-probafter+0.000000000000001;\r\n } \r\n if (connectactive[v][h]>0) \r\n {\r\n // printf(\"connect[8] %d\",connectactive[1][8]);\r\n\r\n distconnectactive=0;\r\n for (w=1;w<=dim;w++)\r\n {\r\n distconnectactive=distconnectactive+(x[h][w]-x[v][w])*(x[h][w]-x[v][w]);\r\n \r\n }\r\n distconnectactive=sqrt(distconnectactive);\r\n if (distconnectactive<lengthconnectactive[v][h]) \r\n {\r\n connectactive[v][h]--;\r\n // printf (\"connectactive %f\",connectactive[v][h]);\r\n }\r\n }\r\n\r\n\t}//chiudo while typpot\r\n\t}//chiudo if h div\r\n }\r\n//************* setting[15] esclude la considerazione del punto presente nel calcolo delle scelte per il futuro movimento***************\r\n if (settings[15]>0)\r\n {\r\n prob[0]=0.0000000001;\r\n if (settings[13]==2) prob[0]=10000000000000000;\r\n } \r\n//*************************settings[9] Ť il numero di possibili scelte************************ \r\n\t\tif (z==settings[9])\r\n\t\t{\r\n \t\tif (settings[12]==2)\r\n\t\t{\r\n \t\t\tdouble min=1000000000000000;\r\n\t\t\tfor(o=0;o<=settings[9];o++)\r\n\t\t\t{\r\n\t\t\t \tif (min>prob[o]) min=prob[o];\r\n\t\t\t}\r\n\t\t\tfor(o=0;o<=settings[9];o++)\r\n\t\t\t{\r\n\t\t\t \tprob[o]=prob[o]-min;\r\n\t\t\t}\r\n\t\t}\r\n\t\t// mettere opzione parti dal minimo assoluto//***********\r\n//****************** entra solo se ha consideratižo tutti i passi dati nel MAIN**************\r\n \t\tsomprob=0;\r\n if (settings[12]==4) adj=zeroset[v];\r\n if (settings[12]!=4) adj=0;\r\n \t\tfor (dd=1;dd<=settings[9];dd++)\r\n\t\t{\r\n prob[dd]=prob[dd]-adj; \r\n\t\t\tsomprob=somprob+prob[dd];\r\n if ((prob[dd])<minimi[v]) minimi[v]=prob[dd];\r\n \t\t}\r\n prob[0]=prob[0]-adj; \r\n\t\t\tsomprob=somprob+prob[0];\r\n\t\tif (somprob==0)\r\n\t\t{\r\n\t\t\tsomprob=settings[9]*0.01;\r\n\t\t\tfor(o=0;o<=settings[9];o++)\r\n\t\t\t{\r\n\t\t\t\tprob[o]=0.0000000000000000000000000000000000000000000000001;\r\n\t\t\t\tsomprob=somprob+0.0000000000000000000000000000000000000000000000001;\r\n\t\t\t\t}\r\n\t\t}\r\n for (o=1;o<=dim;o++)\r\n {\r\n xold[v][o]=x[v][o];\r\n }\r\n //**************************gestione blocco if********************\r\n contatoreif=0;\r\n metriconif=0;\r\n while (( distif[v][contatoreif]!=0 ) && (contatoreif<=20))\r\n {\r\n metricaconif=0;\r\n for (ibeg=begintif[v][contatoreif];ibeg<=endintif[v][contatoreif];ibeg++)\r\n {\r\n for (contdim=1;contdim<=dim;contdim++)\r\n {\r\n metricaconif=metricaconif+(x[v][contdim]-x[ibeg][contdim])*(x[v][contdim]-x[ibeg][contdim]);\r\n } \r\n if (sqrt(metricaconif)<distif[v][contatoreif])\r\n {\r\n fixed[v]=1; \r\n }\r\n } \r\n contatoreif++;\r\n}\r\n \tif (settings[13]==1) \r\n\t{\r\n \t\tcasual=rand()%1000;\r\n\t\tflotcasual=casual*somprob/1000.0;\r\n\t\tif (flotcasual==0) flotcasual=0.001*somprob/1000.0;\r\n 
\t\tprobsce=0;\r\n\t\tcontaprob=-1;\r\n \t//*******pezzo salva min prob**********\r\n \twhile (probsce<flotcasual)\r\n\t\t{ \t\r\n\t\t\tcontaprob++;\r\n\t\t\tprobsce=probsce+(prob[contaprob]);\r\n \t}\r\n\t\t\r\n\t\tif (contaprob>0)\r\n\t\t\t{\r\n\t\t\tfor (w=1;w<=dim;w++)\r\n {\r\n \t\t\t\tif (settings[11]!=0)\r\n\t\t\t\t{\r\n\t\t\t\tdistvirt=0;\r\n\t\t\t\tfor (o=1;o<=dim;o++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdistvirt=distvirt+(xmov[contaprob][o])*(xmov[contaprob][o]);\r\n \t\t\t} \r\n\t\t\t\tdistvirt=sqrt(distvirt);\r\n\t\t\t\tif (distvirt<settings[11])\r\n\t\t\t\t{\r\n\t\t\t\t\tx[v][w]=xmov[contaprob][w];\r\n\t\t\t\t}\t \r\n\t\t\t}\r\n\t\t\tif (settings[11]==0)\r\n\t\t\t{\r\n\t\t\t\tx[v][w]=xmov[contaprob][w];\r\n\t\t\r\n\t\t\t}\r\n\t\t\t}\t\r\n\t\t\t}\t\t\t\t \r\n\t}\r\n//**************************modalita minimo*****************************\r\n\t\tif (settings[13]==2) \r\n\t\t{\r\n\t\tminimal=1000000000000000;\r\n \t\tfor (l=0;l<=settings[9];l++)\r\n\t\t{\r\n\t\tif (minimal>prob[l])\r\n\t\t{\r\n \t\tminimal=prob[l];\r\n \t\tnnn=l;\t\r\n\t\t}\t\r\n\t\t}\r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n \t\t\t\tif (settings[11]!=0)\r\n\t\t\t\t{\r\n\t\t\t\tdistvirt=0;\r\n\t\t\t\tfor (o=1;o<=dim;o++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdistvirt=distvirt+(xmov[nnn][o])*(xmov[nnn][o]);\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t} \r\n\t\t\t\tdistvirt=sqrt(distvirt);\r\n\t\t\t\tif (distvirt<settings[11])\r\n\t\t\t\t{\r\n\t\t\t\t\tif (nnn>=0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t\t}\r\n\t\t\t\t}\t \r\n\t\t\t}\r\n\t\t\tif (settings[11]==0)\r\n\t\t\t{\r\n\t\t\t\tif (nnn>=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t}\t\r\n\t\t\t}\r\n\t\t\t}\t\r\n\t\t}\t\t\t\r\n//***************************modalita massimo*******************************\r\n//**************************************************************************\r\n\t\tif (settings[13]==3) \r\n\t\t{\r\n\t\tminimal=-10000000000000000;\r\n \t\tfor (l=0;l<=settings[9];l++)\r\n\t\t{\r\n\t\tif (minimal<prob[l])\r\n\t\t{\r\n\t\tminimal=prob[l];\r\n\t\tnnn=l;\t\r\n\t\t}\t\r\n\t\t}\r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n\t\t\t\tif (settings[11]!=0)\r\n\t\t\t\t{\r\n\t\t\t\tdistvirt=0;\r\n\t\t\t\tfor (o=1;o<=dim;o++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdistvirt=distvirt+(xmov[nnn][o])*(xmov[nnn][o]);\r\n\t\t\t\t\t} \r\n\t\t\t\tdistvirt=sqrt(distvirt);\r\n\t\t\t\tif (distvirt<settings[11])\r\n\t\t\t\t{\r\n\t\t\t\t\tif (nnn>=0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t\t}\r\n\t\t\t\t}\t \r\n\t\t\t}\r\n\t\t\tif (settings[11]==0)\r\n\t\t\t{\r\n\t\t\t\tif (nnn>=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t}\t\r\n\t\t\t}\r\n\t\t\t}\t\r\n\t\t \r\n\t\t}\t\t\r\n\t\t//\tprintf(\"memgredyent %d\",memgredyent);\r\n\t\t\r\n//***************************modalita media*******************************\r\n//**************************************************************************\r\n\t\tif (settings[13]==4) \r\n\t\t{\r\n\t\tptot=0;\t\r\n \t\tfor (l=0;l<=settings[9];l++)\r\n\t\t{\r\n\t\tptot=ptot+prob[l];\r\n\t\t}\r\n\t\tptot=ptot/settings[9];\r\n\t\t\r\n\t//\tprintf(\"vediamo un ptot %f and settings %f\",ptot,settings[9]);\r\n\t\tminimal=100000000000000000;\r\n \t\tfor (l=0;l<=settings[9];l++)\r\n\t\t{\r\n\t\tif (abs(ptot-prob[l])<minimal)\r\n\t\t{\r\n\t\t\tnnn=l;\r\n\t\t\tminimal=abs(ptot-prob[l]);\r\n\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t\t\t\r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n\t\t\t\tif (settings[11]!=0)\r\n\t\t\t\t{\r\n\t\t\t\tdistvirt=0;\r\n\t\t\t\tfor 
(o=1;o<=dim;o++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdistvirt=distvirt+(xmov[nnn][o])*(xmov[nnn][o]);\r\n\t\t\t\t\t} \r\n\t\t\t\tdistvirt=sqrt(distvirt);\r\n\t\t\t\tif (distvirt<settings[11])\r\n\t\t\t\t{\r\n\t\t\t\t\tif (nnn>=0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t\t}\r\n\t\t\t\t}\t \r\n\t\t\t}\r\n\t\t\tif (settings[11]==0)\r\n\t\t\t{\r\n\t\t\t\tif (nnn>=0)\r\n\t\t\t\t{\r\n\t\t\t\t\tx[v][w]=xmov[nnn][w];\r\n\t\t\t\t}\t\r\n\t\t\t}\r\n\t\t\t}\t\r\n\t\t \r\n\t\t}\t\r\n\t\t\t//printf(\"fine\");\r\n \t\tif ((settings[13]==10000) && (memgredyent<=n))\r\n\t {\r\n\t\t\t//calcolare distanza gredy e versore spostamento\r\n \t\t\tprintf(\"qui va bene %d\",v);\r\n\t\t\tprintf(\"settings %f\",settings[3]);\r\n\t\t\tprintf(\"divisorgeodetic %f\",divisorgeodetic);\r\n\t\t\tprintf(\"memgredyent %d\",memgredyent);\r\n \t\t\r\n divisorgeodetic=0;\r\n\r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n \t\t geodeticvers[w]=0;\t\t\t\t\t \r\n\r\n\t\t}\r\n\r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n\t\t geodeticvers[w]=(x[v][w]-x[memgredyent][w])*(x[v][w]-x[memgredyent][w]);\r\n\t\t\tdivisorgeodetic=divisorgeodetic+geodeticvers[w]*geodeticvers[w];\r\n\t\t\t\t\t \r\n\r\n\t\t}\r\n\t\tdivisorgeodetic=sqrt(divisorgeodetic);\r\n \t\t\r\n\t//\tdivisorgeodetic=divisorgeodetic/settings[3];\r\n \r\n \t\tfor (w=1;w<=dim;w++)\r\n {\r\n \t\t\tx[v][w]=x[v][w]+geodeticvers[w]/divisorgeodetic;\t\t\t\t\t \r\n \r\n\t\t}\r\n\t\t\r\n\t\t\t\r\n\t\t\t\t\t}\t\t\t\r\n\t\t\r\n //*****************************************************************************\r\n //******************verifica la presenza di vincoli e contorni prima di muoversi\r\n //***************************************************************************\r\n if (settings[14]>0) \r\n { \t\t\r\n vincminimal=10000000000000000; \r\n \tFILE *f36 = fopen(\"vinc.aut\",\"r\");\r\n \trow[0]='q';\r\n while (row[0]!='*')\r\n {\r\n for (o=1;o<=dim;o++)\r\n {\r\n if (row[0]!='*')\r\n {\r\n fscanf(f36,\"%s\",row);\r\n if (row[0]!='*') vinc[o]=atof(row);\r\n }\r\n }\r\n if (row[0]!='*')\r\n { \r\n for (o=1;o<=10;o++)\r\n {\r\n coef[o]=0;\r\n }\r\n for (o=1;o<=dim;o++)\r\n {\r\n coef[o]=(x[v][o]-xold[v][o]);\r\n }\r\n vincpar=1.0/settings[14];\r\n incvinc=0; \r\n for (o=1;o<=settings[14];o++)\r\n {\r\n vincdist=0;\r\n for (kk=1;kk<=dim;kk++)\r\n { \r\n vincdist=vincdist+(xold[v][kk]+coef[kk]*(incvinc)-vinc[kk])*(xold[v][kk]+coef[kk]*(incvinc)-vinc[kk]);\r\n }\r\n vincdist=sqrt(vincdist); \r\n if (vincdist<vincminimal) vincminimal=vincdist;\r\n incvinc=incvinc+vincpar;\r\n }\r\n } //chiude if row \t \r\n } // chiude while row\r\n nomove=0;\r\n if (vincminimal<settings[17]) // distanza di tocco per riconoscere il vincolo\r\n {\r\n for (kk=1;kk<=dim;kk++)\r\n {\r\n\t\t\t\r\n x[v][kk]=xold[v][kk];\r\n } \r\n }\r\n \r\nfclose(f36);\r\n} // chiude settings 14\r\n//****************************scrittura per i vari controllo*******************\r\n//*****************************************************************************\r\n\t\tif (settings[6]==1)\r\n\t\t{\r\n\t\tfor (w=1;w<=dim;w++)\r\n\t\t{\r\n\t\t\tfprintf(f20,\"%f\\n\",x[v][w]);\t\t\t\r\n\t\t}\r\n\t\t\tif (contaprob==0) fprintf(f20,\"nonmosso\\n\");\r\n\t\t\tif (contaprob>0) fprintf(f20,\"mosso\\n\");\r\n \t\t\tfprintf(f20,\"somprobafter %f , somprobnow %f\\n\",prob[contaprob],prob[0]);\r\n\tfor (ff=1;ff<=settings[9];ff++)\r\n\t{\t\t\r\n\t \tfor (w=1;w<=dim;w++)\r\n\t\t\t{\r\n\t\t\t\tfprintf(f30,\"%f\\n\",xmov[ff][w]);\r\n\t\t\t}\r\n\t\tfprintf(f30,\"*\\n\");\r\n\t}\t\t\r\n 
fprintf(f30,\"***********************************\\n\");\r\n\t\t}// chiudo setting \r\n\tfor (o=0;o<=settings[9];o++)\r\n\t{\r\n\t\tfprintf(f35,\"%f\\n\",prob[o]);\r\n\t}\r\n\t\tfprintf(f35,\"*\\n\");\r\n\t}//chiudo if scelta prob (settings[9])\r\nfclose(f20); \r\nfclose(f30); \r\nfclose(f35);\r\n}//chiudo void\r\n\r\nvoid calculate(int np,int n,float settings[],double x[][10],int typpot[][np],int whoactionbeg[][np],int whoactionend[][np],int begintime[][np],int endtime[][np],int fixed[],double zeroset[],double minimi[],int begif[][np], int endif[][np], int begintif[][np], int endintif[][np],double distif[][np],int **connectactive,double **lengthconnectactive,int preypredator[],double distpreyset)\r\n{\r\nint step=0;\r\nchar stringa[40];\r\nfloat casual;\r\nint i,sign,k,j,w,r,z,s,t,distpreypredator;\r\ndouble prob[(int)settings[9]+1];\r\ndouble xmov[(int)settings[9]+1][10];\r\nFILE *f4 = fopen(\"posres.aut\",\"w\");\r\nFILE *f5=fopen(\"minimi.aut\",\"w\");\r\n//***********dimension 1*****************************\r\n//minabs=10000000000000;\r\n//printf(\"connect[8] %d\",connectactive[1][8]);\r\n \tfor (i=0;i<=settings[9];i++)\r\n\t{ \t\r\n\t\tfor(j=0;j<=10;j++)\r\n\t\t{\r\n\t\t\txmov[i][j]=0;\r\n\t\t} \r\n\t}\r\n \tsrand((unsigned)time(NULL));\r\n \tfor(j=1;j<=settings[2];j++)\r\n\t{\r\n \tfor (i=1;i<=n;i++)\r\n\t{\t\r\n \t\tfor (r=0;r<=settings[9]+1;r++) prob[r]=0;\r\n\t\tfor (z=1;z<=settings[9];z++)\r\n\t\t{\r\n\t \t//qui va ciclato sulle realizzazioni\r\n\t\tif (fixed[i]!=1) \r\n\t\t{\r\n//*************il primo tipo di step \\E8 settato qui in futuro degli if cambieranno varie tipologie*************\r\n for (w=1;w<=settings[0];w++)\r\n\t {\r\n \t\tcasual=((rand()%100 +1)/(float)(100));\r\n\t\t sign=(rand()%2);\r\n\t\t if (sign==0) sign=-1;\r\n\t\t if (sign==1) sign=1;\r\n\t\t casual=casual*settings[5]*sign;\r\n \t\tif ((casual>0) && (casual<settings[10])) casual=settings[10];\r\n \t\tif ((casual<0) && (casual>-settings[10])) casual=-settings[10];\r\n \t\txmov[z][w]=x[i][w]+casual;\r\n \t\txmov[0][w]=x[i][w];\r\n \t\t\r\n \t\t}\r\n \t \tprob[0]=0;\r\n\t\tseekprob(np,n,settings[0],x,xmov,prob,typpot,whoactionbeg,whoactionend,begintime,endtime,j,i,settings,z,zeroset,minimi,begif,endif,begintif,endintif,distif,fixed,connectactive,lengthconnectactive,preypredator);\r\n\t\t//***************controlla le distanze con le prede e cala 1 in caso*********\r\n \t\t\r\n \t}//chiudo fixed\r\n\r\n\t}//chiduo for realizzazioni\t\t\r\n\t}//chiudo for\r\n\t\tfor (t=1;t<=n;t++)\r\n\t\t{\r\n\t\t\tfor (s=1;s<=n;s++)\r\n\t\t\t{\r\n\t\t\t\tif (s!=t)\r\n\t\t\t\t{\r\n\t\t\t\t\tif (preypredator[t]>0 && preypredator[s]<0)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdistpreypredator=0;\r\n\t\t\t\t\t\tfor (w=1;w<=settings[0];w++)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tdistpreypredator=distpreypredator+(x[t][w]-x[s][w])*(x[t][w]-x[s][w]);\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tdistpreypredator=sqrt(distpreypredator);\r\n\t\t\t\t\t\tif (distpreypredator<distpreyset) preypredator[t]--;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\r\n\r\n\tfor (k=1;k<=n;k++)\r\n\t\t{\t\t\r\n for (w=1;w<=settings[0];w++)\r\n\t {\r\n\t\t\tfprintf(f4,\"%f\\n\",x[k][w]);\r\n \t\t}\r\n\t}\r\n\tfprintf(f4,\"*\\n\");\r\n}//chiudo for\r\nfprintf(f4,\"**\\n\");\r\n for (k=1;k<=n;k++)\r\n {\r\n fprintf(f5,\"%f\\n\",minimi[k]); \r\n }\r\n\tfprintf(f5,\"*\\n\");\r\nfclose(f4);\r\nfclose(f5);\r\n}\r\n"
},
{
"alpha_fraction": 0.4731001555919647,
"alphanum_fraction": 0.5243253111839294,
"avg_line_length": 26.291458129882812,
"blob_id": "8377ef316e54bd784dd33d91c6668f5e3294f0c0",
"content_id": "a55b6764c4f78566cb332cbd5399ed9fffb585db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11264,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 398,
"path": "/v2in2017/movieshow.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "from visual import *\r\nfrom time import sleep\r\nnex=13\r\nfor t in range(nex):\r\n# gruppi=input('numero di gruppi=')\r\n# dim=input('inserisci la dimensione:=')\r\n# frame=input('rate:=')\r\n raggio=[]\r\n colore=[]\r\n \r\n# for i in range(gruppi):\r\n# \tr=input(\"Inserisci raggio:=\")\r\n# \tc=raw_input(\"inserisci colore:=\")\r\n# \tt=input(\"inserisci il range del gruppo n.\"+str(i)+\":=\")\r\n# \tfor j in range(t):\r\n# \t\traggio.append(r)\r\n# \t\tcolore.append(c)\r\n\t \t\t\r\n \t\r\n \r\n \r\n# f1=open('pos.aut','r')\r\n\r\n stringa='aaa'\r\n r=0.5\r\n if t==0:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex1/'\r\n\t\r\n\r\n gruppi=2\r\n dim=3\r\n frame=400\r\n \tfor k in range(20):\r\n \tcolore.append('red')\r\n\t\traggio.append(0.1)\r\n\tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n\r\n if t==1:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex2/'\r\n\tpoint=[]\r\n\tpointgraf=[]\r\n\t\r\n gruppi=2\r\n dim=2\r\n frame=400\r\n \tfor k in range(20):\r\n \tcolore.append('red')\r\n\t\traggio.append(0.1)\r\n\tfor k in range(20):\r\n \tcolore.append('yellow')\r\n\t\traggio.append(0.05)\r\n\r\n if t==2:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex4/'\r\n\r\n gruppi=2\r\n dim=2\r\n frame=400\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.2)\r\n\r\n if t==3:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex5/'\r\n\r\n gruppi=2\r\n dim=3\r\n frame=400\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n\r\n if t==4:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex6/'\r\n\r\n gruppi=2\r\n dim=3\r\n frame=400\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1)\r\n\r\n if t==5:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tcolore.append('green')\r\n\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1)\r\n\tcolore[0]='red'\r\n\r\n path='tutorial/ex7/'\r\n\r\n gruppi=1\r\n dim=2\r\n frame=400\r\n if t==6:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex8/'\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1) \r\n \tcolore.append('red')\r\n\traggio.append(0.1) \r\n\r\n dim=2\r\n frame=400\r\n if t==7:\r\n path='tutorial/ex9/'\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n \r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1) \r\n \tcolore.append('red')\r\n\traggio.append(0.1) \r\n\r\n dim=2\r\n frame=400\r\n\r\n if t==8:\r\n scene = display(title='Visual Missp',x=300, y=200, 
width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex10/'\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tfor k in range(20):\r\n \tcolore.append('red')\r\n\t\traggio.append(0.1)\r\n\r\n gruppi=2\r\n dim=3\r\n frame=400\r\n\r\n# if t==9:\r\n# scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n# path='tutorial/ex11/'\r\n# \tfor k in range(20):\r\n# \tcolore.append('green')\r\n#\t\traggio.append(0.1)\r\n# \tfor k in range(20):\r\n# \tcolore.append('red')\r\n#\t\traggio.append(0.1)\r\n\r\n# gruppi=2\r\n# dim=3\r\n# frame=400\r\n if t==10:\r\n path='tutorial/ex12/'\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n \r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tfor k in range(20):\r\n \tcolore.append('red')\r\n\t\traggio.append(0.1)\r\n \tfor k in range(20):\r\n \tcolore.append('yellow')\r\n\t\traggio.append(0.1)\r\n\r\n gruppi=2\r\n dim=3\r\n frame=400\r\n if t==11:\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n \r\n path='tutorial/ex13/'\r\n \r\n \r\n \tfor k in range(5):\r\n \tcolore.append('yellow')\r\n\t\traggio.append(0.1)\r\n \tfor k in range(55):\r\n \tcolore.append('red')\r\n\t\traggio.append(0.1)\r\n \r\n gruppi=2\r\n dim=3\r\n frame=400\r\n if t==12:\r\n path='tutorial/ex14/'\r\n scene = display(title='Visual Missp',x=300, y=200, width=600, height=600,center=(0,0,0), background=(0,1,1))\r\n\r\n path='tutorial/ex14/'\r\n \tfor k in range(20):\r\n \tcolore.append('green')\r\n\t\traggio.append(0.1)\r\n \tcolore.append('red')\r\n\traggio.append(0.1)\r\n \r\n gruppi=2\r\n dim=3\r\n frame=400\r\n r=[0.1,0.1]\r\n \r\n \r\n f1=open(str(path)+'pos.aut','r')\r\n \r\n \r\n \r\n point=[]\r\n pointgraf=[]\r\n contatore=-1\r\n if dim==1:\r\n \twhile (stringa!='*\\n'):\r\n \t\tcontatore+=1\r\n \t\tstringa=f1.readline()\r\n \t\tif (stringa!='*\\n'):\r\n \t\t\tprint (stringa)\r\n \t\t\tif (colore[contatore]==\"red\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n \t\t\tif (colore[contatore]==\"green\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n \t\t\tif (colore[contatore]==\"yellow\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n \t\t\tif (colore[contatore]==\"white\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n \r\n \r\n \r\n \tf1.close()\t\r\n \tf1=open(str(path)+'posres.aut','r')\r\n \twhile (stringa!='e\\n'):\r\n \t\tcont=0;\r\n \t\tstringa='a'\r\n \t\twhile (stringa!='*\\n'):\r\n \t\t\trate(frame)\r\n \t\t\tstringa=f1.readline()\r\n \t\t\tif (stringa!='*\\n' and stringa!='e\\n'):\r\n \t\t\t\t#print stringa\r\n \t\t\t\tpoint[cont].pos.x=float(stringa)\r\n \t\t\t\t#sleep(0.01)\r\n \t\t\tcont=cont+1;\t\r\n \r\n if dim==2:\r\n \tf2=open(str(path)+\"vinc.aut\",\"r\")\r\n \tvx=\"xx\"\r\n \tvy=\"yy\"\r\n \twhile vx!=\"*\\n\":\r\n \t\tvx=f2.readline()\r\n \t\tif vx!='*\\n':\r\n \t\t\tvy=f2.readline()\r\n \t\t\tpointgraf.append(sphere(pos=(float(vx),float(vy),0), radius=0.05, color=color.white))\r\n \r\n \t\r\n \tball = sphere(pos=(0,0,0), radius=0.01)\r\n \t#cr = shapes.circle(radius=1, np=64)\r\n \tprint dim\r\n 
\twhile (stringa!='*\\n'):\r\n \t\tcontatore+=1\r\n \t\tstringa=f1.readline()\r\n \t\tstringa2=f1.readline()\r\n \r\n \t\tif (stringa!='*\\n'):\r\n \t\t\t#print (stringa)\r\n \t\t\t#print (stringa2)\r\n \t\t\tif (colore[contatore]==\"red\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n \t\t\tif (colore[contatore]==\"green\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n \t\t\tif (colore[contatore]==\"yellow\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n \t\t\tif (colore[contatore]==\"white\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n \r\n \r\n \tf1.close()\t\r\n \tf1=open(str(path)+'posres.aut','r')\r\n \twhile ((stringa!='e\\n') and (stringa2!='e\\n')):\r\n \t\tcont=0;\r\n\t\tif stringa=='**\\n' or stringa2=='**\\n':\r\n\t break\r\n \t\t\r\n \t\t#print cont\r\n \t\tstringa='a'\r\n \t\tstringa2='a'\r\n \t\twhile ((stringa!='*\\n') and (stringa2!='*\\n')):\r\n \t\t\tprint cont\r\n \t\t\trate(frame)\r\n \t\t\t\r\n \t\t\tstringa=f1.readline()\r\n \t\t\tif (stringa!='*\\n'):\r\n \t\t\t\tstringa2=f1.readline()\r\n \t\t\tif stringa=='**\\n' or stringa2=='**\\n':\r\n \t\t\t\tbreak\r\n\r\n \t\t\tif (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n'):\r\n \t\t\t\tprint stringa\r\n \t\t\t\tprint stringa2\r\n \t\t\t\tpoint[cont].pos.x=float(stringa)\r\n \t\t\t\tpoint[cont].pos.y=float(stringa2)\r\n \t\t\t\t#sleep(0.05)\r\n \t\t\tcont=cont+1;\t\t\t\t\r\n if dim==3:\r\n \tf2=open(str(path)+\"vinc.aut\",\"r\")\r\n\r\n \tprint dim\r\n\tvx=\"xx\"\r\n\tvy=\"yy\"\r\n\tvz=\"zz\"\r\n\twhile vx!=\"*\\n\":\r\n\t\tvx=f2.readline()\r\n\t\tif vx!='*\\n':\r\n\t\t\tvy=f2.readline()\r\n\t\t\tvz=f2.readline()\r\n\t\t\tpointgraf.append(sphere(pos=(float(vx),float(vy),float(vz)), radius=0.05, color=color.white))\r\n\r\n\t\r\n\tball = sphere(pos=(0,0,0), radius=0.01)\r\n \twhile (stringa!='*\\n'):\r\n \t\tcontatore+=1\r\n \t\tstringa=f1.readline()\r\n \t\tstringa2=f1.readline()\r\n \t\tstringa3=f1.readline()\r\n \t\tif (stringa!='*\\n'):\r\n \t\t\tprint (stringa)\r\n \t\t\tprint (stringa2)\r\n \t\t\tprint (stringa3)\r\n \t\t\tif (colore[contatore]==\"red\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.red))\t \t\r\n \t\t\tif (colore[contatore]==\"green\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.green))\t \t\r\n \t\t\tif (colore[contatore]==\"yellow\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.yellow))\t \t\r\n \t\t\tif (colore[contatore]==\"white\"):\r\n \t\t \t\tpoint.append(sphere(pos=(float(stringa),0,0), radius=raggio[contatore], color=color.white))\t \t\r\n \r\n \r\n \tf1.close()\t\r\n \tf1=open(str(path)+'posres.aut','r')\r\n \twhile ((stringa!='e\\n') and (stringa2!='e\\n') and (stringa3!='e\\n')):\r\n \t\tcont=0;\r\n \t\t#print cont\r\n\t\tif stringa=='**\\n' or stringa2=='**\\n' or stringa3=='**\\n':\r\n\t break\r\n\r\n \t\tstringa='a'\r\n \t\tstringa2='a'\r\n \t\tstringa3='a'\r\n\r\n \t\twhile ((stringa!='*\\n') and (stringa2!='*\\n') and (stringa3!='*\\n')):\r\n \t\t\t#print cont\r\n \t\t\trate(frame)\r\n \t\t\tstringa=f1.readline()\r\n \t\t\tif stringa=='**\\n':\r\n \t\t\t\tbreak\r\n \t\t\tif stringa=='*\\n':\r\n \t\t\t\tprint stringa\r\n 
\t\t\t\tcont=0\r\n \t\t\t\tstringa=f1.readline()\r\n \t\t\tif stringa!='*\\n':\r\n \t\t\t\tstringa2=f1.readline()\r\n \t\t\tif stringa2!='*\\n':\r\n \t\t\t\tstringa3=f1.readline()\r\n \t\t\tif stringa=='**\\n' or stringa2=='**\\n' or stringa3=='**\\n':\r\n \t\t\t\tbreak\r\n \r\n \t\t\t#print stringa2\r\n \t\t\tif (stringa!='*\\n' and stringa!='e\\n' and stringa2!='*\\n' and stringa2!='e\\n' and stringa3!='*\\n' and stringa3!='e\\n'):\r\n \t\t\t\tprint stringa\r\n \t\t\t\tprint stringa2\r\n \t\t\t\tprint stringa3\r\n \t\t\t\tpoint[cont].pos.x=float(stringa)\r\n \t\t\t\tpoint[cont].pos.y=float(stringa2)\r\n \t\t\t\tpoint[cont].pos.z=float(stringa3)\r\n \t\t\t\t#sleep(0.01)\r\n \t\t\tcont=cont+1\t\t\t\t\r\n"
},
{
"alpha_fraction": 0.44949495792388916,
"alphanum_fraction": 0.5111111402511597,
"avg_line_length": 15.636363983154297,
"blob_id": "15d1163d5542aafea841c212618f721f1f8f103e",
"content_id": "bc9c71fc9ae6334f2eb06d4ae826ed002875a527",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 55,
"path": "/v2in2017/twisterini/makepos.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import math\r\nimport random\r\n\r\nf1=open(\"pos.aut\",\"w\")\r\nangle=0\r\nconst=0\r\nx=0\r\nfor i in range(2):\r\n\t#x=random.uniform(-3,-2)\r\n\t#y=random.uniform(-3,-2)\r\n\t#z=random.uniform(-3,-2)\r\n\tx+=3\r\n\ty=0\r\n\t#z=0\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\t\r\n\t#f1.write(str(z)+str('\\n'))\r\n\r\nx=1\r\nfor i in range(2):\r\n\t#x=random.uniform(3,4)\r\n\t#y=random.uniform(3,4)\r\n\t#z=random.uniform(3,4)\r\n\tx+=3\r\n\ty=0\r\n\t#z=0\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\t\r\n\t#f1.write(str(z)+str('\\n'))\r\n\r\nx=2\r\nfor i in range(2):\r\n\t#x=random.uniform(3,4)\r\n\t#y=random.uniform(3,4)\r\n\t#z=random.uniform(3,4)\r\n\tx+=3\r\n\ty=0\r\n\t#z=0\r\n\tf1.write(str(x)+str('\\n'))\r\n\tf1.write(str(y)+str('\\n'))\t\r\n\t#f1.write(str(z)+str('\\n'))\r\n\r\n\r\n#f1.write(str('-1')+str('\\n'))\r\n#f1.write(str('-1')+str('\\n'))\t\r\n#f1.write(str('0')+str('\\n'))\r\n#f1.write(str('1')+str('\\n'))\r\n#f1.write(str('1')+str('\\n'))\t\r\n#f1.write(str('0')+str('\\n'))\r\n\r\n#f1.write(str('0')+str('\\n'))\t\r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.42610836029052734,
"alphanum_fraction": 0.4950738847255707,
"avg_line_length": 12.5,
"blob_id": "8a35c2994357e077599fa5bf2562221e63344ca5",
"content_id": "94844d6c084a43d9fa4c5543af297c76ccb26b26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 28,
"path": "/v2in2017/ex3/casualscript.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "\r\nimport random\r\nf1=open('pos.aut','w')\r\n\r\nfor j in range(5):\r\n\tfor i in range(5):\r\n\t\r\n\t\tx=0.6*i\r\n\t\ty=j*0.6\r\n \t\tf1.write(str(x)+str('\\n'))\r\n\t\tf1.write(str(y)+str('\\n'))\r\n \r\nfor j in range(5):\r\n\tfor i in range(5):\r\n\t\r\n\t\tx=0.6*i+3\r\n\t\ty=j*0.6\r\n \t\tf1.write(str(x)+str('\\n'))\r\n\t\tf1.write(str(y)+str('\\n'))\r\n \r\nf1.write(\"1\\n\")\r\nf1.write(\"1\\n\")\r\nf1.write(\"3\\n\")\r\nf1.write(\"1\\n\")\r\n\r\nf1.write('*\\n')\r\n\r\n\r\nf1.close()"
},
{
"alpha_fraction": 0.4252013862133026,
"alphanum_fraction": 0.436133474111557,
"avg_line_length": 21.027027130126953,
"blob_id": "d2633a75b69c6fba5357c18369ae3786d27c6676",
"content_id": "5f9a97604142b544689d601f5aea93ddbd526961",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1738,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 74,
"path": "/v2in2017/correlator.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\r\nimport math\r\nclass ent():\r\n\r\n def __init__(self,n,nsteps):\r\n \r\n self.euclidcomp=[[0 for x in range(nsteps)] for y in range(n)] \r\n self.pos=[] \r\n\r\n\r\ndef filereader(dim,n,nsteps):\r\n f5=open(\"posres.aut\",\"r\")\r\n string=\"\"\r\n for t in range(nsteps): \r\n \r\n \r\n \r\n for k in range(n):\r\n listanum=[]\r\n \r\n for d in range(dim):\r\n string=f5.readline()\r\n \r\n if string!=\"\\n\" and string!=\"**\\n\":\r\n n1=float(string[0:len(string)-1])\r\n listanum.append(n1)\r\n \r\n entsfield[k].pos.append(listanum) \r\n if string=='**\\n':\r\n break\r\n \r\n if string!='**\\n': \r\n string=f5.readline() \r\n \r\n \r\n f5.close()\r\n\r\ndim=3\r\nn=60\r\nnsteps=500\r\nentsfield=[]\r\nlista=[]\r\nfor i in range(n):\r\n entsfield.append(ent(n,nsteps))\r\n \r\n\r\n\r\n \r\nfilereader(dim,n,nsteps)\r\nprint \r\nfor t in range(nsteps):\r\n for i in range(n):\r\n for j in range(n):\r\n if i!=j:\r\n dist=0\r\n for s in range(dim):\r\n \r\n dist+=(float(entsfield[i].pos[t][s])-float(entsfield[j].pos[t][s]))**2\r\n else:\r\n dist=0\r\n entsfield[i].euclidcomp[j][t]=math.sqrt(dist) \r\nprint entsfield[1].euclidcomp\r\nanswer=\"\"\r\nwhile answer!=\"exit\":\r\n w=input(\"n=\")\r\n lista=[]\r\n fig=plt.figure()\r\n for j in range(n):\r\n lista=[]\r\n for t in range(nsteps):\r\n lista.append(entsfield[w].euclidcomp[j][t])\r\n \r\n plt.plot(lista)\r\n plt.show() \r\n \r\n \r\n \r\n"
},
{
"alpha_fraction": 0.4496336877346039,
"alphanum_fraction": 0.5155677795410156,
"avg_line_length": 15.34920597076416,
"blob_id": "fa89a4baa92cbe0985b5f6a1fa88b056a90c51c1",
"content_id": "a90e9597f4919d2e8cffdf3e3eb574930dc63c6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 63,
"path": "/v2in2017/ex14/make.py",
"repo_name": "paolopoli1980/missp",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\nf1=open(\"pos.aut\",\"w\")\r\nk=0\r\n\r\nfor i in range(20):\r\n\tx=random.uniform(-2+k,-1)\r\n\ty=random.uniform(1,2)\r\n\tz=random.uniform(1,2)\r\n\tf1.write(str(x)+str(\"\\n\"))\r\n\tf1.write(str(y)+str(\"\\n\"))\r\n\tf1.write(str(z)+str(\"\\n\"))\r\n\r\nx=1\r\ny=1\r\nz=1\r\nf1.write(str(x)+str(\"\\n\"))\r\nf1.write(str(y)+str(\"\\n\"))\r\nf1.write(str(z)+str(\"\\n\"))\r\n\t \r\n\r\nf1.write(\"*\\n\")\r\n\r\nf1.close()\r\n\r\n\t\r\nf1=open(\"vinc.aut\",\"w\")\r\nfor i in range(20):\r\n\tx=i*0.1\t\r\n\tfor j in range(20):\r\n\t\ty=j*0.1\r\n\t\tz=0\r\n\t\tf1.write(str(x)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\nfor i in range(20):\r\n\tx=i*0.1\t\r\n\tfor j in range(21):\r\n\t\ty=j*0.1\r\n\t\tz=2\r\n\t\tf1.write(str(x)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\nfor i in range(20):\r\n\tz=i*0.1\t\r\n\tfor j in range(20):\r\n\t\tx=j*0.1\r\n\t\ty=0\r\n\t\tf1.write(str(x)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\nfor i in range(20):\r\n\tz=i*0.1\t\r\n\tfor j in range(20):\r\n\t\tx=j*0.1\r\n\t\ty=2\r\n\t\tf1.write(str(x)+str(\"\\n\"))\r\n\t\tf1.write(str(y)+str(\"\\n\"))\r\n\t\tf1.write(str(z)+str(\"\\n\"))\r\n\r\n\r\nf1.write(\"*\\n\")\r\nf1.close() "
}
]